Commit 9af07af9 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'topic/drm-misc-2016-07-22' of git://anongit.freedesktop.org/drm-intel into drm-next

Suddenly everyone shows up with their trivial patch series!
- piles of if (!ptr) check removals from Markus Elfring
- more of_node_put fixes from Peter Chen
- make fbdev support really optional in all drivers (except vmwgfx),
  somehow this fell through the cracks when we did all the hard prep work
  a while ago. Patches from Tobias Jakobi.
- leftover patches for the connector reg/unreg cleanup (required that I
  backmerged drm-next) from Chris
- last vgem fence patch from Chris
- fix up warnings in the new sphinx gpu docs build
- misc other small bits

* tag 'topic/drm-misc-2016-07-22' of git://anongit.freedesktop.org/drm-intel: (57 commits)
  GPU-DRM-Exynos: Delete an unnecessary check before the function call "vunmap"
  GPU-DRM-sun4i: Delete an unnecessary check before drm_fbdev_cma_hotplug_event()
  drm/atomic: Delete an unnecessary check before drm_property_unreference_blob()
  drm/rockchip: analogix_dp: add missing clk_disable_unprepare() on error
  drm: drm_connector->s/connector_id/index/ for consistency
  drm/virtio: Fix non static symbol warning
  drm/arc: Remove redundant dev_err call in arcpgu_load()
  drm/arc: Fix some sparse warnings
  drm/vgem: Fix non static symbol warning
  drm/doc: Spinx leftovers
  drm/dp-mst: Missing kernel doc
  drm/dp-mst: Remove tx_down_in_progress
  drm/doc: Fix missing kerneldoc for drm_dp_helper.c
  drm: Extract&Document drm_irq.h
  drm/doc: document all the properties in drm_mode_config
  drm/drm-kms.rst: Remove unused drm_fourcc.h include directive
  drm/doc: Add kerneldoc for @index
  drm: Unexport drm_connector_unregister_all()
  drm/sun4i: Remove redundant call to drm_connector_unregister_all()
  drm/ttm: Delete an unnecessary check before the function call "ttm_tt_destroy"
  ...
parents 5e580523 e8d3ef02
......@@ -188,7 +188,8 @@ Manual IRQ Registration
Drivers that require multiple interrupt handlers can't use the managed
IRQ registration functions. In that case IRQs must be registered and
unregistered manually (usually with the :c:func:`request_irq()` and
:c:func:`free_irq()` functions, or their devm_\* equivalent).
:c:func:`free_irq()` functions, or their :c:func:`devm_request_irq()` and
:c:func:`devm_free_irq()` equivalents).
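As a minimal, hypothetical sketch (the interrupt lines, handler names and name strings are invented for illustration), such a driver might register its handlers like this::

    static irqreturn_t my_vblank_irq(int irq, void *arg)
    {
            struct drm_device *dev = arg;

            /* handle the vblank interrupt, e.g. by calling drm_handle_vblank() */
            return IRQ_HANDLED;
    }

    static irqreturn_t my_error_irq(int irq, void *arg)
    {
            /* handle the error interrupt */
            return IRQ_HANDLED;
    }

    static int my_register_irqs(struct drm_device *dev, int vbl_irq, int err_irq)
    {
            int ret;

            ret = devm_request_irq(dev->dev, vbl_irq, my_vblank_irq, 0,
                                   "my-drm-vblank", dev);
            if (ret)
                    return ret;

            return devm_request_irq(dev->dev, err_irq, my_error_irq, 0,
                                    "my-drm-error", dev);
    }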
When manually registering IRQs, drivers must not set the
DRIVER_HAVE_IRQ driver feature flag, and must not provide the
......@@ -242,11 +243,13 @@ Open/Close, File Operations and IOCTLs
Open and Close
--------------
int (\*firstopen) (struct drm_device \*); void (\*lastclose) (struct
drm_device \*); int (\*open) (struct drm_device \*, struct drm_file
\*); void (\*preclose) (struct drm_device \*, struct drm_file \*);
void (\*postclose) (struct drm_device \*, struct drm_file \*);
Open and close handlers. None of those methods are mandatory.
Open and close handlers. None of those methods are mandatory::
int (*firstopen) (struct drm_device *);
void (*lastclose) (struct drm_device *);
int (*open) (struct drm_device *, struct drm_file *);
void (*preclose) (struct drm_device *, struct drm_file *);
void (*postclose) (struct drm_device *, struct drm_file *);
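A minimal, hypothetical sketch of wiring up these hooks (driver and function names invented; the vgem driver further down in this diff fills in .open and .preclose in the same way)::

    static int my_open(struct drm_device *dev, struct drm_file *file)
    {
            /* allocate per-file state and store it in file->driver_priv */
            return 0;
    }

    static void my_lastclose(struct drm_device *dev)
    {
            /* e.g. restore the fbdev console configuration */
    }

    static void my_postclose(struct drm_device *dev, struct drm_file *file)
    {
            /* free whatever my_open() allocated */
    }

    static struct drm_driver my_driver = {
            .open = my_open,
            .lastclose = my_lastclose,
            .postclose = my_postclose,
            /* remaining fields (.fops, .driver_features, ...) omitted */
    };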
The firstopen method is called by the DRM core for legacy UMS (User Mode
Setting) drivers only when an application opens a device that has no
......
......@@ -67,9 +67,6 @@ drivers can manually clean up a framebuffer at module unload time with
DRM Format Handling
-------------------
.. kernel-doc:: include/drm/drm_fourcc.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
......@@ -652,5 +649,5 @@ Vertical Blanking and Interrupt Handling Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_irq.c
:export:
.. kernel-doc:: include/drm/drmP.h
:functions: drm_crtc_vblank_waitqueue
.. kernel-doc:: include/drm/drm_irq.h
:internal:
......@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
......@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
module_put(exp_info->owner);
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
goto err_module;
}
dmabuf->priv = exp_info->priv;
......@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
exp_info->flags);
if (IS_ERR(file)) {
kfree(dmabuf);
return ERR_CAST(file);
ret = PTR_ERR(file);
goto err_dmabuf;
}
file->f_mode |= FMODE_LSEEK;
......@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
mutex_unlock(&db_list.lock);
return dmabuf;
err_dmabuf:
kfree(dmabuf);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
......
......@@ -2,7 +2,6 @@ config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
select DRM_KMS_CMA_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
......
......@@ -28,8 +28,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
{
struct arcpgu_drm_private *arcpgu = dev->dev_private;
if (arcpgu->fbdev)
drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
......@@ -49,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
......@@ -104,10 +103,8 @@ static int arcpgu_load(struct drm_device *drm)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(arcpgu->regs)) {
dev_err(drm->dev, "Could not remap IO mem\n");
if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
}
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
......@@ -127,10 +124,11 @@ static int arcpgu_load(struct drm_device *drm)
encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
ret = arcpgu_drm_sim_init(drm, 0);
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
}
......@@ -151,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
return 0;
}
int arcpgu_unload(struct drm_device *drm)
static int arcpgu_unload(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
......
......@@ -9,7 +9,6 @@ config DRM_HDLCD
depends on COMMON_CLK
select DRM_ARM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
......
......@@ -102,8 +102,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
if (hdlcd->fbdev)
drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
......
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
......
......@@ -2,11 +2,7 @@ config DRM_AST
tristate "AST server chips"
depends on DRM && PCI
select DRM_TTM
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
......
......@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
if (ast_fb->obj)
drm_gem_object_unreference_unlocked(ast_fb->obj);
drm_gem_object_unreference_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
......
......@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
......
......@@ -2,10 +2,6 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_TTM
help
Choose this option for qemu.
......
......@@ -465,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
if (bochs_fb->obj)
drm_gem_object_unreference_unlocked(bochs_fb->obj);
drm_gem_object_unreference_unlocked(bochs_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
......
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
......
......@@ -17,8 +17,8 @@
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
if (cirrus_fb->obj)
drm_gem_object_unreference_unlocked(cirrus_fb->obj);
drm_gem_object_unreference_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
......
......@@ -404,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
if (old_blob == new_blob)
return;
if (old_blob)
drm_property_unreference_blob(old_blob);
drm_property_unreference_blob(old_blob);
if (new_blob)
drm_property_reference_blob(new_blob);
*blob = new_blob;
......@@ -1589,72 +1588,6 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
int drm_atomic_remove_fb(struct drm_framebuffer *fb)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_device *dev = fb->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
int ret = 0;
unsigned plane_mask;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
plane_mask = 0;
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (ret)
goto unlock;
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
if (plane->state->fb != fb)
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto unlock;
}
drm_atomic_set_fb_for_plane(plane_state, NULL);
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret)
goto unlock;
plane_mask |= BIT(drm_plane_index(plane));
plane->old_fb = plane->fb;
plane->fb = NULL;
}
if (plane_mask)
ret = drm_atomic_commit(state);
unlock:
if (plane_mask)
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
if (ret || !plane_mask)
drm_atomic_state_free(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
......
......@@ -613,11 +613,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
* in this manner.
*/
if (drm_framebuffer_read_refcount(fb) > 1) {
if (dev->mode_config.funcs->atomic_commit) {
drm_atomic_remove_fb(fb);
goto out;
}
drm_modeset_lock_all(dev);
/* remove from any CRTC */
drm_for_each_crtc(crtc, dev) {
......@@ -635,7 +630,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
drm_modeset_unlock_all(dev);
}
out:
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
......@@ -934,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
connector->dev = dev;
connector->funcs = funcs;
connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
if (connector->connector_id < 0) {
ret = connector->connector_id;
ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out_put;
}
connector->index = ret;
ret = 0;
connector->connector_type = connector_type;
connector->connector_type_id =
......@@ -986,7 +980,7 @@ int drm_connector_init(struct drm_device *dev,
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
ida_remove(&config->connector_ida, connector->connector_id);
ida_remove(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
......@@ -1030,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_type_id);
ida_remove(&dev->mode_config.connector_ida,
connector->connector_id);
connector->index);
kfree(connector->display_info.bus_formats);
drm_mode_object_unregister(dev, &connector->base);
......@@ -1113,6 +1107,15 @@ void drm_connector_unregister(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_unregister);
static void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
}
static int drm_connector_register_all(struct drm_device *dev)
{
struct drm_connector *connector;
......@@ -1136,26 +1139,6 @@ static int drm_connector_register_all(struct drm_device *dev)
return ret;
}
/**
* drm_connector_unregister_all - unregister connector userspace interfaces
* @dev: drm device
*
* This function unregisters all connectors from sysfs and other places so
* that userspace can no longer access them. Drivers should call this as the
* first step of tearing down the device instance, or when the underlying
* physical device disappeared (e.g. USB unplug), right before calling
* drm_dev_unregister().
*/
void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
}
EXPORT_SYMBOL(drm_connector_unregister_all);
static int drm_encoder_register_all(struct drm_device *dev)
{
struct drm_encoder *encoder;
......
......@@ -125,7 +125,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_atomic_remove_fb(struct drm_framebuffer *fb);
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
......@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
WARN_ON(!mutex_is_locked(&mgr->qlock));
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
if (list_empty(&mgr->tx_msg_downq))
return;
}
mgr->tx_down_in_progress = true;
txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
......@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up(&mgr->tx_waitq);
}
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
return;
}
}
/* called holding qlock */
......@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (!mgr->tx_down_in_progress)
if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
......@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
* @mgr: manager for this port
* @port: unverified pointer to a port
*
......@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
if (mgr->tx_down_in_progress)
if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
......
......@@ -1695,7 +1695,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
vblwait->request.sequence, pipe);
vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
......
......@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
if (exynos_gem->kvaddr)
vunmap(exynos_gem->kvaddr);
vunmap(exynos_gem->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
......
......@@ -1820,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
DRM_ERROR("Failed to find ddc node in device tree\n");
return -ENODEV;
}
of_node_put(dev->of_node);
out_get_ddc_adpt:
hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
......@@ -1838,6 +1839,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = -ENODEV;
goto err_ddc;
}
of_node_put(dev->of_node);
out_get_phy_port:
if (hdata->drv_data->is_apb_phy) {
......
......@@ -5,12 +5,7 @@ config DRM_FSL_DCU
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_KMS_FB_HELPER
select DRM_PANEL
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select REGMAP_MMIO
select VIDEOMODE_HELPERS
help
......
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
......
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
......
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
......
......@@ -3,13 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
......
......@@ -4,11 +4,6 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
default n
help
DRM display driver for OMAP2/3/4 based boards.
......
......@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
......
......@@ -125,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
{
struct device_node *np;
struct device_node *np, *np_parent;
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
np = of_get_next_parent(np);
np_parent = of_get_next_parent(np);
of_node_put(np);
return np;
return np_parent;
}
struct device_node *
......
config DRM_QXL
tristate "QXL virtual GPU"
depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
select CRC32
help
......
......@@ -6,7 +6,6 @@ config DRM_RCAR_DU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
......
......@@ -4,11 +4,7 @@ config DRM_ROCKCHIP
depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_PANEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Rockchip soc chipset.
......
......@@ -96,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to dp pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
......@@ -272,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
......
......@@ -6,7 +6,6 @@ config DRM_SHMOBILE
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
......
......@@ -185,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
......
......@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
{
struct sun4i_drv *drv = drm->dev_private;
if (drv->fbdev)
drm_fbdev_cma_hotplug_event(drv->fbdev);
drm_fbdev_cma_hotplug_event(drv->fbdev);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
......
......@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
struct tegra_bo *bo = fb->planes[i];
if (bo) {
if (bo->pages && bo->vaddr)
if (bo->pages)
vunmap(bo->vaddr);
drm_gem_object_unreference_unlocked(&bo->gem);
......
......@@ -2,7 +2,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
......
......@@ -146,7 +146,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
fence_put(bo->moving);
......
......@@ -4,12 +4,7 @@ config DRM_UDL
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
......@@ -91,8 +91,7 @@ static void vc4_lastclose(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (vc4->fbdev)
drm_fbdev_cma_restore_mode(vc4->fbdev);
drm_fbdev_cma_restore_mode(vc4->fbdev);
}
static const struct file_operations vc4_drm_fops = {
......
......@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (vc4->fbdev)
drm_fbdev_cma_hotplug_event(vc4->fbdev);
drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
struct vc4_commit {
......
ccflags-y := -Iinclude/drm
vgem-y := vgem_drv.o
vgem-y := vgem_drv.o vgem_fence.o
obj-$(CONFIG_DRM_VGEM) += vgem.o
......@@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
.close = drm_gem_vm_close,
};
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
int ret;
vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
if (!vfile)
return -ENOMEM;
file->driver_priv = vfile;
ret = vgem_fence_open(vfile);
if (ret) {
kfree(vfile);
return ret;
}
return 0;
}
static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile = file->driver_priv;
vgem_fence_close(vfile);
kfree(vfile);
}
/* ioctls */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
......@@ -164,6 +192,8 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
}
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
......@@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_PRIME,
.open = vgem_open,
.preclose = vgem_preclose,
.gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
.dumb_create = vgem_gem_dumb_create,
......@@ -328,5 +361,6 @@ module_init(vgem_init);
module_exit(vgem_exit);
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
......@@ -32,9 +32,25 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <uapi/drm/vgem_drm.h>
struct vgem_file {
struct idr fence_idr;
struct mutex fence_mutex;
};
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
};
int vgem_fence_open(struct vgem_file *file);
int vgem_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file);
int vgem_fence_signal_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file);
void vgem_fence_close(struct vgem_file *file);
#endif
/*
* Copyright 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software")
* to deal in the software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include "vgem_drv.h"
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
struct fence base;
struct spinlock lock;
struct timer_list timer;
};
static const char *vgem_fence_get_driver_name(struct fence *fence)
{
return "vgem";
}
static const char *vgem_fence_get_timeline_name(struct fence *fence)
{
return "unbound";
}
static bool vgem_fence_signaled(struct fence *fence)
{
return false;
}
static bool vgem_fence_enable_signaling(struct fence *fence)
{
return true;
}
static void vgem_fence_release(struct fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
fence_free(&fence->base);
}
static void vgem_fence_value_str(struct fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
int size)
{
snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
}
static const struct fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
.wait = fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
.timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
fence_signal(&fence->base);
}
static struct fence *vgem_fence_create(struct vgem_file *vfile,
unsigned int flags)
{
struct vgem_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
spin_lock_init(&fence->lock);
fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
return &fence->base;
}
static int attach_dmabuf(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct dma_buf *dmabuf;
if (obj->dma_buf)
return 0;
dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
obj->dma_buf = dmabuf;
drm_gem_object_reference(obj);
return 0;
}
/*
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
*
* Create and attach a fence to the vGEM handle. This fence is then exposed
* via the dma-buf reservation object and visible to consumers of the exported
* dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
* vGEM buffer is being written to by the client and is exposed as an exclusive
* fence, otherwise the fence indicates the client is currently reading from the
* buffer and all future writes should wait for the client to signal its
* completion. Note that if a conflicting fence is already on the dma-buf (i.e.
* an exclusive fence when adding a read, or any fence when adding a write),
* -EBUSY is reported. Serialisation between operations should be handled
* by waiting upon the dma-buf.
*
* This returns the handle for the new fence that must be signaled within 10
* seconds (or otherwise it will automatically expire). See
* vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
*
* If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
*/
int vgem_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
struct drm_vgem_fence_attach *arg = data;
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
struct fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
return -EINVAL;
if (arg->pad)
return -EINVAL;
obj = drm_gem_object_lookup(file, arg->handle);
if (!obj)
return -ENOENT;
ret = attach_dmabuf(dev, obj);
if (ret)
goto err;
fence = vgem_fence_create(vfile, arg->flags);
if (!fence) {
ret = -ENOMEM;
goto err;
}
/* Check for a conflicting fence */
resv = obj->dma_buf->resv;
if (!reservation_object_test_signaled_rcu(resv,
arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
}
/* Expose the fence via the dma-buf */
ret = 0;
mutex_lock(&resv->lock.base);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
else if ((ret = reservation_object_reserve_shared(resv)) == 0)
reservation_object_add_shared_fence(resv, fence);
mutex_unlock(&resv->lock.base);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
mutex_lock(&vfile->fence_mutex);
ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
mutex_unlock(&vfile->fence_mutex);
if (ret > 0) {
arg->out_fence = ret;
ret = 0;
}
}
err_fence:
if (ret) {
fence_signal(fence);
fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
return ret;
}
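A hypothetical userspace sketch of driving this ioctl (assuming an already open vGEM file descriptor, a valid GEM handle, and that the uapi header added later in this series is installed as <drm/vgem_drm.h>; plain ioctl() is used instead of libdrm's drmIoctl() for brevity):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/vgem_drm.h>

    static int vgem_attach_write_fence(int vgem_fd, uint32_t handle,
                                       uint32_t *out_fence)
    {
            struct drm_vgem_fence_attach attach;

            memset(&attach, 0, sizeof(attach));
            attach.handle = handle;
            attach.flags = VGEM_FENCE_WRITE;	/* exclusive (write) fence */

            if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
                    return -errno;	/* e.g. -EBUSY on a conflicting fence */

            *out_fence = attach.out_fence;
            return 0;
    }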
/*
* vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
*
* Signal and consume a fence earlier attached to a vGEM handle using
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
*
* All fences must be signaled within 10s of attachment, otherwise they
* will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
*
* Signaling a fence indicates to all consumers of the dma-buf that the
* client has completed the operation associated with the fence, and that the
* buffer is then ready for consumption.
*
* If the fence does not exist (or has already been signaled by the client),
* vgem_fence_signal_ioctl returns -ENOENT.
*/
int vgem_fence_signal_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
struct fence *fence;
int ret = 0;
if (arg->flags)
return -EINVAL;
mutex_lock(&vfile->fence_mutex);
fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
mutex_unlock(&vfile->fence_mutex);
if (!fence)
return -ENOENT;
if (IS_ERR(fence))
return PTR_ERR(fence);
if (fence_is_signaled(fence))
ret = -ETIMEDOUT;
fence_signal(fence);
fence_put(fence);
return ret;
}
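Continuing the hypothetical userspace sketch above (same includes and assumptions), signaling and consuming the fence returned by the attach ioctl:

    static int vgem_signal_fence(int vgem_fd, uint32_t fence)
    {
            struct drm_vgem_fence_signal signal;

            memset(&signal, 0, sizeof(signal));
            signal.fence = fence;

            if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal))
                    return -errno;	/* -ETIMEDOUT if it already expired */

            return 0;
    }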
int vgem_fence_open(struct vgem_file *vfile)
{
mutex_init(&vfile->fence_mutex);
idr_init(&vfile->fence_idr);
return 0;
}
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
fence_signal(p);
fence_put(p);
return 0;
}
void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
}
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
......
......@@ -53,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
if (virtio_gpu_fb->obj)
drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
......@@ -326,8 +325,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
if (obj)
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_unreference_unlocked(obj);
return NULL;
}
......@@ -348,7 +346,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
}
struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
.atomic_commit_tail = vgdev_atomic_commit_tail,
};
......
......@@ -52,7 +52,6 @@
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
......@@ -87,6 +86,7 @@ struct drm_device_dma;
struct drm_dma_handle;
struct drm_gem_object;
struct drm_master;
struct drm_vblank_crtc;
struct device_node;
struct videomode;
......@@ -684,35 +684,6 @@ struct drm_minor {
struct mutex debugfs_lock; /* Protects debugfs_list. */
};
struct drm_pending_vblank_event {
struct drm_pending_event base;
unsigned int pipe;
struct drm_event_vblank event;
};
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timer_list disable_timer; /* delayed disable timer */
seqlock_t seqlock; /* protects vblank count and time */
u32 count; /* vblank counter */
struct timeval time; /* vblank timestamp */
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
unsigned int pipe; /* crtc index */
int framedur_ns; /* frame/field duration in ns */
int linedur_ns; /* line duration in ns */
bool enabled; /* so we don't call enable more than
once per disable */
};
/**
* DRM device structure. This structure represents a complete card that
* may contain multiple heads.
......@@ -847,6 +818,8 @@ struct drm_device {
int switch_power_state;
};
#include <drm/drm_irq.h>
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
......@@ -933,56 +906,6 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
/* IRQ support (drm_irq.h) */
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
* This function returns a pointer to the vblank waitqueue for the CRTC.
* Drivers can use this to implement vblank waits using wait_event() & co.
*/
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
......
......@@ -747,7 +747,14 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
unsigned i2c_nack_count, i2c_defer_count;
/**
* @i2c_nack_count: Counts I2C NACKs, used for DP validation.
*/
unsigned i2c_nack_count;
/**
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
......
......@@ -87,7 +87,15 @@ struct drm_dp_mst_port {
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
struct edid *cached_edid; /* for DP logical ports - make tiling work */
/**
* @cached_edid: for DP logical ports - make tiling work by ensuring
* that the EDID for all connectors is read immediately.
*/
struct edid *cached_edid;
/**
* @has_audio: Tracks whether the sink connector to this port is
* audio-capable.
*/
bool has_audio;
};
......@@ -397,71 +405,154 @@ struct drm_dp_payload {
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
* @dev: device pointer for adding i2c devices etc.
* @cbs: callbacks for connector addition and destruction.
* @max_dpcd_transaction_bytes - maximum number of bytes to read/write in one go.
* @aux: aux channel for the DP connector.
* @max_payloads: maximum number of payloads the GPU can generate.
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
* @lock: protects mst state, primary, dpcd.
* @mst_state: if this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*
* This struct represents the toplevel displayport MST topology manager.
* There should be one instance of this for every MST capable DP connector
* on the GPU.
*/
struct drm_dp_mst_topology_mgr {
/**
* @dev: device pointer for adding i2c devices etc.
*/
struct device *dev;
/**
* @cbs: callbacks for connector addition and destruction.
*/
const struct drm_dp_mst_topology_cbs *cbs;
/**
* @max_dpcd_transaction_bytes: maximum number of bytes to read/write
* in one go.
*/
int max_dpcd_transaction_bytes;
struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
/**
* @aux: AUX channel for the DP MST connector this topology mgr is
* controlling.
*/
struct drm_dp_aux *aux;
/**
* @max_payloads: maximum number of payloads the GPU can generate.
*/
int max_payloads;
/**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
*/
int conn_base_id;
/* only ever accessed from the workqueue - which should be serialised */
/**
* @down_rep_recv: Message receiver state for down replies. This and
* @up_req_recv are only ever accessed from the work item, which is
* serialised.
*/
struct drm_dp_sideband_msg_rx down_rep_recv;
/**
* @up_req_recv: Message receiver state for up requests. This and
* @down_rep_recv are only ever accessed from the work item, which is
* serialised.
*/
struct drm_dp_sideband_msg_rx up_req_recv;
/* pointer to info about the initial MST device */
struct mutex lock; /* protects mst_state + primary + dpcd */
/**
* @lock: protects mst state, primary, dpcd.
*/
struct mutex lock;
/**
* @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch device is connected.
*/
bool mst_state;
/**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
/**
* @dpcd: Cache of DPCD for primary port.
*/
u8 dpcd[DP_RECEIVER_CAP_SIZE];
/**
* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
*/
u8 sink_count;
/**
* @pbn_div: PBN to slots divisor.
*/
int pbn_div;
/**
* @total_slots: Total slots that can be allocated.
*/
int total_slots;
/**
* @avail_slots: Still available slots that can be allocated.
*/
int avail_slots;
/**
* @total_pbn: Total PBN count.
*/
int total_pbn;
/* messages to be transmitted */
/* qlock protects the upq/downq and in_progress,
the mstb tx_slots and txmsg->state once they are queued */
/**
* @qlock: protects @tx_msg_downq, the tx_slots in struct
* &drm_dp_mst_branch and txmsg->state once they are queued
*/
struct mutex qlock;
/**
* @tx_msg_downq: List of pending down replies.
*/
struct list_head tx_msg_downq;
bool tx_down_in_progress;
/* payload info + lock for it */
/**
* @payload_lock: Protect payload information.
*/
struct mutex payload_lock;
/**
* @proposed_vcpis: Array of pointers for the new VCPI allocation. The
* VCPI structure itself is embedded into the corresponding
* &drm_dp_mst_port structure.
*/
struct drm_dp_vcpi **proposed_vcpis;
/**
* @payloads: Array of payloads.
*/
struct drm_dp_payload *payloads;
/**
* @payload_mask: Elements of @payloads actually in use. Since
* reallocation of active outputs isn't possible, gaps can be created by
* disabling outputs out of order compared to how they've been enabled.
*/
unsigned long payload_mask;
/**
* @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
*/
unsigned long vcpi_mask;
/**
* @tx_waitq: Wait to queue stall for the tx worker.
*/
wait_queue_head_t tx_waitq;
/**
* @work: Probe work.
*/
struct work_struct work;
/**
* @tx_work: Sideband transmit worker. This can nest within the main
* @work worker for each transaction @work launches.
*/
struct work_struct tx_work;
/**
* @destroy_connector_list: List of to be destroyed connectors.
*/
struct list_head destroy_connector_list;
/**
* @destroy_connector_lock: Protects @destroy_connector_list.
*/
struct mutex destroy_connector_lock;
/**
* @destroy_connector_work: Work item to destroy connectors. Needed to
* avoid locking inversion.
*/
struct work_struct destroy_connector_work;
};
......
/*
* Copyright 2016 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_IRQ_H_
#define _DRM_IRQ_H_
#include <linux/seqlock.h>
/**
* struct drm_pending_vblank_event - pending vblank event tracking
*/
struct drm_pending_vblank_event {
/**
* @base: Base structure for tracking pending DRM events.
*/
struct drm_pending_event base;
/**
* @pipe: drm_crtc_index() of the &drm_crtc this event is for.
*/
unsigned int pipe;
/**
* @event: Actual event which will be sent to userspace.
*/
struct drm_event_vblank event;
};
/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
*
* Note that for historical reasons - the vblank handling code is still shared
* with legacy/non-kms drivers - this is a free-standing structure not directly
* connected to struct &drm_crtc. But all public interface functions are taking
* a struct &drm_crtc to hide this implementation detail.
*/
struct drm_vblank_crtc {
/**
* @dev: Pointer to the &drm_device.
*/
struct drm_device *dev;
/**
* @queue: Wait queue for vblank waiters.
*/
wait_queue_head_t queue; /**< VBLANK wait queue */
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
* drm_vblank_offdelay module option and the setting of the
* max_vblank_count value in the &drm_device structure.
*/
struct timer_list disable_timer;
/**
* @seqlock: Protect vblank count and time.
*/
seqlock_t seqlock; /* protects vblank count and time */
/**
* @count: Current software vblank counter.
*/
u32 count;
/**
* @time: Vblank timestamp corresponding to @count.
*/
struct timeval time;
/**
* @refcount: Number of users/waiters of the vblank interrupt. Only when
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
atomic_t refcount; /* number of users of vblank interrupts per crtc */
/**
* @last: Protected by dev->vbl_lock, used for wraparound handling.
*/
u32 last;
/**
* @inmodeset: Tracks whether the vblank is disabled due to a modeset.
* For legacy drivers, bit 2 additionally tracks whether an additional
* temporary vblank reference has been acquired to paper over the
* hardware counter resetting/jumping. KMS drivers should instead just
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
unsigned int inmodeset; /* Display driver is setting mode */
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
*/
unsigned int pipe;
/**
* @framedur_ns: Frame/Field duration in ns, used by
* drm_calc_vbltimestamp_from_scanoutpos() and computed by
* drm_calc_timestamping_constants().
*/
int framedur_ns;
/**
* @linedur_ns: Line duration in ns, used by
* drm_calc_vbltimestamp_from_scanoutpos() and computed by
* drm_calc_timestamping_constants().
*/
int linedur_ns;
/**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
* disabling functions multiple times.
*/
bool enabled;
};
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
* This function returns a pointer to the vblank waitqueue for the CRTC.
* Drivers can use this to implement vblank waits using wait_event() and related
* functions.
*/
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}
#endif
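A brief, hypothetical driver sketch of this usage (names invented; it assumes the driver's vblank interrupt handler calls drm_handle_vblank(), which wakes this queue, and clears the flip_pending flag):

    static void my_wait_for_flip(struct drm_crtc *crtc, atomic_t *flip_pending)
    {
            wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(crtc);

            /* The condition is re-checked on every wakeup of the vblank
             * waitqueue until the driver clears flip_pending. */
            wait_event(*wq, atomic_read(flip_pending) == 0);
    }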
/*
* Copyright 2016 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _UAPI_VGEM_DRM_H_
#define _UAPI_VGEM_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
#define DRM_VGEM_FENCE_ATTACH 0x1
#define DRM_VGEM_FENCE_SIGNAL 0x2
#define DRM_IOCTL_VGEM_FENCE_ATTACH DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
#define DRM_IOCTL_VGEM_FENCE_SIGNAL DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
struct drm_vgem_fence_attach {
__u32 handle;
__u32 flags;
#define VGEM_FENCE_WRITE 0x1
__u32 out_fence;
__u32 pad;
};
struct drm_vgem_fence_signal {
__u32 fence;
__u32 flags;
};
#if defined(__cplusplus)
}
#endif
#endif /* _UAPI_VGEM_DRM_H_ */