Commit 4e40f0f3 authored by Linus Torvalds

Merge tag 'drm-fixes-2019-04-26' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular drm fixes, nothing too outstanding, I'm guessing Easter was
  slowing people down.

  i915:
   - FEC enable fix
   - BXT display lanes fix

  ttm:
   - fix reinit for reloading drivers regression

  imx:
   - DP CSC fix

  sun4i:
   - module unload/load fix

  vc4:
   - memory leak fix
   - compile fix

  dw-hdmi:
   - rockchip scdc overflow fix

  sched:
   - docs fix

  vmwgfx:
   - dma api layering fix"

* tag 'drm-fixes-2019-04-26' of git://anongit.freedesktop.org/drm/drm:
  drm/bridge: dw-hdmi: fix SCDC configuration for ddc-i2c-bus
  drm/vmwgfx: Fix dma API layer violation
  drm/vc4: Fix compilation error reported by kbuild test bot
  drm/sun4i: Unbind components before releasing DRM and memory
  drm/vc4: Fix memory leak during gpu reset.
  drm/sched: Fix description of drm_sched_stop
  drm/imx: don't skip DP channel disable for background plane
  gpu: ipu-v3: dp: fix CSC handling
  drm/ttm: fix re-init of global structures
  drm/sun4i: Fix component unbinding and component master deletion
  drm/sun4i: Set device driver data at bind time for use in unbind
  drm/sun4i: Add missing drm_atomic_helper_shutdown at driver unbind
  drm/i915: Restore correct bxt_ddi_phy_calc_lane_lat_optim_mask() calculation
  drm/i915: Do not enable FEC without DSC
  drm: bridge: dw-hdmi: Fix overflow workaround for Rockchip SoCs
Parents: d0473f97 6db71bea
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c:

@@ -1046,6 +1046,10 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
 	if (hdmi->version < 0x200a)
 		return false;
 
+	/* Disable if no DDC bus */
+	if (!hdmi->ddc)
+		return false;
+
 	/* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
 	if (!display->hdmi.scdc.supported ||
 	    !display->hdmi.scdc.scrambling.supported)
@@ -1684,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			 * Source Devices compliant shall set the
 			 * Source Version = 1.
 			 */
-			drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+			drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
 				       &bytes);
-			drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+			drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
 				min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
 			/* Enabled Scrambling in the Sink */
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+			drm_scdc_set_scrambling(hdmi->ddc, 1);
 
 			/*
 			 * To activate the scrambler feature, you must ensure
@@ -1706,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
 			hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
 				    HDMI_MC_SWRSTZ);
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+			drm_scdc_set_scrambling(hdmi->ddc, 0);
 		}
 	}
@@ -1800,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 	 * iteration for others.
 	 * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
 	 * the workaround with a single iteration.
+	 * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+	 * been identified as needing the workaround with a single iteration.
 	 */
 
 	switch (hdmi->version) {
@@ -1808,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 		break;
 	case 0x131a:
 	case 0x132a:
+	case 0x200a:
 	case 0x201a:
+	case 0x211a:
 	case 0x212a:
 		count = 1;
 		break;
...
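The SCDC change above boils down to a pointer-indirection pattern: keep one DDC bus pointer that may reference either the internal I2C master or an external adapter (for example one named by a ddc-i2c-bus device-tree property), route every SCDC access through that pointer, and refuse SCDC when no bus exists. A minimal userspace sketch of that pattern, with hypothetical names rather than the driver's real API:

/* Sketch only: "i2c_bus", "hdmi_dev" and "scdc_*" are hypothetical names. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct i2c_bus {
	const char *name;
};

struct hdmi_dev {
	struct i2c_bus internal;	/* built-in I2C master */
	struct i2c_bus *ddc;		/* bus carrying DDC; may be NULL */
};

static bool scdc_possible(const struct hdmi_dev *hdmi)
{
	return hdmi->ddc != NULL;	/* no DDC bus -> no SCDC at all */
}

static void scdc_writeb(struct hdmi_dev *hdmi, unsigned int reg,
			unsigned int val)
{
	/* every access goes through hdmi->ddc, never &hdmi->internal */
	printf("SCDC write %#x = %#x via %s bus\n", reg, val,
	       hdmi->ddc->name);
}

int main(void)
{
	struct i2c_bus external = { "external" };
	struct hdmi_dev hdmi = { .internal = { "internal" }, .ddc = NULL };

	hdmi.ddc = &external;	/* e.g. adapter from a ddc-i2c-bus property */
	if (scdc_possible(&hdmi))
		scdc_writeb(&hdmi, 0x02, 0x01);
	return 0;
}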
drivers/gpu/drm/i915/intel_ddi.c:

@@ -3862,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	else
 		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+	if (ret)
+		return ret;
 
-	if (IS_GEN9_LP(dev_priv) && ret)
+	if (IS_GEN9_LP(dev_priv))
 		pipe_config->lane_lat_optim_mask =
 			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
 
 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-	return ret;
+	return 0;
 }
...
drivers/gpu/drm/i915/intel_dp.c:

@@ -1886,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 	int pipe_bpp;
 	int ret;
 
+	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+		intel_dp_supports_fec(intel_dp, pipe_config);
+
 	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
 		return -EINVAL;
@@ -2116,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return -EINVAL;
 
-	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-		intel_dp_supports_fec(intel_dp, pipe_config);
-
 	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
 	if (ret < 0)
 		return ret;
...
drivers/gpu/drm/imx/ipuv3-crtc.c:

@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
 	if (disable_partial)
 		ipu_plane_disable(ipu_crtc->plane[1], true);
 	if (disable_full)
-		ipu_plane_disable(ipu_crtc->plane[0], false);
+		ipu_plane_disable(ipu_crtc->plane[0], true);
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
...
drivers/gpu/drm/scheduler/sched_main.c:

@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
 /**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
  *
  * @sched: scheduler instance
- * @bad: bad scheduler job
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched)
...
drivers/gpu/drm/sun4i/sun4i_drv.c:

@@ -16,6 +16,7 @@
 #include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
 		ret = -ENOMEM;
 		goto free_drm;
 	}
+
+	dev_set_drvdata(dev, drm);
 	drm->dev_private = drv;
 	INIT_LIST_HEAD(&drv->frontend_list);
 	INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
+
+	component_unbind_all(dev, NULL);
 	of_reserved_mem_device_release(dev);
+
 	drm_dev_put(drm);
 }
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 static int sun4i_drv_remove(struct platform_device *pdev)
 {
+	component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
 	return 0;
 }
...
drivers/gpu/drm/ttm/ttm_bo.c:

@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
  * ttm_global_mutex - protecting the global BO state
  */
 DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
-	.use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
 
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
@@ -1531,12 +1530,13 @@ static void ttm_bo_global_release(void)
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 
 	mutex_lock(&ttm_global_mutex);
-	if (--glob->use_count > 0)
+	if (--ttm_bo_glob_use_count > 0)
 		goto out;
 
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 	ttm_mem_global_release(&ttm_mem_glob);
+	memset(glob, 0, sizeof(*glob));
 out:
 	mutex_unlock(&ttm_global_mutex);
 }
@@ -1548,7 +1548,7 @@ static int ttm_bo_global_init(void)
 	unsigned i;
 
 	mutex_lock(&ttm_global_mutex);
-	if (++glob->use_count > 1)
+	if (++ttm_bo_glob_use_count > 1)
 		goto out;
 
 	ret = ttm_mem_global_init(&ttm_mem_glob);
...
drivers/gpu/drm/ttm/ttm_memory.c:

@@ -461,8 +461,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-	unsigned int i;
 	struct ttm_mem_zone *zone;
+	unsigned int i;
 
 	/* let the page allocator first stop the shrink work. */
 	ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
 	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
+	memset(glob, 0, sizeof(*glob));
 }
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
...
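Both TTM files implement one idea: the use count moves out of the global structs, so that zeroing a struct on final release cannot clobber the count, and a later init (say, after a driver module is unloaded and reloaded) starts from genuinely clean state. A minimal userspace sketch of this re-init pattern, under hypothetical names (not the TTM API):

/* Sketch only: all names here are hypothetical. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct bo_global {
	int state;		/* stands in for LRU lists, kobjects, ... */
	/* deliberately NO use_count member in here */
};

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int glob_use_count;	/* lives outside the zeroed struct */
static struct bo_global glob;

static void global_init(void)
{
	pthread_mutex_lock(&global_mutex);
	if (++glob_use_count == 1)
		glob.state = 1;		/* first user sets everything up */
	pthread_mutex_unlock(&global_mutex);
}

static void global_release(void)
{
	pthread_mutex_lock(&global_mutex);
	if (--glob_use_count == 0)
		/* last user: wipe the struct; the counter survives */
		memset(&glob, 0, sizeof(glob));
	pthread_mutex_unlock(&global_mutex);
}

int main(void)
{
	global_init();		/* first driver load */
	global_release();	/* unload: struct zeroed, count back to 0 */
	global_init();		/* reload: re-init runs again, cleanly */
	printf("state=%d refs=%u\n", glob.state, glob_use_count);
	global_release();
	return 0;
}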
drivers/gpu/drm/vc4/vc4_crtc.c:

@@ -1042,7 +1042,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		vc4_crtc_destroy_state(crtc, crtc->state);
 
 	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 	if (crtc->state)
...
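The one-line vc4 change fixes a classic subclassing leak: the reset path destroyed the state through the generic helper, which knows nothing about the extra allocation carried by the driver's subclassed state, so that allocation leaked on every GPU reset. A standalone sketch of the bug shape, with hypothetical names rather than vc4's real types:

/* Sketch only: names are hypothetical. */
#include <stdlib.h>

struct crtc_state {
	int base_member;
};

struct sub_crtc_state {
	struct crtc_state base;	/* must stay first: we downcast below */
	void *dlist_alloc;	/* subclass-only allocation */
};

/* generic helper: knows only about the base object */
static void helper_destroy_state(struct crtc_state *state)
{
	free(state);
}

/* driver hook: releases subclass resources, then the base */
static void driver_destroy_state(struct crtc_state *state)
{
	struct sub_crtc_state *s = (struct sub_crtc_state *)state;

	free(s->dlist_alloc);	/* what the old reset path leaked */
	helper_destroy_state(state);
}

int main(void)
{
	struct sub_crtc_state *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	s->dlist_alloc = malloc(64);
	/* reset path: call the DRIVER hook, not the bare helper */
	driver_destroy_state(&s->base);
	return 0;
}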
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c:

@@ -545,30 +545,14 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 	dev_priv->initial_height = height;
 }
 
-/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
-	return !dma_is_direct(ops) && ops &&
-		ops->map_page != dma_direct_map_page;
-}
-
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
  *
  * @dev_priv: Pointer to a struct vmw_private
  *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This functions tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
  * If this function decides that DMA is not possible, it returns -EINVAL.
  * The driver may then try to disable features of the device that require
  * DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-		[vmw_dma_map_populate] = "Keeping DMA mappings.",
+		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
-	else if (vmw_assume_iommu(dev_priv->dev))
-		dev_priv->map_mode = vmw_dma_map_populate;
-	else if (!vmw_force_iommu)
-		dev_priv->map_mode = vmw_dma_phys;
-	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
 	else
 		dev_priv->map_mode = vmw_dma_map_populate;
 
-	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
-		dev_priv->map_mode = vmw_dma_map_bind;
-
 	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
 	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
 	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
...
drivers/gpu/ipu-v3/ipu-dp.c:

@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
 		ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
 				DP_COM_CONF_CSC_DEF_BOTH);
 	} else {
-		if (flow->foreground.in_cs == flow->out_cs)
+		if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+		    flow->foreground.in_cs == flow->out_cs)
 			/*
 			 * foreground identical to output, apply color
 			 * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 	struct ipu_dp_priv *priv = flow->priv;
 	u32 reg, csc;
 
+	dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
 	if (!dp->foreground)
 		return;
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 	reg = readl(flow->base + DP_COM_CONF);
 	csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-	if (csc == DP_COM_CONF_CSC_DEF_FG)
-		reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+		reg |= DP_COM_CONF_CSC_DEF_BG;
 
 	reg &= ~DP_COM_CONF_FG_EN;
 	writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
 	mutex_init(&priv->mutex);
 
 	for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+		priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+		priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
 		priv->flow[i].foreground.foreground = true;
 		priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
 		priv->flow[i].priv = priv;
...
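All four ipu-dp hunks rely on one sentinel convention: in_cs starts as IPUV3_COLORSPACE_UNKNOWN, is reset to it whenever a channel is disabled, and setup code treats the sentinel as "do not base CSC decisions on this channel". A small standalone sketch of that convention, with hypothetical names:

/* Sketch only: names are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

enum colorspace { CS_UNKNOWN, CS_RGB, CS_YUV };

struct flow_channel {
	enum colorspace in_cs;
};

static void channel_disable(struct flow_channel *ch)
{
	ch->in_cs = CS_UNKNOWN;	/* forget the stale input colorspace */
}

static bool needs_csc(const struct flow_channel *fg, enum colorspace out_cs)
{
	/* a disabled/never-configured channel must not request conversion */
	return fg->in_cs != CS_UNKNOWN && fg->in_cs != out_cs;
}

int main(void)
{
	struct flow_channel fg = { .in_cs = CS_UNKNOWN };	/* as at init */

	fg.in_cs = CS_YUV;
	printf("enabled:  needs_csc=%d\n", needs_csc(&fg, CS_RGB));	/* 1 */
	channel_disable(&fg);
	printf("disabled: needs_csc=%d\n", needs_csc(&fg, CS_RGB));	/* 0 */
	return 0;
}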
include/drm/ttm/ttm_bo_driver.h:

@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
 	/**
 	 * Protected by ttm_global_mutex.
 	 */
-	unsigned int use_count;
 	struct list_head device_list;
 
 	/**
...