Commit f8981e03 authored by Dave Airlie

Merge tag 'msm-fixes-2019_08_01' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

- Fix the dma_sync calls applied last week (Rob)
- Fix mdp5 dsi command mode (Brian)
- Squash fall through warnings (Jordan)
- Don't add disabled gpu nodes to the of device list (Jeffrey)

Cc: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
Cc: Jordan Crouse <jcrouse@codeaurora.org>
Cc: Brian Masney <masneyb@onstation.org>
Cc: Rob Clark <robdclark@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Fri 02 Aug 2019 05:54:27 AM AEST
# gpg:                using RSA key 96F70DFDA84A070A
# gpg: Can't check signature: public key not found
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20190801200439.GV104440@art_vandelay
parents 412e85b6 9ca7ad6c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			/* copy commands into RB: */
 			obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
...
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
...
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			/* ignore if there has not been a ctx switch: */
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
 				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
...
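The /* fall-thru */ comments added above carry no runtime meaning; they tell the compiler the missing break is deliberate. The kernel builds with GCC's -Wimplicit-fallthrough, which stays quiet when a recognizable fall-through comment (or, in later kernels, the fallthrough pseudo-keyword) sits immediately before the next case label. A minimal standalone sketch of the pattern, with hypothetical names standing in for the MSM_SUBMIT_CMD_* values (not code from this series):

#include <stdio.h>

/* Hypothetical stand-ins for the submit command types used above. */
enum submit_cmd { CMD_CTX_RESTORE_BUF, CMD_BUF };

static void handle(enum submit_cmd cmd, int lastctx, int ctx)
{
	switch (cmd) {
	case CMD_CTX_RESTORE_BUF:
		if (lastctx == ctx)
			break;	/* no context switch: nothing to restore */
		/* fall-thru */
	case CMD_BUF:
		printf("emit command buffer\n");	/* shared path for both cases */
		break;
	}
}

Built with gcc -Wimplicit-fallthrough, deleting the comment line reintroduces exactly the warning this series squashes.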
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+	u32 count;
+
+	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+	drm_crtc_set_max_vblank_count(crtc, count);
+
+	drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 	}
 
 	/* Restore vblank irq handling after power is enabled */
-	drm_crtc_vblank_on(crtc);
+	mdp5_crtc_vblank_on(crtc);
 
 	mdp5_crtc_mode_set_nofb(crtc);
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
 		mdp5_crtc_destroy_state(crtc, crtc->state);
 
 	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+	drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
...
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
 	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
 	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-	dev->max_vblank_count = 0xffffffff;
+	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
 	dev->vblank_disable_immediate = true;
 
 	return kms;
...
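The two vblank hunks work together: drm_crtc_set_max_vblank_count() programs a per-CRTC counter limit, and the DRM core only honors it when the device-wide dev->max_vblank_count is left at 0. A per-CRTC value of 0 means "no hardware frame counter", which is the right answer for command-mode DSI, where the panel is refreshed on demand and vblank timestamps must be used instead of a free-running counter. A sketch of that per-CRTC decision, under a hypothetical has_hw_counter flag:

#include <drm/drm_vblank.h>

/* Hypothetical helper: pick the vblank-counting strategy per CRTC.
 * Assumes dev->max_vblank_count == 0 so the per-CRTC value applies.
 */
static void setup_crtc_vblank(struct drm_crtc *crtc, bool has_hw_counter)
{
	/* 0 = no hw counter; the core falls back to timestamp estimation */
	drm_crtc_set_max_vblank_count(crtc, has_hw_counter ? 0xffffffff : 0);
	drm_crtc_vblank_on(crtc);
}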
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
...
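This hunk makes add_gpu_components() skip GPU nodes the devicetree has turned off: of_device_is_available() returns false when a node carries a status property other than "okay"/"ok" (typically status = "disabled"). Without the check, the disabled node still lands in the component match list and the drm device waits forever for a GPU driver that will never bind. A small sketch of the check, with a hypothetical wrapper name:

#include <linux/of.h>

/* Hypothetical wrapper: true only for nodes the DT enables, i.e. no
 * status property at all, or status = "okay" / "ok".
 */
static bool gpu_node_enabled(struct device_node *np)
{
	return np && of_device_is_available(np);
}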
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-						msm_obj->sgt->nents,
-						DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
...
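Why the helpers branch on get_dma_ops() (my reading of the fix, not an authoritative statement): when an IOMMU's DMA ops are installed on the device, dma_sync_sg_for_device()/dma_sync_sg_for_cpu() can perform the cache maintenance directly from the scatterlist's page list. On the dma-direct path, however, those sync calls expect DMA addresses that this driver, which manages its own IOMMU mappings, never assigns, so dma_map_sg()/dma_unmap_sg() are used purely for their cache-maintenance side effect. Either way the goal is the same one the comment block states: invalidate freshly allocated pages before handing them to the CPU as uncached/writecombine.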