Commit 58b58f6e authored by Dave Airlie

Merge branch 'msm-fixes-4.12-rc4' of git://people.freedesktop.org/~robclark/linux into drm-fixes

a few fixes for 4.12..

* 'msm-fixes-4.12-rc4' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: Fix the check for the command size
  drm/msm: Take the mutex before calling msm_gem_new_impl
  drm/msm: for array in-fences, check if all backing fences are from our own context before waiting
  drm/msm: constify irq_domain_ops
  drm/msm/mdp5: release hwpipe(s) for unused planes
  drm/msm: Reuse dma_fence_release.
  drm/msm: Expose our reservation object when exporting a dmabuf.
  drm/msm/gpu: check legacy clk names in get_clocks()
  drm/msm/mdp5: use __drm_atomic_helper_plane_duplicate_state()
  drm/msm: select PM_OPP
parents 25f480e8 d72fea53
@@ -13,6 +13,7 @@ config DRM_MSM
         select QCOM_SCM
         select SND_SOC_HDMI_CODEC if SND_SOC
         select SYNC_FILE
+        select PM_OPP
         default y
         help
           DRM/KMS driver for MSM/snapdragon.

@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
         return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
         .map = mdss_hw_irqdomain_map,
         .xlate = irq_domain_xlate_onecell,
 };

@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
         mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                         sizeof(*mdp5_state), GFP_KERNEL);
+        if (!mdp5_state)
+                return NULL;
 
-        if (mdp5_state && mdp5_state->base.fb)
-                drm_framebuffer_reference(mdp5_state->base.fb);
+        __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
         return &mdp5_state->base;
 }

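For context, the duplicate_state callback reassembles to roughly the following after this change (a sketch; the WARN_ON(!plane->state) guard is assumed from the surrounding function, which the hunk does not show). The point is that __drm_atomic_helper_plane_duplicate_state() copies the core plane state and takes the framebuffer reference itself, which is why the NULL check must now happen first and the manual drm_framebuffer_reference() call goes away.

static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
        struct mdp5_plane_state *mdp5_state;

        if (WARN_ON(!plane->state))
                return NULL;

        mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                        sizeof(*mdp5_state), GFP_KERNEL);
        if (!mdp5_state)
                return NULL;

        /* Copies the base state fields and grabs the fb reference for us. */
        __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);

        return &mdp5_state->base;
}
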
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
                         mdp5_pipe_release(state->state, old_hwpipe);
                         mdp5_pipe_release(state->state, old_right_hwpipe);
                 }
+        } else {
+                mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+                mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+                mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
         }
 
         return 0;

@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
         .gem_prime_export = drm_gem_prime_export,
         .gem_prime_import = drm_gem_prime_import,
+        .gem_prime_res_obj = msm_gem_prime_res_obj,
         .gem_prime_pin = msm_gem_prime_pin,
         .gem_prime_unpin = msm_gem_prime_unpin,
         .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,

@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                 struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);

@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-        struct msm_fence_context *fctx;
         struct dma_fence base;
+        struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
         return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-        struct msm_fence *f = to_msm_fence(fence);
-        kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
         .get_driver_name = msm_fence_get_driver_name,
         .get_timeline_name = msm_fence_get_timeline_name,
         .enable_signaling = msm_fence_enable_signaling,
         .signaled = msm_fence_signaled,
         .wait = dma_fence_default_wait,
-        .release = msm_fence_release,
+        .release = dma_fence_free,
 };
 
 struct dma_fence *

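A note on the two fence hunks above: dma_fence_free() is just kfree_rcu() on the struct dma_fence pointer it is handed, so dropping the driver-private release callback is only safe once the embedded base is the first member of struct msm_fence; that is what the field reorder guarantees. A hypothetical compile-time guard (not part of the patch) that captures the layout requirement:

#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical wrapper around the fence allocation: dma_fence_free()
 * frees the pointer it is handed, so the embedded base must sit at
 * offset 0 of struct msm_fence for the reuse to be safe. */
static struct msm_fence *msm_fence_alloc_checked(void)
{
        BUILD_BUG_ON(offsetof(struct msm_fence, base) != 0);

        return kzalloc(sizeof(struct msm_fence), GFP_KERNEL);
}
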
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
         struct msm_gem_object *msm_obj;
         bool use_vram = false;
 
+        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
         switch (flags & MSM_BO_CACHE_MASK) {
         case MSM_BO_UNCACHED:
         case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
         size = PAGE_ALIGN(dmabuf->size);
 
+        /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+        mutex_lock(&dev->struct_mutex);
         ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+        mutex_unlock(&dev->struct_mutex);
+
         if (ret)
                 goto fail;

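The two hunks above pair up: msm_gem_new_impl() touches the device-wide inactive list and now asserts its locking requirement, while msm_gem_import() was the caller reaching it without dev->struct_mutex held. A minimal sketch of that contract, with hypothetical function names:

#include <drm/drmP.h>
#include <linux/mutex.h>

/* The helper documents the lock it needs; every caller takes it. */
static void touch_shared_list(struct drm_device *dev)
{
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        /* ... safe to walk or modify device-wide lists here ... */
}

static void some_caller(struct drm_device *dev)
{
        mutex_lock(&dev->struct_mutex);
        touch_shared_list(dev);
        mutex_unlock(&dev->struct_mutex);
}
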
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
         if (!obj->import_attach)
                 msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+        return msm_obj->resv;
+}

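Why the new hook matters: without it the PRIME export path gives the dma-buf a fresh reservation object, so importers never see the fences msm attaches to the BO's own resv. With .gem_prime_res_obj wired up (see the msm_driver hunk above), the exported dma-buf shares the BO's reservation object. Roughly where the core consumes the hook, approximated from memory of this kernel generation's drm_prime.c rather than copied verbatim:

#include <drm/drmP.h>
#include <linux/dma-buf.h>

/* Approximation of the relevant part of drm_gem_prime_export(): the
 * driver's reservation object, when provided, is handed to the dma-buf
 * core at export time instead of letting it allocate its own. */
static struct dma_buf *prime_export_sketch(struct drm_device *dev,
                                           struct drm_gem_object *obj,
                                           struct dma_buf_export_info *exp_info)
{
        if (dev->driver->gem_prime_res_obj)
                exp_info->resv = dev->driver->gem_prime_res_obj(obj);

        return dma_buf_export(exp_info);
}
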
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                 if (!in_fence)
                         return -EINVAL;
 
-                /* TODO if we get an array-fence due to userspace merging multiple
-                 * fences, we need a way to determine if all the backing fences
-                 * are from our own context..
+                /*
+                 * Wait if the fence is from a foreign context, or if the fence
+                 * array contains any fence from a foreign context.
                  */
-
-                if (in_fence->context != gpu->fctx->context) {
+                if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
                         ret = dma_fence_wait(in_fence, true);
                         if (ret)
                                 return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                         goto out;
                 }
 
-                if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                                msm_obj->base.size) {
+                if (!submit_cmd.size ||
+                    ((submit_cmd.size + submit_cmd.submit_offset) >
+                     msm_obj->base.size)) {
                         DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                         ret = -EINVAL;
                         goto out;

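The in-fence hunk relies on dma_fence_match_context(): unlike the old bare fence->context comparison, it also understands the dma_fence_array that sync_file builds when userspace merges several fences, and it reports a match only when every backing fence belongs to the given context. So the submit path now skips the CPU-side dma_fence_wait() only when all in-fences were produced by msm's own fence context. A rough sketch of that behaviour (paraphrased, not the dma-buf core source):

#include <linux/dma-fence-array.h>

/* A merged array matches only if every backing fence is from the given
 * context; a plain fence is a simple compare. */
static bool match_context_sketch(struct dma_fence *fence, u64 context)
{
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);
                unsigned int i;

                for (i = 0; i < array->num_fences; i++)
                        if (array->fences[i]->context != context)
                                return false;

                return true;
        }

        return fence->context == context;
}
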
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
                 gpu->grp_clks[i] = get_clock(dev, name);
 
                 /* Remember the key clocks that we need to control later */
-                if (!strcmp(name, "core"))
+                if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                         gpu->core_clk = gpu->grp_clks[i];
-                else if (!strcmp(name, "rbbmtimer"))
+                else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                         gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
                 ++i;

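This hunk exists because older device trees still use the legacy "_clk" suffix in clock-names ("core_clk", "rbbmtimer_clk") while the current binding drops it, so get_clocks() now latches the two key clocks under either spelling. If more clocks ever need the same treatment, the open-coded strcmp() pairs could be folded into a small helper along these lines (hypothetical, not part of the patch):

#include <linux/string.h>

/* Hypothetical helper: returns true for "<key>" and for the legacy
 * "<key>_clk" spelling, which is what the strcmp() pairs above check. */
static bool clk_name_matches(const char *name, const char *key)
{
        size_t len = strlen(key);

        if (strncmp(name, key, len) != 0)
                return false;

        return name[len] == '\0' || strcmp(name + len, "_clk") == 0;
}

Usage would then read: if (clk_name_matches(name, "core")) gpu->core_clk = gpu->grp_clks[i];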