Commit 8d08c055 authored by Linus Torvalds

Merge tag 'drm-next-2018-06-11' of git://anongit.freedesktop.org/drm/drm

Pull drm msm updates and misc fixes from Dave Airlie:
 "I looked at Rob's msm tree, he kept it small due to being late, and it
  was in -next for a while before he was ill, so I think it should be
  fine.

  Otherwise this contains a set of i915 fixes and a v3d build fix, and
  vc4 leak fix"

* tag 'drm-next-2018-06-11' of git://anongit.freedesktop.org/drm/drm: (31 commits)
  drm/i915/icl: Don't update enabled dbuf slices struct until updated in hw
  drm/i915/icl: fix icl_unmap/map_plls_to_ports
  drm/i915: Remove bogus NV12 PLANE_COLOR_CTL setup
  drm/msm: Fix NULL deref on bind/probe deferral
  drm/msm: Switch to atomic_helper_commit()
  drm/msm: Remove msm_commit/worker, use atomic helper commit
  drm/msm: Issue queued events when disabling crtc
  drm/msm: Move implicit sync handling to prepare_fb
  drm/msm: Refactor complete_commit() to look more the helpers
  drm/msm: Don't subclass drm_atomic_state anymore
  drm/msm/mdp5: Use the new private_obj state
  drm/msm/mdp5: Add global state as a private atomic object
  drm/msm: use correct aspace pointer in msm_gem_put_iova()
  drm/msm: remove unbalanced mutex unlock
  drm/msm: don't deref error pointer in the msm_fbdev_create error path
  drm/msm/dsi: use correct enum in dsi_get_cmd_fmt
  drm/msm: Fix possible null dereference on failure of get_pages()
  drm/msm: Add modifier to mdp_get_format arguments
  drm/msm: Mark the crtc->state->event consumed
  drm/msm/dsi: implement auto PHY timing calculator for 10nm PHY
  ...
parents f0dc7f9c 33ce21d6
@@ -2909,6 +2909,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
if (info) {
gvt_err("%s %s duplicated\n", e->info->name,
info->name);
+kfree(e);
return -EEXIST;
}
......
@@ -67,7 +67,7 @@
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
-#define AUX_BURST_SIZE 16
+#define AUX_BURST_SIZE 20
/* DPCD addresses */
#define DPCD_REV 0x000
......
@@ -903,11 +903,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
}
/*
- * Write request format: (command + address) occupies
- * 3 bytes, followed by (len + 1) bytes of data.
+ * Write request format: Header (command + address + size) occupies
+ * 4 bytes, followed by (len + 1) bytes of data. See details at
+ * intel_dp_aux_transfer().
*/
-if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+if ((len + 1 + 4) > AUX_BURST_SIZE) {
+gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
return -EINVAL;
+}
/* unpack data from vreg to buf */
for (t = 0; t < 4; t++) {
@@ -971,8 +974,10 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
/*
* Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
*/
-if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+if ((len + 2) > AUX_BURST_SIZE) {
+gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
return -EINVAL;
+}
/* read from virtual DPCD to vreg */
/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
......
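The two AUX hunks above replace WARN_ON with a graceful reject: the length field comes from the guest, so an oversized value must fail the emulated transfer instead of tripping a host warning and then overrunning the 20-byte burst buffer. A minimal user-space sketch of the same bounds check (the function and values here are illustrative, not the kernel code):

    #include <stdio.h>

    #define AUX_BURST_SIZE 20

    /* Illustrative only: a guest-controlled length must be range-checked
     * before it is used against the fixed-size AUX burst buffer. The
     * header (command + address + size) takes 4 bytes, data takes len + 1. */
    static int check_aux_write_len(int len)
    {
        if ((len + 1 + 4) > AUX_BURST_SIZE) {
            fprintf(stderr, "dp_aux_header: len %d is too large\n", len);
            return -1; /* stands in for -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        printf("len=10 -> %d\n", check_aux_write_len(10)); /* accepted: 15 <= 20 */
        printf("len=16 -> %d\n", check_aux_write_len(16)); /* rejected: 21 > 20 */
        return 0;
    }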
@@ -123,6 +123,12 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
return -EINVAL;
}
+if (!pfn_valid(pfn)) {
+gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+return -EINVAL;
+}
/* Setup DMA mapping. */
page = pfn_to_page(pfn);
*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
@@ -583,6 +589,17 @@ static int intel_vgpu_open(struct mdev_device *mdev)
return ret;
}
+static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
+{
+struct eventfd_ctx *trigger;
+trigger = vgpu->vdev.msi_trigger;
+if (trigger) {
+eventfd_ctx_put(trigger);
+vgpu->vdev.msi_trigger = NULL;
+}
+}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
struct kvmgt_guest_info *info;
@@ -607,6 +624,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
+intel_vgpu_release_msi_eventfd_ctx(vgpu);
vgpu->vdev.kvm = NULL;
vgpu->handle = 0;
}
@@ -987,7 +1006,8 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
return PTR_ERR(trigger);
}
vgpu->vdev.msi_trigger = trigger;
-}
+} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
+intel_vgpu_release_msi_eventfd_ctx(vgpu);
return 0;
}
@@ -1592,6 +1612,18 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+/*
+ * When the guest is powered off, msi_trigger is set to NULL, but the
+ * vgpu's config and mmio registers are not restored to defaults during
+ * guest poweroff. If this vgpu is reused by the next VM, its pipes may
+ * still be enabled, so once the vgpu is active it will receive vblank
+ * interrupt requests. But msi_trigger stays NULL until MSI is enabled
+ * by the guest, so if msi_trigger is NULL, return success without
+ * injecting an interrupt into the guest.
+ */
+if (vgpu->vdev.msi_trigger == NULL)
+return 0;
if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
return 0;
......
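Taken together, the kvmgt hunks above tie the MSI eventfd lifecycle down: the trigger is released both on vgpu release and on a VFIO_IRQ_SET_DATA_NONE request, and injection treats a missing trigger as a silent success. A self-contained sketch of that pattern, with stand-in types (nothing here is the real kvmgt code):

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins, just to illustrate the pattern: release is
     * idempotent, and injection treats a missing trigger as success. */
    struct eventfd_ctx { int fd; };
    struct vgpu { struct eventfd_ctx *msi_trigger; };

    static void eventfd_put(struct eventfd_ctx *ctx) { (void)ctx; }

    static void release_msi_trigger(struct vgpu *vgpu)
    {
        struct eventfd_ctx *trigger = vgpu->msi_trigger;

        if (trigger) {                /* safe to call twice */
            eventfd_put(trigger);
            vgpu->msi_trigger = NULL;
        }
    }

    static int inject_msi(struct vgpu *vgpu)
    {
        /* Trigger stays NULL until the guest enables MSI; report success
         * and drop the interrupt instead of failing the caller. */
        if (vgpu->msi_trigger == NULL)
            return 0;
        /* ... signal the eventfd in the real driver ... */
        return 0;
    }

    int main(void)
    {
        struct vgpu v = { .msi_trigger = NULL };
        release_msi_trigger(&v);                  /* no-op on NULL */
        printf("inject -> %d\n", inject_msi(&v)); /* 0: silently dropped */
        return 0;
    }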
@@ -2972,23 +2972,22 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *request, *active = NULL;
unsigned long flags;
-/* We are called by the error capture and reset at a random
- * point in time. In particular, note that neither is crucially
- * ordered with an interrupt. After a hang, the GPU is dead and we
- * assume that no more writes can happen (we waited long enough for
- * all writes that were in transaction to be flushed) - adding an
+/*
+ * We are called by the error capture, reset and to dump engine
+ * state at random points in time. In particular, note that neither is
+ * crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
list_for_each_entry(request, &engine->timeline.requests, link) {
if (__i915_request_completed(request, request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
-GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-&request->fence.flags));
active = request;
break;
}
......
@@ -2453,12 +2453,13 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
for_each_new_connector_in_state(old_state, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
-enum port port = encoder->port;
+enum port port;
uint32_t val;
if (conn_state->crtc != crtc)
continue;
+port = encoder->port;
mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -2490,11 +2491,12 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
-enum port port = encoder->port;
+enum port port;
if (old_conn_state->crtc != crtc)
continue;
+port = encoder->port;
mutex_lock(&dev_priv->dpll_lock);
I915_WRITE(DPCLKA_CFGCR0_ICL,
I915_READ(DPCLKA_CFGCR0_ICL) |
......
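Both PLL hunks fix the same ordering bug: encoder->port was evaluated in the declaration, before the crtc filter had a chance to skip connectors that are not part of this commit (whose best_encoder may be NULL). Moving the dereference below the continue makes the skip safe. A small sketch of the rule, with hypothetical types standing in for the i915 structures:

    #include <stdio.h>
    #include <stddef.h>

    struct encoder { int port; };
    struct conn_state { struct encoder *best_encoder; void *crtc; };

    /* The buggy shape read enc->port before checking the filter, so an
     * entry with a NULL encoder crashed even though it would be skipped. */
    static void walk(struct conn_state *states, size_t n, void *crtc)
    {
        for (size_t i = 0; i < n; i++) {
            struct encoder *enc = states[i].best_encoder;
            int port;

            if (states[i].crtc != crtc)
                continue;        /* filter first ... */

            port = enc->port;    /* ... dereference only afterwards */
            printf("port %d\n", port);
        }
    }

    int main(void)
    {
        int crtc;
        struct encoder e = { .port = 3 };
        struct conn_state states[] = {
            { .best_encoder = NULL, .crtc = NULL },   /* skipped safely */
            { .best_encoder = &e,   .crtc = &crtc },
        };
        walk(states, 2, &crtc);
        return 0;
    }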
@@ -3690,11 +3690,6 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
if (intel_format_is_yuv(fb->format->format)) {
-if (fb->format->format == DRM_FORMAT_NV12) {
-plane_color_ctl |=
-PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
-goto out;
-}
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3703,7 +3698,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
-out:
return plane_color_ctl;
}
......
@@ -1679,23 +1679,6 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
return bpp;
}
-static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
-struct drm_display_mode *m2)
-{
-bool bres = false;
-if (m1 && m2)
-bres = (m1->hdisplay == m2->hdisplay &&
-m1->hsync_start == m2->hsync_start &&
-m1->hsync_end == m2->hsync_end &&
-m1->htotal == m2->htotal &&
-m1->vdisplay == m2->vdisplay &&
-m1->vsync_start == m2->vsync_start &&
-m1->vsync_end == m2->vsync_end &&
-m1->vtotal == m2->vtotal);
-return bres;
-}
/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
@@ -1860,16 +1843,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
-struct drm_display_mode *panel_mode =
-intel_connector->panel.alt_fixed_mode;
-struct drm_display_mode *req_mode = &pipe_config->base.mode;
-if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
-panel_mode = intel_connector->panel.fixed_mode;
-drm_mode_debug_printmodeline(panel_mode);
-intel_fixed_panel_mode(panel_mode, adjusted_mode);
+intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+adjusted_mode);
if (INTEL_GEN(dev_priv) >= 9) {
int ret;
@@ -6159,7 +6134,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
-struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
@@ -6214,14 +6188,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
-/* prefer fixed mode from EDID if available, save an alt mode also */
+/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_connector, fixed_mode);
-} else if (!alt_fixed_mode) {
-alt_fixed_mode = drm_mode_duplicate(dev, scan);
+break;
}
}
@@ -6258,8 +6231,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe_name(pipe));
}
-intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
-downclock_mode);
+intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
......
@@ -277,7 +277,6 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
-struct drm_display_mode *alt_fixed_mode;
struct drm_display_mode *downclock_mode;
/* backlight */
@@ -1850,7 +1849,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
-struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
......
@@ -1846,7 +1846,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
-intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL);
+intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
......
@@ -536,7 +536,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
*/
intel_panel_init(&intel_connector->panel,
intel_dvo_get_current_mode(intel_encoder),
-NULL, NULL);
+NULL);
intel_dvo->panel_wants_dither = true;
}
......
@@ -1114,7 +1114,7 @@ static void print_request(struct drm_printer *m,
const char *prefix)
{
const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
-char buf[80];
+char buf[80] = "";
int x = 0;
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
......
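The print_request fix is a classic uninitialized-buffer guard: the formatting helper can return without writing anything into buf, and the subsequent printout would then read indeterminate stack bytes. Initializing the array keeps it NUL-terminated either way. A sketch of the failure mode with a hypothetical helper:

    #include <stdio.h>

    /* Hypothetical helper mirroring the bug: it only fills the buffer in
     * some cases and returns the number of characters written. */
    static int format_attrs(char *buf, int len, int verbose)
    {
        if (!verbose)
            return 0;            /* buffer left untouched */
        return snprintf(buf, len, " prio=5");
    }

    int main(void)
    {
        char buf[80] = "";       /* the fix: start NUL-terminated */
        int x = format_attrs(buf, sizeof(buf), 0);

        /* Without the initializer, printing buf here would read
         * indeterminate stack bytes when format_attrs wrote nothing. */
        printf("request%s (x=%d)\n", buf, x);
        return 0;
    }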
@@ -1175,8 +1175,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
out:
mutex_unlock(&dev->mode_config.mutex);
-intel_panel_init(&intel_connector->panel, fixed_mode, NULL,
-downclock_mode);
+intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
......
@@ -1928,13 +1928,11 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
-struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode)
{
intel_panel_init_backlight_funcs(panel);
panel->fixed_mode = fixed_mode;
-panel->alt_fixed_mode = alt_fixed_mode;
panel->downclock_mode = downclock_mode;
return 0;
@@ -1948,10 +1946,6 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
-if (panel->alt_fixed_mode)
-drm_mode_destroy(intel_connector->base.dev,
-panel->alt_fixed_mode);
if (panel->downclock_mode)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
......
@@ -5150,7 +5150,6 @@ skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
sizeof(dst->ddb.uv_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
-dst->ddb.enabled_slices = src->ddb.enabled_slices;
}
static void
......
@@ -98,21 +98,6 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-struct drm_plane_state *new_state)
-{
-struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-struct mdp4_kms *mdp4_kms = get_kms(plane);
-struct msm_kms *kms = &mdp4_kms->base.base;
-struct drm_framebuffer *fb = new_state->fb;
-if (!fb)
-return 0;
-DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-return msm_framebuffer_prepare(fb, kms->aspace);
-}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -152,7 +137,7 @@ static void mdp4_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
-.prepare_fb = mdp4_plane_prepare_fb,
+.prepare_fb = msm_atomic_prepare_fb,
.cleanup_fb = mdp4_plane_cleanup_fb,
.atomic_check = mdp4_plane_atomic_check,
.atomic_update = mdp4_plane_atomic_update,
......
@@ -430,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct device *dev = &mdp5_kms->pdev->dev;
+unsigned long flags;
DBG("%s", crtc->name);
@@ -445,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
pm_runtime_put_sync(dev);
+if (crtc->state->event && !crtc->state->active) {
+WARN_ON(mdp5_crtc->event);
+spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+drm_crtc_send_vblank_event(crtc, crtc->state->event);
+crtc->state->event = NULL;
+spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+}
mdp5_crtc->enabled = false;
}
......
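The mdp5_crtc hunk addresses event leakage on disable: once the CRTC is off, no vblank interrupt will fire to complete a queued event, so user space would block on it indefinitely. Sending it synchronously (under dev->event_lock in the real driver) and clearing the pointer marks it consumed. A stripped-down sketch with stand-in types:

    #include <stdio.h>
    #include <stddef.h>

    struct event { const char *name; };
    struct crtc_state { struct event *event; int active; };

    /* Stands in for drm_crtc_send_vblank_event(), which must run under
     * the device's event_lock in the real driver. */
    static void send_event(struct event *e)
    {
        printf("completed event %s\n", e->name);
    }

    /* On disable, no further vblank IRQ will fire, so a queued event
     * would otherwise be leaked and its waiter stuck forever. */
    static void crtc_atomic_disable(struct crtc_state *state)
    {
        if (state->event && !state->active) {
            send_event(state->event);
            state->event = NULL;   /* mark consumed */
        }
    }

    int main(void)
    {
        struct event e = { .name = "flip" };
        struct crtc_state st = { .event = &e, .active = 0 };
        crtc_atomic_disable(&st);
        return 0;
    }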
@@ -70,60 +70,110 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
-struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+/* Global/shared object state funcs */
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-struct msm_kms_state *state = to_kms_state(s);
-struct mdp5_state *new_state;
+struct drm_private_state *priv_state;
int ret;
-if (state->state)
-return state->state;
-ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
-new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-if (!new_state)
-return ERR_PTR(-ENOMEM);
+priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+if (IS_ERR(priv_state))
+return ERR_CAST(priv_state);
-/* Copy state: */
-new_state->hwpipe = mdp5_kms->state->hwpipe;
-new_state->hwmixer = mdp5_kms->state->hwmixer;
-if (mdp5_kms->smp)
-new_state->smp = mdp5_kms->state->smp;
+return to_mdp5_global_state(priv_state);
+}
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+struct mdp5_global_state *state;
+state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+if (!state)
+return NULL;
-state->state = new_state;
+__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-return new_state;
+return &state->base;
}
-static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+struct drm_private_state *state)
{
-struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-swap(to_kms_state(state)->state, mdp5_kms->state);
+struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+kfree(mdp5_state);
}
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+.atomic_duplicate_state = mdp5_global_duplicate_state,
+.atomic_destroy_state = mdp5_global_destroy_state,
+};
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+struct mdp5_global_state *state;
+drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+state = kzalloc(sizeof(*state), GFP_KERNEL);
+if (!state)
+return -ENOMEM;
+state->mdp5_kms = mdp5_kms;
+drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+&state->base,
+&mdp5_global_state_funcs);
+return 0;
+}
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
+struct mdp5_global_state *global_state;
+global_state = mdp5_get_existing_global_state(mdp5_kms);
pm_runtime_get_sync(dev);
if (mdp5_kms->smp)
-mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
+struct mdp5_global_state *global_state;
+global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
-mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
pm_runtime_put_sync(dev);
}
@@ -229,7 +279,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
-.swap_state = mdp5_swap_state,
.prepare_commit = mdp5_prepare_commit,
.complete_commit = mdp5_complete_commit,
.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -727,7 +776,8 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
-kfree(mdp5_kms->state);
+drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -880,12 +930,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
-drm_modeset_lock_init(&mdp5_kms->state_lock);
-mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-if (!mdp5_kms->state) {
-ret = -ENOMEM;
+ret = mdp5_global_obj_init(mdp5_kms);
+if (ret)
goto fail;
-}
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
......
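The mdp5_kms.c rewrite above moves the driver from a hand-rolled swap of a private mdp5_state to the drm_private_obj infrastructure, which duplicates the global state during atomic check, swaps it in at commit, and frees the superseded copy along with the rest of the drm_atomic_state. A user-space mock of that duplicate/swap lifecycle (names illustrative, not the DRM API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mock of the duplicate/swap lifecycle behind drm_private_obj: check
     * paths mutate a duplicated copy of the shared state, and the commit
     * swap makes that copy current. */
    struct global_state { int mixer_owner; };
    struct private_obj { struct global_state *state; };

    static struct global_state *get_global_state(struct private_obj *obj)
    {
        struct global_state *dup = malloc(sizeof(*dup));

        if (dup)    /* duplicate; never mutate the current state in place */
            memcpy(dup, obj->state, sizeof(*dup));
        return dup;
    }

    static void commit_swap(struct private_obj *obj, struct global_state *new_state)
    {
        free(obj->state);          /* superseded copy is destroyed */
        obj->state = new_state;
    }

    int main(void)
    {
        struct private_obj obj = { .state = calloc(1, sizeof(struct global_state)) };
        struct global_state *new_state;

        if (!obj.state)
            return 1;
        obj.state->mixer_owner = -1;          /* obj_init: nothing assigned */
        new_state = get_global_state(&obj);   /* atomic check phase */
        if (!new_state)
            return 1;
        new_state->mixer_owner = 0;           /* assign the shared resource */
        commit_swap(&obj, new_state);         /* commit phase */
        printf("mixer owner: %d\n", obj.state->mixer_owner);
        free(obj.state);
        return 0;
    }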
@@ -28,8 +28,6 @@
#include "mdp5_ctl.h"
#include "mdp5_smp.h"
-struct mdp5_state;
struct mdp5_kms {
struct mdp_kms base;
@@ -49,11 +47,12 @@ struct mdp5_kms {
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
-/**
- * Global atomic state. Do not access directly, use mdp5_get_state()
+/*
+ * Global private object state, Do not access directly, use
+ * mdp5_global_get_state()
*/
-struct mdp5_state *state;
-struct drm_modeset_lock state_lock;
+struct drm_modeset_lock glob_state_lock;
+struct drm_private_obj glob_state;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
@@ -81,19 +80,23 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
-/* Global atomic state for tracking resources that are shared across
+/* Global private object state for tracking resources that are shared across
* multiple kms objects (planes/crtcs/etc).
*
* For atomic updates which require modifying global state,
*/
-struct mdp5_state {
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+struct drm_private_state base;
+struct drm_atomic_state *state;
+struct mdp5_kms *mdp5_kms;
struct mdp5_hw_pipe_state hwpipe;
struct mdp5_hw_mixer_state hwmixer;
struct mdp5_smp_state smp;
};
-struct mdp5_state *__must_check
-mdp5_get_state(struct drm_atomic_state *s);
+struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
/* Atomic plane state. Subclasses the base drm_plane_state in order to
* track assigned hwpipe and hw specific state.
......
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-struct mdp5_state *state = mdp5_get_state(s);
+struct mdp5_global_state *global_state = mdp5_get_global_state(s);
struct mdp5_hw_mixer_state *new_state;
int i;
-if (IS_ERR(state))
-return PTR_ERR(state);
+if (IS_ERR(global_state))
+return PTR_ERR(global_state);
-new_state = &state->hwmixer;
+new_state = &global_state->hwmixer;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
-struct mdp5_state *state = mdp5_get_state(s);
-struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
if (!mixer)
return;
......