Commit 955289c7 authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Now that we have the bits needed for mdp5 atomic, here is the followup
pull request I mentioned.  Main highlights are:

1) mdp5 multiple crtc and public plane support (no more hard-coded mixer setup!)
2) mdp5 atomic conversion
3) a couple of atomic helper fixes for issues found during mdp5 atomic
   debug (reviewed by danvet.. but he didn't plan to send an
   atomic-fixes pull request, so I agreed to tack them on to mine)

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/atomic: shutdown *current* encoder
  drm/atomic: check mode_changed *after* atomic_check
  drm/msm/mdp4: fix mixer setup for multi-crtc + planes
  drm/msm/mdp5: dpms(OFF) cleanups
  drm/msm/mdp5: atomic
  drm/msm: atomic fixes
  drm/msm/mdp5: remove global mdp5_ctl_mgr
  drm/msm/mdp5: don't use void * for opaque types
  drm/msm: add multiple CRTC and overlay support
  drm/msm/mdp5: set rate before enabling clk
  drm/msm/mdp5: introduce mdp5_cfg module
  drm/msm/mdp5: make SMP module dynamically configurable
  drm/msm/hdmi: remove useless kref
  drm/msm/mdp5: get the core clock rate from MDP5 config
  drm/msm/mdp5: use irqdomains
parents ed1e8777 46df9adb
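
For context, the net effect of the two atomic-helper fixes below is a reordering inside drm_atomic_helper_check(): the per-plane ->atomic_check() hooks now run first, and the (renamed) modeset check runs last, so it can see any mode_changed flags those hooks set. A rough sketch of the resulting flow (simplified; check_planes is a hypothetical stand-in name, not the helper's actual code):

    int drm_atomic_helper_check(struct drm_device *dev,
            struct drm_atomic_state *state)
    {
        int ret;

        /* plane/crtc ->atomic_check() hooks run first... */
        ret = check_planes(dev, state);   /* hypothetical helper */
        if (ret)
            return ret;

        /* ...so the modeset check sees mode_changed set by them: */
        return drm_atomic_helper_check_modeset(dev, state);
    }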
@@ -331,7 +331,7 @@ mode_fixup(struct drm_atomic_state *state)
 }
 
 static int
-drm_atomic_helper_check_prepare(struct drm_device *dev,
+drm_atomic_helper_check_modeset(struct drm_device *dev,
 		struct drm_atomic_state *state)
 {
 	int ncrtcs = dev->mode_config.num_crtc;
@@ -428,10 +428,6 @@ int drm_atomic_helper_check(struct drm_device *dev,
 	int ncrtcs = dev->mode_config.num_crtc;
 	int i, ret = 0;
 
-	ret = drm_atomic_helper_check_prepare(dev, state);
-	if (ret)
-		return ret;
-
 	for (i = 0; i < nplanes; i++) {
 		struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
@@ -475,6 +471,10 @@ int drm_atomic_helper_check(struct drm_device *dev,
 		}
 	}
 
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_check);
@@ -499,9 +499,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		if (!old_conn_state || !old_conn_state->crtc)
 			continue;
 
-		encoder = connector->state->best_encoder;
+		encoder = old_conn_state->best_encoder;
 
-		if (!encoder)
+		/* We shouldn't get this far if we didn't previously have
+		 * an encoder.. but WARN_ON() rather than explode.
+		 */
+		if (WARN_ON(!encoder))
 			continue;
 
 		funcs = encoder->helper_private;
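
The encoder fix above hinges on which state is "current" during teardown: connector->state already points at the incoming state's best_encoder, while old_conn_state->best_encoder is what is actually driving the hardware. A minimal illustration (hypothetical variable names, not helper code):

    /* when disabling outputs for a modeset: */
    struct drm_encoder *outgoing = old_conn_state->best_encoder;  /* currently live; shut this one down */
    struct drm_encoder *incoming = connector->state->best_encoder; /* not enabled yet */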
...
@@ -25,6 +25,8 @@ msm-y := \
 	mdp/mdp4/mdp4_irq.o \
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o \
+	mdp/mdp5/mdp5_cfg.o \
+	mdp/mdp5/mdp5_ctl.o \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
...
@@ -15,6 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_irq.h>
 #include "hdmi.h"
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 			power_on ? "Enable" : "Disable", ctrl);
 }
 
-irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
 {
 	struct hdmi *hdmi = dev_id;
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void hdmi_destroy(struct kref *kref)
+static void hdmi_destroy(struct hdmi *hdmi)
 {
-	struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
 	struct hdmi_phy *phy = hdmi->phy;
 
 	if (phy)
@@ -84,8 +84,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
-	kref_init(&hdmi->refcount);
-
 	hdmi->pdev = pdev;
 	hdmi->config = config;
@@ -182,7 +180,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 
 fail:
 	if (hdmi)
-		hdmi_destroy(&hdmi->refcount);
+		hdmi_destroy(hdmi);
 
 	return ERR_PTR(ret);
 }
@@ -200,7 +198,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = hdmi->pdev;
-	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	int ret;
 
 	hdmi->dev = dev;
@@ -224,23 +221,21 @@ int hdmi_modeset_init(struct hdmi *hdmi,
 		goto fail;
 	}
 
-	if (!config->shared_irq) {
-		hdmi->irq = platform_get_irq(pdev, 0);
-		if (hdmi->irq < 0) {
-			ret = hdmi->irq;
-			dev_err(dev->dev, "failed to get irq: %d\n", ret);
-			goto fail;
-		}
+	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (hdmi->irq < 0) {
+		ret = hdmi->irq;
+		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		goto fail;
+	}
 
-		ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
-				NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-				"hdmi_isr", hdmi);
-		if (ret < 0) {
-			dev_err(dev->dev, "failed to request IRQ%u: %d\n",
-					hdmi->irq, ret);
-			goto fail;
-		}
-	}
+	ret = devm_request_irq(&pdev->dev, hdmi->irq,
+			hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"hdmi_isr", hdmi);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+				hdmi->irq, ret);
+		goto fail;
+	}
 
 	encoder->bridge = hdmi->bridge;
@@ -271,12 +266,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
 
 #include <linux/of_gpio.h>
 
-static void set_hdmi(struct drm_device *dev, struct hdmi *hdmi)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	priv->hdmi = hdmi;
-}
-
 #ifdef CONFIG_OF
 static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
 {
@@ -297,6 +286,8 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
 
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
 	static struct hdmi_platform_config config = {};
 	struct hdmi *hdmi;
 #ifdef CONFIG_OF
@@ -318,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
 		config.pwr_clk_names = pwr_clk_names;
 		config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
-		config.shared_irq = true;
 	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
 		static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
 		static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -392,14 +382,19 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	hdmi = hdmi_init(to_platform_device(dev));
 	if (IS_ERR(hdmi))
 		return PTR_ERR(hdmi);
-	set_hdmi(dev_get_drvdata(master), hdmi);
+	priv->hdmi = hdmi;
 	return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master,
 		void *data)
 {
-	set_hdmi(dev_get_drvdata(master), NULL);
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+
+	if (priv->hdmi) {
+		hdmi_destroy(priv->hdmi);
+		priv->hdmi = NULL;
+	}
 }
 
 static const struct component_ops hdmi_ops = {
...
@@ -38,8 +38,6 @@ struct hdmi_audio {
 };
 
 struct hdmi {
-	struct kref refcount;
-
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
 	/* gpio's: */
 	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
 	int mux_lpm_gpio;
-
-	/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
-	bool shared_irq;
 };
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
-void hdmi_destroy(struct kref *kref);
 
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 	return msm_readl(hdmi->mmio + reg);
 }
 
-static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
-{
-	kref_get(&hdmi->refcount);
-	return hdmi;
-}
-
-static inline void hdmi_unreference(struct hdmi *hdmi)
-{
-	kref_put(&hdmi->refcount, hdmi_destroy);
-}
-
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
...
@@ -26,7 +26,6 @@ struct hdmi_bridge {
 static void hdmi_bridge_destroy(struct drm_bridge *bridge)
 {
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
-	hdmi_unreference(hdmi_bridge->hdmi);
 	drm_bridge_cleanup(bridge);
 	kfree(hdmi_bridge);
 }
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
 		goto fail;
 	}
 
-	hdmi_bridge->hdmi = hdmi_reference(hdmi);
+	hdmi_bridge->hdmi = hdmi;
 
 	bridge = &hdmi_bridge->base;
...
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
-	hdmi_unreference(hdmi_connector->hdmi);
-
 	kfree(hdmi_connector);
 }
 
@@ -425,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
 		goto fail;
 	}
 
-	hdmi_connector->hdmi = hdmi_reference(hdmi);
+	hdmi_connector->hdmi = hdmi;
 	INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
 
 	connector = &hdmi_connector->base;
...
@@ -167,18 +167,8 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void blend_setup(struct drm_crtc *crtc)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	struct drm_plane *plane;
-	int i, ovlp = mdp4_crtc->ovlp;
-	uint32_t mixer_cfg = 0;
-	static const enum mdp_mixer_stage_id stages[] = {
-			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
-	};
-	/* statically (for now) map planes to mixer stage (z-order): */
-	static const int idxs[] = {
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
 		[VG1] = 1,
 		[VG2] = 2,
 		[RGB1] = 0,
@@ -187,14 +177,44 @@ static void blend_setup(struct drm_crtc *crtc)
 		[VG3] = 3,
 		[VG4] = 4,
 };
-	bool alpha[4]= { false, false, false, false };
 
-	/* Don't rely on value read back from hw, but instead use our
-	 * own shadowed value.  Possibly disable/reenable looses the
-	 * previous value and goes back to power-on default?
-	 */
-	mixer_cfg = mdp4_kms->mixer_cfg;
+/* setup mixer config, for which we need to consider all crtc's and
+ * the planes attached to them
+ *
+ * TODO may possibly need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
+{
+	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+	struct drm_crtc *crtc;
+	uint32_t mixer_cfg = 0;
+	static const enum mdp_mixer_stage_id stages[] = {
+			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+	};
+
+	list_for_each_entry(crtc, &config->crtc_list, head) {
+		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+		struct drm_plane *plane;
+
+		for_each_plane_on_crtc(crtc, plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			int idx = idxs[pipe_id];
+			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+					pipe_id, stages[idx]);
+		}
+	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	int i, ovlp = mdp4_crtc->ovlp;
+	bool alpha[4]= { false, false, false, false };
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
@@ -209,13 +229,8 @@ static void blend_setup(struct drm_crtc *crtc)
 					to_mdp_format(msm_framebuffer_format(plane->fb));
 			alpha[idx-1] = format->alpha_enable;
 		}
-
-		mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
-				pipe_id, stages[idx]);
 	}
 
-	/* this shouldn't happen.. and seems to cause underflow: */
-	WARN_ON(!mixer_cfg);
-
 	for (i = 0; i < 4; i++) {
 		uint32_t op;
 
@@ -238,8 +253,7 @@ static void blend_setup(struct drm_crtc *crtc)
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
 	}
 
-	mdp4_kms->mixer_cfg = mixer_cfg;
-	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+	setup_mixer(mdp4_kms);
 }
 
 static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
...
@@ -32,13 +32,6 @@ struct mdp4_kms {
 
 	int rev;
 
-	/* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
-	 * crtcs/encoders is in one shared register, we need to update it
-	 * via read/modify/write.  But to avoid getting confused by power-
-	 * on-default values after resume, use this shadow value instead:
-	 */
-	uint32_t mixer_cfg;
-
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
...
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp5_kms.h"
#include "mdp5_cfg.h"
struct mdp5_cfg_handler {
int revision;
struct mdp5_cfg config;
};
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74_config = {
.name = "msm8x74",
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
.nb_stages = 5,
},
.dspp = {
.count = 3,
.base = { 0x04600, 0x04a00, 0x04e00 },
},
.ad = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
.reserved[CID_RGB0] = 2,
.reserved[CID_RGB1] = 2,
.reserved[CID_RGB2] = 2,
.reserved[CID_RGB3] = 2,
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
.nb_stages = 5,
},
.dspp = {
.count = 4,
.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
},
.ad = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74_config } },
{ .revision = 2, .config = { .hw = &msm8x74_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->config.hw;
}
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
{
return &cfg_handler->config;
}
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
{
return cfg_handler->revision;
}
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
{
kfree(cfg_handler);
}
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct platform_device *pdev = dev->platformdev;
struct mdp5_cfg_handler *cfg_handler;
struct mdp5_cfg_platform *pconfig;
int i, ret = 0;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
ret = -ENOMEM;
goto fail;
}
if (major != 1) {
dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
if (cfg_handlers[i].revision != minor)
continue;
mdp5_cfg = cfg_handlers[i].config.hw;
break;
}
if (unlikely(!mdp5_cfg)) {
dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
cfg_handler->revision = minor;
cfg_handler->config.hw = mdp5_cfg;
pconfig = mdp5_get_config(pdev);
memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
return NULL;
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_cfg_platform config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
config.iommu = iommu_domain_alloc(&platform_bus_type);
return &config;
}
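
The rest of the driver consumes this module through the handler returned by mdp5_cfg_init(); a minimal sketch of a caller, assuming major/minor were already read from the hw revision register and that mdp5_kms has a cfg field as used elsewhere in this series (error handling trimmed, hypothetical function name):

    static int hw_cfg_setup_sketch(struct mdp5_kms *mdp5_kms,
            uint32_t major, uint32_t minor)
    {
        const struct mdp5_cfg_hw *hw_cfg;

        mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
        if (!mdp5_kms->cfg)
            return -ENXIO;  /* unsupported revision (init returns NULL on failure) */

        /* per-revision offsets and limits now come from the selected table: */
        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
        DBG("max core clk: %u", hw_cfg->max_clk);
        return 0;
    }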
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MDP5_CFG_H__
#define __MDP5_CFG_H__
#include "msm_drv.h"
/*
* mdp5_cfg
*
* This module configures the dynamic offsets used by mdp5.xml.h
* (initialized in mdp5_cfg.c)
*/
extern const struct mdp5_cfg_hw *mdp5_cfg;
#define MAX_CTL 8
#define MAX_BASES 8
#define MAX_SMP_BLOCKS 44
#define MAX_CLIENTS 32
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
#define MDP5_SUB_BLOCK_DEFINITION \
int count; \
uint32_t base[MAX_BASES]
struct mdp5_sub_block {
MDP5_SUB_BLOCK_DEFINITION;
};
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
uint32_t nb_stages; /* number of stages per blender */
};
struct mdp5_smp_block {
int mmb_count; /* number of SMP MMBs */
int mmb_size; /* MMB: size in bytes */
mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
};
struct mdp5_cfg_hw {
char *name;
struct mdp5_smp_block smp;
struct mdp5_sub_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block intf;
uint32_t max_clk;
};
/* platform config data (ie. from DT, or pdata) */
struct mdp5_cfg_platform {
struct iommu_domain *iommu;
};
struct mdp5_cfg {
const struct mdp5_cfg_hw *hw;
struct mdp5_cfg_platform platform;
};
struct mdp5_kms;
struct mdp5_cfg_handler;
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,41 +18,35 @@
 
 #include "mdp5_kms.h"
 
+#include <linux/sort.h>
 #include <drm/drm_mode.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_flip_work.h"
 
+#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
 struct mdp5_crtc {
 	struct drm_crtc base;
 	char name[8];
 	int id;
 	bool enabled;
 
-	/* which mixer/encoder we route output to: */
-	int mixer;
+	/* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
+	int lm;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
+
+	/* CTL used for this CRTC: */
+	struct mdp5_ctl *ctl;
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
 	atomic_t pending;
 
-	/* the fb that we logically (from PoV of KMS API) hold a ref
-	 * to.  Which we may not yet be scanning out (we may still
-	 * be scanning out previous in case of page_flip while waiting
-	 * for gpu rendering to complete:
-	 */
-	struct drm_framebuffer *fb;
-
-	/* the fb that we currently hold a scanout ref to: */
-	struct drm_framebuffer *scanout_fb;
-
-	/* for unref'ing framebuffers after scanout completes: */
-	struct drm_flip_work unref_fb_work;
-
 	struct mdp_irq vblank;
 	struct mdp_irq err;
 };
@@ -71,66 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
-	struct drm_plane *plane;
-	uint32_t flush = 0;
-
-	for_each_plane_on_crtc(crtc, plane) {
-		enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
-		flush |= pipe2flush(pipe);
-	}
-
-	flush |= mixer2flush(mdp5_crtc->id);
-	flush |= MDP5_CTL_FLUSH_CTL;
-
-	DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
-}
+#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)
 
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct drm_framebuffer *old_fb = mdp5_crtc->fb;
-
-	/* grab reference to incoming scanout fb: */
-	drm_framebuffer_reference(new_fb);
-	mdp5_crtc->base.primary->fb = new_fb;
-	mdp5_crtc->fb = new_fb;
 
-	if (old_fb)
-		drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
 }
 
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this.  Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
  */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+static void crtc_flush_all(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_plane *plane;
+	uint32_t flush_mask = 0;
 
-	/* flush updates, to make sure hw is updated to new scanout fb,
-	 * so that we can safely queue unref to current fb (ie. next
-	 * vblank we know hw is done w/ previous scanout_fb).
-	 */
-	crtc_flush(crtc);
-
-	if (mdp5_crtc->scanout_fb)
-		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
-				mdp5_crtc->scanout_fb);
+	/* we could have already released CTL in the disable path: */
+	if (!mdp5_crtc->ctl)
+		return;
 
-	mdp5_crtc->scanout_fb = fb;
+	for_each_plane_on_crtc(crtc, plane) {
+		flush_mask |= mdp5_plane_get_flush(plane);
+	}
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
+	crtc_flush(crtc, flush_mask);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -151,6 +118,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 		 */
 		if (!file || (event->base.file_priv == file)) {
 			mdp5_crtc->event = NULL;
+			DBG("%s: send event: %p", mdp5_crtc->name, event);
 			drm_send_vblank_event(dev, mdp5_crtc->id, event);
 		}
 	}
@@ -160,38 +128,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 		mdp5_plane_complete_flip(plane);
 }
 
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-	struct mdp5_crtc *mdp5_crtc =
-		container_of(cb, struct mdp5_crtc, pageflip_cb);
-	struct drm_crtc *crtc = &mdp5_crtc->base;
-	struct drm_framebuffer *fb = mdp5_crtc->fb;
-
-	if (!fb)
-		return;
-
-	drm_framebuffer_reference(fb);
-	mdp5_plane_set_scanout(crtc->primary, fb);
-	update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-	struct mdp5_crtc *mdp5_crtc =
-		container_of(work, struct mdp5_crtc, unref_fb_work);
-	struct drm_device *dev = mdp5_crtc->base.dev;
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_framebuffer_unreference(val);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
 
 	kfree(mdp5_crtc);
 }
@@ -209,6 +150,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
 		mdp5_enable(mdp5_kms);
 		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 	} else {
+		/* set STAGE_UNUSED for all layers */
+		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
 		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
 		mdp5_disable(mdp5_kms);
 	}
@@ -223,54 +166,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane is exactly
+ * the CRTC resolution.
+ */
 static void blend_setup(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
+	struct drm_plane *plane;
+	const struct mdp5_cfg_hw *hw_cfg;
+	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	unsigned long flags;
+#define blender(stage)	((stage) - STAGE_BASE)
 
-	/*
-	 * Hard-coded setup for now until I figure out how the
-	 * layer-mixer works
-	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-	/* LM[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
-			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
-			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
-			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
 
-	/* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
-	 * we want to be setting CTL[m].LAYER[n].  Not sure what the
-	 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
-	 * used when chaining up mixers for high resolution displays?
-	 */
+	/* ctl could be released already when we are shutting down: */
+	if (!mdp5_crtc->ctl)
+		goto out;
 
-	/* CTL[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
-			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
-			MDP5_CTL_LAYER_REG_BORDER_COLOR);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+	for_each_plane_on_crtc(crtc, plane) {
+		enum mdp_mixer_stage_id stage =
+			to_mdp5_plane_state(plane->state)->stage;
+
+		/*
+		 * Note: This cannot happen with current implementation but
+		 * we need to check this condition once z property is added
+		 */
+		BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+		/* LM */
+		mdp5_write(mdp5_kms,
+				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+				blender(stage)), 0xff);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+				blender(stage)), 0x00);
+
+		/* CTL */
+		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+				pipe2name(mdp5_plane_pipe(plane)), stage);
+	}
+
+	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
-static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode,
-		int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int ret;
+	unsigned long flags;
+	struct drm_display_mode *mode;
 
-	mode = adjusted_mode;
+	if (WARN_ON(!crtc->state))
+		return;
+
+	mode = &crtc->state->adjusted_mode;
 
 	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mdp5_crtc->name, mode->base.id, mode->name,
@@ -281,28 +248,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
-
-	ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp5_crtc->name, ret);
-		return ret;
-	}
-
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
 			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
-
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
-
-	return 0;
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -316,65 +266,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
 static void mdp5_crtc_commit(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	DBG("%s", mdp5_crtc->name);
 	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-	crtc_flush(crtc);
+	crtc_flush_all(crtc);
 	/* drop the ref to mdp clk's that we got in prepare: */
 	mdp5_disable(get_kms(crtc));
 }
 
-static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+struct plane_state {
+	struct drm_plane *plane;
+	struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+	struct plane_state *pa = (struct plane_state *)a;
+	struct plane_state *pb = (struct plane_state *)b;
+	return pa->state->zpos - pb->state->zpos;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
 {
-	struct drm_plane *plane = crtc->primary;
-	struct drm_display_mode *mode = &crtc->mode;
-	int ret;
-
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
-
-	ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		return ret;
-	}
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	struct drm_device *dev = crtc->dev;
+	struct plane_state pstates[STAGE3 + 1];
+	int cnt = 0, i;
+
+	DBG("%s: check", mdp5_crtc->name);
+
+	if (mdp5_crtc->event) {
+		dev_err(dev->dev, "already pending flip!\n");
+		return -EBUSY;
+	}
+
+	/* request a free CTL, if none is already allocated for this CRTC */
+	if (state->enable && !mdp5_crtc->ctl) {
+		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
+		if (WARN_ON(!mdp5_crtc->ctl))
+			return -EINVAL;
+	}
+
+	/* verify that there are not too many planes attached to crtc
+	 * and that we don't have conflicting mixer stages:
+	 */
+	for_each_pending_plane_on_crtc(state->state, crtc, plane) {
+		struct drm_plane_state *pstate;
+
+		if (cnt >= ARRAY_SIZE(pstates)) {
+			dev_err(dev->dev, "too many planes!\n");
+			return -EINVAL;
+		}
+
+		pstate = state->state->plane_states[drm_plane_index(plane)];
+
+		/* plane might not have changed, in which case take
+		 * current state:
+		 */
+		if (!pstate)
+			pstate = plane->state;
 
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
+		pstates[cnt].plane = plane;
+		pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+		cnt++;
+	}
+
+	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+	for (i = 0; i < cnt; i++) {
+		pstates[i].state->stage = STAGE_BASE + i;
+		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
+				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+				pstates[i].state->stage);
+	}
 
 	return 0;
 }
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	DBG("%s: begin", mdp5_crtc->name);
 }
 
-static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
-		struct drm_framebuffer *new_fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_gem_object *obj;
 	unsigned long flags;
 
-	if (mdp5_crtc->event) {
-		dev_err(dev->dev, "already pending flip!\n");
-		return -EBUSY;
-	}
+	DBG("%s: flush", mdp5_crtc->name);
 
-	obj = msm_framebuffer_bo(new_fb, 0);
+	WARN_ON(mdp5_crtc->event);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	mdp5_crtc->event = event;
+	mdp5_crtc->event = crtc->state->event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, new_fb);
+	blend_setup(crtc);
+	crtc_flush_all(crtc);
+	request_pending(crtc, PENDING_FLIP);
 
-	return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+	if (mdp5_crtc->ctl && !crtc->state->enable) {
+		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_crtc->ctl = NULL;
+	}
 }
 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -385,27 +389,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
-	.page_flip = mdp5_crtc_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = mdp5_crtc_set_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 	.dpms = mdp5_crtc_dpms,
 	.mode_fixup = mdp5_crtc_mode_fixup,
-	.mode_set = mdp5_crtc_mode_set,
+	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.prepare = mdp5_crtc_prepare,
 	.commit = mdp5_crtc_commit,
-	.mode_set_base = mdp5_crtc_mode_set_base,
 	.load_lut = mdp5_crtc_load_lut,
+	.atomic_check = mdp5_crtc_atomic_check,
+	.atomic_begin = mdp5_crtc_atomic_begin,
+	.atomic_flush = mdp5_crtc_atomic_flush,
 };
 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
 	struct drm_crtc *crtc = &mdp5_crtc->base;
-	struct msm_drm_private *priv = crtc->dev->dev_private;
 	unsigned pending;
 
 	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -414,16 +424,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
 	if (pending & PENDING_FLIP) {
 		complete_flip(crtc, NULL);
-		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
 	}
 }
 
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
-	struct drm_crtc *crtc = &mdp5_crtc->base;
 	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
-	crtc_flush(crtc);
 }
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -444,10 +452,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	static const enum mdp5_intfnum intfnum[] = {
-			INTF0, INTF1, INTF2, INTF3,
-	};
+	uint32_t flush_mask = 0;
 	uint32_t intf_sel;
+	unsigned long flags;
 
 	/* now that we know what irq's we want: */
 	mdp5_crtc->err.irqmask = intf2err(intf);
@@ -457,6 +464,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 	if (!mdp5_kms)
 		return;
 
+	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
 	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 
 	switch (intf) {
@@ -481,39 +489,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 		break;
 	}
 
-	blend_setup(crtc);
+	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
 	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
-			MDP5_CTL_OP_MODE(MODE_NONE) |
-			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
-	crtc_flush(crtc);
+	crtc_flush(crtc, flush_mask);
 }
-static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
-		struct drm_plane *plane)
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
-	blend_setup(crtc);
-	if (mdp5_crtc->enabled && (plane != crtc->primary))
-		crtc_flush(crtc);
-}
-
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	set_attach(crtc, mdp5_plane_pipe(plane), plane);
-}
+	if (WARN_ON(!crtc))
+		return -EINVAL;
 
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	/* don't actually detatch our primary plane: */
-	if (crtc->primary == plane)
-		return;
-	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+	return mdp5_crtc->lm;
 }
 /* initialize crtc */
@@ -530,6 +524,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;
 
 	mdp5_crtc->id = id;
+	mdp5_crtc->lm = GET_LM_ID(id);
+
+	spin_lock_init(&mdp5_crtc->lm_lock);
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -537,11 +534,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
 			pipe2name(mdp5_plane_pipe(plane)), id);
 
-	drm_flip_work_init(&mdp5_crtc->unref_fb_work,
-			"unref fb", unref_fb_worker);
-
-	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
...
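
One concrete way to read the new blend path above: atomic_check assigns each plane a stage by zpos, then blend_setup() ORs the per-pipe stage masks into a single CTL_LAYER_REG value. A hypothetical two-plane example using the helpers from mdp5_ctl.h (further below); the values are invented for illustration:

    /* RGB0 as base layer, VIG0 blended above it, on layer mixer 0: */
    u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
            mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);
    mdp5_ctl_blend(ctl, 0, blend_cfg);  /* ctl obtained via mdp5_ctlm_request() */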
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
* CTL - MDP Control Pool Manager
*
* Controls are shared between all CRTCs.
*
* They are intended to be used for data path configuration.
* The top level register programming describes the complete data path for
* a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
*
* Hardware capabilities determine the number of concurrent data paths
*
* In certain use cases (high-resolution dual pipe), one single CTL can be
* shared across multiple CRTCs.
*
* Because the number of CTLs can be less than the number of CRTCs,
* CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
* requested by the client (in mdp5_crtc_mode_set()).
*/
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
/* whether this CTL has been allocated or not: */
bool busy;
/* memory output connection (@see mdp5_ctl_mode): */
u32 mode;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
/* flush mask used to commit CTL registers */
u32 flush_mask;
bool cursor_on;
struct drm_crtc *crtc;
};
struct mdp5_ctl_manager {
struct drm_device *dev;
/* number of CTL / Layer Mixers in this hw config: */
u32 nlm;
u32 nctl;
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
mdp5_write(mdp5_kms, reg, data);
}
static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
return mdp5_read(mdp5_kms, reg);
}
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
unsigned long flags;
static const enum mdp5_intfnum intfnum[] = {
INTF0, INTF1, INTF2, INTF3,
};
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
MDP5_CTL_OP_MODE(ctl->mode) |
MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
int lm;
lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->cursor_on = enable;
return 0;
}
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
unsigned long flags;
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
int lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
return -EINVAL;
}
/* for current targets, cursor bit is the same as LM bit */
flush_mask |= mdp_ctl_flush_mask_lm(lm);
}
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
return 0;
}
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
return ctl->flush_mask;
}
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
ctl->id, ctl->busy);
return;
}
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
ctl->busy = false;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("CTL %d released", ctl->id);
}
/*
* mdp5_ctl_request() - CTL dynamic allocation
*
* Note: Current implementation considers that we can only have one CRTC per CTL
*
* @return first free CTL
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
struct drm_crtc *crtc)
{
struct mdp5_ctl *ctl = NULL;
unsigned long flags;
int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
for (c = 0; c < ctl_mgr->nctl; c++)
if (!ctl_mgr->ctls[c].busy)
break;
if (unlikely(c >= ctl_mgr->nctl)) {
dev_err(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
}
ctl = &ctl_mgr->ctls[c];
ctl->crtc = crtc;
ctl->busy = true;
DBG("CTL %d allocated", ctl->id);
unlock:
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
return ctl;
}
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
unsigned long flags;
int c;
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
}
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
dev_err(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
dev_err(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
}
/* initialize the CTL manager: */
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
for (c = 0; c < ctl_mgr->nctl; c++) {
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
dev_err(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
goto fail;
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->mode = MODE_NONE;
ctl->reg_offset = ctl_cfg->base[c];
ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
ctl->busy = false;
spin_lock_init(&ctl->hw_lock);
}
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
fail:
if (ctl_mgr)
mdp5_ctlm_destroy(ctl_mgr);
return ERR_PTR(ret);
}
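
Taken together with the CRTC code above, a CTL's lifetime under atomic looks roughly like this (a sketch of the call sequence, not literal driver code; names as defined in this file):

    struct mdp5_ctl *ctl;

    ctl = mdp5_ctlm_request(ctlm, crtc);  /* in the crtc's atomic_check() */
    mdp5_ctl_set_intf(ctl, intf);         /* when the encoder is routed */
    mdp5_ctl_blend(ctl, lm, blend_cfg);   /* blend_setup() */
    mdp5_ctl_commit(ctl, flush_mask);     /* crtc_flush() */
    mdp5_ctl_release(ctl);                /* when the CRTC is disabled */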
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MDP5_CTL_H__
#define __MDP5_CTL_H__
#include "msm_drv.h"
/*
* CTL Manager prototypes:
* mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
* which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
*/
struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
* mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
/* @blend_cfg: see LM blender config definition below */
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
/* @flush_mask: see CTL flush masks definitions below */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
void mdp5_ctl_release(struct mdp5_ctl *ctl);
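Taken together, a consumer (in practice the CRTC code) drives this API roughly as in the following minimal sketch, assuming the caller already holds the ctlm handle and a mixer id; the interface and blend values are illustrative, not code from this series:
/* sketch only: claim a CTL, program it, flush, and release on teardown */
struct mdp5_ctl *ctl = mdp5_ctlm_request(ctlm, crtc);
if (!ctl)
	return -EBUSY;
mdp5_ctl_set_intf(ctl, INTF_HDMI);
mdp5_ctl_blend(ctl, lm, blend_cfg);
mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl));
/* ... scanout runs; when the CRTC is torn down: */
mdp5_ctl_release(ctl);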
/*
* blend_cfg (LM blender config):
*
* The function below allows the caller of mdp5_ctl_blend() to specify how
* pipes are blended according to their stage (z-order), via the @blend_cfg arg.
*/
static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
default: return 0;
}
}
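For example, to stage RGB0 at the base of a mixer with VIG0 above it, a caller could combine masks like this (a sketch; the STAGE_* values come from enum mdp_mixer_stage_id, which is defined outside this diff):
u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
	mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);
mdp5_ctl_blend(ctl, lm, blend_cfg);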
/*
* flush_mask (CTL flush masks):
*
* The following functions allow each DRM entity to compute and store its
* own flush mask.
* Once stored, these masks are combined by the caller of mdp5_ctl_commit(),
* which specifies which block(s) need to be flushed through the
* @flush_mask parameter.
*/
#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
/* TODO: use id once multiple cursor support is present */
(void)cursor_id;
return MDP5_CTL_FLUSH_CURSOR_DUMMY;
}
static inline u32 mdp_ctl_flush_mask_lm(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
}
static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
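A commit path can then OR the per-entity masks together before flushing; an illustrative sketch (the real call sites live in the crtc code, not shown in this pull):
u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
	mdp_ctl_flush_mask_lm(0) |
	mdp5_ctl_get_flush(ctl);
mdp5_ctl_commit(ctl, flush_mask);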
#endif /* __MDP5_CTL_H__ */
...@@ -24,6 +24,7 @@ struct mdp5_encoder {
struct drm_encoder base;
int intf;
enum mdp5_intf intf_id;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
};
...@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
bool enabled = (mode == DRM_MODE_DPMS_ON);
unsigned long flags;
DBG("mode=%d", mode);
...@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
if (enabled) {
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
} else {
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
* otherwise we end up in a funny state if we re-enable
* before the disable latches; in that case some of the settings
* for the new modeset (like the new scanout buffer) don't latch
* properly..
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
bs_set(mdp5_encoder, 0);
}
...@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format;
unsigned long flags;
mode = adjusted_mode;
...@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
...@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
}
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
...@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
mdp5_encoder->intf_id = intf_id;
encoder = &mdp5_encoder->base;
spin_lock_init(&mdp5_encoder->intf_lock);
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
...
...@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h" #include "msm_drv.h"
#include "mdp5_kms.h" #include "mdp5_kms.h"
...@@ -82,18 +84,23 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) ...@@ -82,18 +84,23 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
{ {
struct mdp_kms *mdp_kms = to_mdp_kms(kms); struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
uint32_t intr; uint32_t intr;
intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
VERB("intr=%08x", intr); VERB("intr=%08x", intr);
if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms); mdp5_irq_mdp(mdp_kms);
intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
}
if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
while (intr) {
hdmi_irq(0, priv->hdmi);
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
mdp5_kms->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
return IRQ_HANDLED;
}
...@@ -110,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
}
/*
* interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
* can register to get their irqs delivered
*/
#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
MDP5_HW_INTR_STATUS_INTR_DSI1 | \
MDP5_HW_INTR_STATUS_INTR_HDMI | \
MDP5_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdp5_hw_unmask_irq(struct irq_data *irqd)
{
struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static struct irq_chip mdp5_hw_irq_chip = {
.name = "mdp5",
.irq_mask = mdp5_hw_mask_irq,
.irq_unmask = mdp5_hw_unmask_irq,
};
static int mdp5_hw_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
struct mdp5_kms *mdp5_kms = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
irq_set_chip_data(irq, mdp5_kms);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
.map = mdp5_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
{
struct device *dev = mdp5_kms->dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32,
&mdp5_hw_irqdomain_ops, mdp5_kms);
if (!d) {
dev_err(dev, "mdp5 irq domain add failed\n");
return -ENXIO;
}
mdp5_kms->irqcontroller.enabled_mask = 0;
mdp5_kms->irqcontroller.domain = d;
return 0;
}
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
{
if (mdp5_kms->irqcontroller.domain) {
irq_domain_remove(mdp5_kms->irqcontroller.domain);
mdp5_kms->irqcontroller.domain = NULL;
}
}
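With the domain registered, a sub-block driver can resolve its MDP5 status bit to a Linux irq and claim it through the normal genirq path instead of being hard-wired into mdp5_irq(). A hedged sketch (the dev pointer, handler and cookie are placeholders):
/* illustrative: map the HDMI status bit to a Linux irq and request it */
unsigned int irq = irq_create_mapping(mdp5_kms->irqcontroller.domain,
	ffs(MDP5_HW_INTR_STATUS_INTR_HDMI) - 1);
ret = devm_request_irq(dev, irq, hdmi_irq, 0, "hdmi_isr", hdmi);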
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
...@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
"mdp_0",
};
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
const struct mdp5_config *mdp5_cfg;
static const struct mdp5_config msm8x74_config = {
.name = "msm8x74",
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
},
.dspp = {
.count = 3,
.base = { 0x04600, 0x04a00, 0x04e00 },
},
.ad = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
};
static const struct mdp5_config apq8084_config = {
.name = "apq8084",
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
},
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
},
.dspp = {
.count = 4,
.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
},
.ad = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
};
struct mdp5_config_entry {
int revision;
const struct mdp5_config *config;
};
static const struct mdp5_config_entry mdp5_configs[] = {
{ .revision = 0, .config = &msm8x74_config },
{ .revision = 2, .config = &msm8x74_config },
{ .revision = 3, .config = &apq8084_config },
};
static int mdp5_select_hw_cfg(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
int i, ret = 0;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
mdp5_disable(mdp5_kms);
major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
DBG("found MDP5 version v%d.%d", major, minor);
if (major != 1) {
dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
mdp5_kms->rev = minor;
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
if (mdp5_configs[i].revision != minor)
continue;
mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
break;
}
if (unlikely(!mdp5_kms->hw_cfg)) {
dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
return 0;
out:
return ret;
}
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
int i;
unsigned long flags;
pm_runtime_get_sync(dev->dev);
...@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
* care.
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
pm_runtime_put_sync(dev->dev);
...@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
mdp5_irq_domain_fini(mdp5_kms);
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
if (mdp5_kms->smp)
mdp5_smp_destroy(mdp5_kms->smp);
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
kfree(mdp5_kms);
}
...@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
static const enum mdp5_pipe pub_planes[] = {
SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
/* construct CRTCs: */
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
/* register our interrupt-controller for hdmi/eDP/dsi/etc
* to use for irqs routed through mdp:
*/
ret = mdp5_irq_domain_init(mdp5_kms);
if (ret)
goto fail;
/* construct CRTCs and their private planes: */
for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
plane = mdp5_plane_init(dev, crtcs[i], true);
plane = mdp5_plane_init(dev, crtcs[i], true,
hw_cfg->pipe_rgb.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
...@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
/* Construct public planes: */
for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
struct drm_plane *plane;
plane = mdp5_plane_init(dev, pub_planes[i], false,
hw_cfg->pipe_vig.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct %s plane: %d\n",
pipe2name(pub_planes[i]), ret);
goto fail;
}
}
/* Construct encoder for HDMI: */
encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
if (IS_ERR(encoder)) {
...@@ -338,6 +244,21 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
return ret;
}
static void read_hw_revision(struct mdp5_kms *mdp5_kms,
uint32_t *major, uint32_t *minor)
{
uint32_t version;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
mdp5_disable(mdp5_kms);
*major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
*minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
...@@ -354,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
struct mdp5_platform_config *config = mdp5_get_config(pdev);
struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
uint32_t major, minor;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
...@@ -367,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
spin_lock_init(&mdp5_kms->resource_lock);
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
kms = &mdp5_kms->base.base;
mdp5_kms->dev = dev;
mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
...@@ -417,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (ret)
goto fail;
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
clk_set_rate(mdp5_kms->src_clk, 200000000);
ret = mdp5_select_hw_cfg(kms);
read_hw_revision(mdp5_kms, &major, &minor);
if (ret)
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
ret = PTR_ERR(mdp5_kms->cfg);
mdp5_kms->cfg = NULL;
goto fail;
}
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
goto fail;
}
mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
goto fail;
}
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
for (i = 0; i < config->hw->intf.count; i++)
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
if (config->platform.iommu) {
mmu = msm_iommu_new(&pdev->dev, config->iommu);
mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
...@@ -475,18 +426,3 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_destroy(kms);
return ERR_PTR(ret);
}
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
config.iommu = iommu_domain_alloc(&platform_bus_type);
/* TODO hard-coded in downstream mdss, but should it be? */
config.max_clk = 200000000;
/* TODO get from DT: */
config.smp_blk_cnt = 22;
return &config;
}
...@@ -21,25 +21,9 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#define MDP5_MAX_BASES 8
struct mdp5_sub_block {
int count;
uint32_t base[MDP5_MAX_BASES];
};
struct mdp5_config {
char *name;
struct mdp5_sub_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_sub_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block intf;
};
extern const struct mdp5_config *mdp5_cfg;
#include "mdp5.xml.h" #include "mdp5.xml.h"
#include "mdp5_ctl.h"
#include "mdp5_smp.h" #include "mdp5_smp.h"
struct mdp5_kms { struct mdp5_kms {
...@@ -47,17 +31,14 @@ struct mdp5_kms { ...@@ -47,17 +31,14 @@ struct mdp5_kms {
struct drm_device *dev; struct drm_device *dev;
int rev; struct mdp5_cfg_handler *cfg;
const struct mdp5_config *hw_cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */ /* mapper-id used to request GEM buffer mapped for scanout: */
int id; int id;
struct msm_mmu *mmu; struct msm_mmu *mmu;
/* for tracking smp allocation amongst pipes: */ struct mdp5_smp *smp;
mdp5_smp_state_t smp_state; struct mdp5_ctl_manager *ctlm;
struct mdp5_client_smp_state smp_client_state[CID_MAX];
int smp_blk_cnt;
/* io/register spaces: */ /* io/register spaces: */
void __iomem *mmio, *vbif; void __iomem *mmio, *vbif;
...@@ -71,16 +52,47 @@ struct mdp5_kms { ...@@ -71,16 +52,47 @@ struct mdp5_kms {
struct clk *lut_clk; struct clk *lut_clk;
struct clk *vsync_clk; struct clk *vsync_clk;
/*
* lock to protect access to global resources: ie., following register:
* - REG_MDP5_DISP_INTF_SEL
*/
spinlock_t resource_lock;
struct mdp_irq error_handler; struct mdp_irq error_handler;
struct {
volatile unsigned long enabled_mask;
struct irq_domain *domain;
} irqcontroller;
}; };
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
/* platform config data (ie. from DT, or pdata) */ struct mdp5_plane_state {
struct mdp5_platform_config { struct drm_plane_state base;
struct iommu_domain *iommu;
uint32_t max_clk; /* "virtual" zpos.. we calculate actual mixer-stage at runtime
int smp_blk_cnt; * by sorting the attached planes by zpos and then assigning
* mixer stage lowest to highest. Private planes get default
* zpos of zero, and public planes a unique value that is
* greater than zero. This way, things work out if a naive
* userspace assigns planes to a crtc without setting zpos.
*/
int zpos;
/* the actual mixer stage, calculated in crtc->atomic_check()
* NOTE: this should move to mdp5_crtc_state, when that exists
*/
enum mdp_mixer_stage_id stage;
/* some additional transactional status to help us know in the
* apply path whether we need to update SMP allocation, and
* whether current update is still pending:
*/
bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
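The stage assignment described in the comment above amounts, in the crtc's atomic_check, to sorting the attached plane states by zpos and handing out mixer stages bottom-up. A sketch under the assumption of a simple array of plane-state pointers (pstate_cmp is a hypothetical zpos comparator; the real code lives in mdp5_crtc.c, outside this diff):
/* illustrative: order by zpos, then assign stages lowest to highest */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
for (i = 0; i < cnt; i++)
	pstates[i]->stage = STAGE_BASE + i;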
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
...@@ -105,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
return names[pipe];
}
static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
static inline int pipe2nclients(enum mdp5_pipe pipe)
{
switch (pipe) {
...@@ -135,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
WARN_ON(plane >= pipe2nclients(pipe));
switch (pipe) {
case SSPP_VIG0: return CID_VIG0_Y + plane;
case SSPP_VIG1: return CID_VIG1_Y + plane;
case SSPP_VIG2: return CID_VIG2_Y + plane;
case SSPP_RGB0: return CID_RGB0;
case SSPP_RGB1: return CID_RGB1;
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
case SSPP_VIG3: return CID_VIG3_Y + plane;
case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
static inline uint32_t mixer2flush(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
default: return 0;
}
}
static inline uint32_t intf2err(int intf)
{
switch (intf) {
...@@ -195,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp5_irq(struct msm_kms *kms);
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
...@@ -208,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void mdp5_plane_set_scanout(struct drm_plane *plane,
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
struct drm_framebuffer *fb);
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane);
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
...
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
...@@ -17,6 +18,7 @@
#include "mdp5_kms.h"
#define MAX_PLANE 4
struct mdp5_plane {
struct drm_plane base;
...@@ -24,6 +26,11 @@ struct mdp5_plane {
enum mdp5_pipe pipe;
spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
...@@ -31,31 +38,24 @@ struct mdp5_plane {
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb);
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static int mdp5_plane_update(struct drm_plane *plane,
static bool plane_enabled(struct drm_plane_state *state)
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
return state->fb && state->crtc;
mdp5_plane->enabled = true;
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
drm_framebuffer_reference(fb);
return mdp5_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
}
static int mdp5_plane_disable(struct drm_plane *plane)
...@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
int i;
DBG("%s: disable", mdp5_plane->name);
/* update our SMP request to zero (release all our blks): */
if (mdp5_kms) {
for (i = 0; i < pipe2nclients(pipe); i++)
/* Release the memory we requested earlier from the SMP: */
mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
mdp5_smp_release(mdp5_kms->smp, pipe);
}
/* TODO detaching now will cause us not to get the last
* vblank and mdp5_smp_commit().. so other planes will
* still see smp blocks previously allocated to us as
* in-use..
*/
if (plane->crtc)
mdp5_crtc_detach(plane->crtc, plane);
return 0;
}
...@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct msm_drm_private *priv = plane->dev->dev_private;
if (priv->kms)
mdp5_plane_disable(plane);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
...@@ -109,109 +98,185 @@ int mdp5_plane_set_property(struct drm_plane *plane,
return -EINVAL;
}
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
mdp5_state->zpos = 0;
} else {
mdp5_state->zpos = 1 + drm_plane_index(plane);
}
plane->state = &mdp5_state->base;
}
static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
if (WARN_ON(!plane->state))
return NULL;
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
mdp5_state->mode_changed = false;
mdp5_state->pending = false;
return &mdp5_state->base;
}
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
kfree(to_mdp5_plane_state(state));
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = mdp5_plane_update,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = mdp5_plane_disable,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.set_property = mdp5_plane_set_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
};
void mdp5_plane_set_scanout(struct drm_plane *plane,
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
uint32_t iova[4];
int i;
for (i = 0; i < nplanes; i++) {
struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
}
for (; i < 4; i++)
iova[i] = 0;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
plane->fb = fb;
}
/* NOTE: looks like if horizontal decimation is used (if we supported that)
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
* then the width used to calculate SMP block requirements is the post-
struct drm_framebuffer *fb)
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
uint32_t nplanes, uint32_t width)
{
struct drm_device *dev = plane->dev;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
int i, hsub, nlines, nblks, ret;
hsub = drm_format_horz_chroma_subsampling(format);
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
/* different if BWC (compressed framebuffer?) enabled: */
static int mdp5_plane_atomic_check(struct drm_plane *plane,
nlines = 2;
struct drm_plane_state *state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *old_state = plane->state;
for (i = 0, nblks = 0; i < nplanes; i++) {
DBG("%s: check (%d -> %d)", mdp5_plane->name,
int n, fetch_stride, cpp;
plane_enabled(old_state), plane_enabled(state));
cpp = drm_format_plane_cpp(format, i);
if (plane_enabled(state) && plane_enabled(old_state)) {
fetch_stride = width * cpp / (i ? hsub : 1);
/* we cannot change SMP block configuration during scanout: */
bool full_modeset = false;
if (state->fb->pixel_format != old_state->fb->pixel_format) {
DBG("%s: pixel_format change!", mdp5_plane->name);
full_modeset = true;
}
if (state->src_w != old_state->src_w) {
DBG("%s: src_w change!", mdp5_plane->name);
full_modeset = true;
}
if (to_mdp5_plane_state(old_state)->pending) {
DBG("%s: still pending!", mdp5_plane->name);
full_modeset = true;
}
if (full_modeset) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state->state, state->crtc);
crtc_state->mode_changed = true;
to_mdp5_plane_state(state)->mode_changed = true;
}
} else {
to_mdp5_plane_state(state)->mode_changed = true;
}
n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
return 0;
}
/* for hw rev v1.00 */
static void mdp5_plane_atomic_update(struct drm_plane *plane)
if (mdp5_kms->rev == 0)
{
n = roundup_pow_of_two(n);
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *state = plane->state;
DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
DBG("%s: update", mdp5_plane->name);
ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
if (ret) {
dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
nblks += n;
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
} else {
unsigned long flags;
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
set_scanout_locked(plane, state->fb);
spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
/* in success case, return total # of blocks allocated: */
return nblks;
}
static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
};
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
uint32_t val;
/* 1/4 of SMP pool that is being fetched */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
plane->fb = fb;
} }
int mdp5_plane_mode_set(struct drm_plane *plane,
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
...@@ -225,7 +290,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
int i, nblks;
unsigned long flags;
int ret;
nplanes = drm_format_num_planes(fb->pixel_format);
...@@ -243,12 +309,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
/*
/* Request some memory from the SMP: */
* Calculate and request required # of smp blocks:
ret = mdp5_smp_request(mdp5_kms->smp,
*/
mdp5_plane->pipe, fb->pixel_format, src_w);
nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
if (ret)
if (nblks < 0)
return ret;
return nblks;
/*
* Currently we update the hw for allocations/requests immediately,
...@@ -256,8 +321,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
for (i = 0; i < pipe2nclients(pipe); i++)
mdp5_smp_configure(mdp5_kms->smp, pipe);
mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
...@@ -269,6 +333,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
/* TODO calc phasey_step, vdecm */
}
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
...@@ -289,8 +355,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
mdp5_plane_set_scanout(plane, fb);
format = to_mdp_format(msm_framebuffer_format(fb));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
...@@ -330,22 +394,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
set_fifo_thresholds(plane, nblks);
set_scanout_locked(plane, fb);
/* TODO detach from old crtc (if we had more than one) */
spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
mdp5_crtc_attach(crtc, plane);
return 0;
return ret;
}
void mdp5_plane_complete_flip(struct drm_plane *plane)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
int i;
enum mdp5_pipe pipe = mdp5_plane->pipe;
DBG("%s: complete flip", mdp5_plane->name);
for (i = 0; i < pipe2nclients(pipe); i++)
mdp5_smp_commit(mdp5_kms->smp, pipe);
mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
to_mdp5_plane_state(plane->state)->pending = false;
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
...@@ -354,9 +420,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
return mdp5_plane->pipe;
}
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
return mdp5_plane->flush_mask;
}
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane)
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
...@@ -377,10 +450,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats));
mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
mdp5_plane->reg_offset = reg_offset;
spin_lock_init(&mdp5_plane->pipe_lock);
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type);
if (ret)
goto fail;
drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
...
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
...@@ -29,8 +30,11 @@
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
* For each block, it can be either free, or pending/in-use by a
* client. The updates happen in three steps:
* In some hw, some blocks are statically allocated for certain pipes
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
* free, or pending/in-use by a client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
...@@ -61,21 +65,68 @@
* inuse and pending state of all clients..
*/
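Condensed, the three steps map onto the plane code in this series roughly as follows (a sketch of the call sequence, error handling omitted):
/* 1) at mode_set: reserve blocks for all of the pipe's fetch clients */
mdp5_smp_request(smp, pipe, fb->pixel_format, src_w);
/* 2) before the flush: program union(pending, inuse) into the SMP regs */
mdp5_smp_configure(smp, pipe);
/* 3) after vblank confirms the flip: pending becomes inuse */
mdp5_smp_commit(smp, pipe);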
static DEFINE_SPINLOCK(smp_lock);
struct mdp5_smp {
struct drm_device *dev;
int blk_cnt;
int blk_size;
spinlock_t state_lock;
mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
struct mdp5_client_smp_state client_state[CID_MAX];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
struct msm_drm_private *priv = smp->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
WARN_ON(plane >= pipe2nclients(pipe));
switch (pipe) {
case SSPP_VIG0: return CID_VIG0_Y + plane;
case SSPP_VIG1: return CID_VIG1_Y + plane;
case SSPP_VIG2: return CID_VIG2_Y + plane;
case SSPP_RGB0: return CID_RGB0;
case SSPP_RGB1: return CID_RGB1;
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
case SSPP_VIG3: return CID_VIG3_Y + plane;
case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
/* step #1: update # of blocks pending for the client: */
int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
static int smp_request_block(struct mdp5_smp *smp,
enum mdp5_client_id cid, int nblks)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
struct mdp5_kms *mdp5_kms = get_kms(smp);
int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
int reserved;
unsigned long flags;
spin_lock_irqsave(&smp_lock, flags);
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
reserved = hw_cfg->smp.reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
nblks -= reserved;
if (reserved)
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
ret = -ENOSPC;
goto fail;
}
...@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
int blk = find_first_zero_bit(smp->state, cnt);
set_bit(blk, ps->pending);
set_bit(blk, mdp5_kms->smp_state);
set_bit(blk, smp->state);
}
} else {
/* shrink the existing pending reservation: */
...@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
}
fail:
spin_unlock_irqrestore(&smp_lock, flags);
spin_unlock_irqrestore(&smp->state_lock, flags);
return 0;
}
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
/*
* NOTE: looks like if horizontal decimation is used (if we supported that)
* then the width used to calculate SMP block requirements is the post-
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
for (i = 0, nblks = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(fmt, i);
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
/* for hw rev v1.00 */
if (rev == 0)
n = roundup_pow_of_two(n);
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, pipe2client(pipe, i), n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
nblks += n;
}
set_fifo_thresholds(smp, pipe, nblks);
return 0; return 0;
} }
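As a worked check of the arithmetic above, assuming the 4096-byte MMB size this series configures for msm8x74 and a hw rev newer than v1.00:
/* XRGB8888, src_w = 1920: nplanes = 1, cpp = 4
 * fetch_stride = 1920 * 4 = 7680 bytes
 * n = DIV_ROUND_UP(7680 * 2, 4096) = 4 MMBs
 * set_fifo_thresholds(): smp_entries_per_blk = 4096 / 16 = 256,
 * val = (4 * 256) / 4 = 256 -> watermarks at 256 / 512 / 768 entries
 */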
static void update_smp_state(struct mdp5_kms *mdp5_kms,
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int i, nblks;
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
static void update_smp_state(struct mdp5_smp *smp,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
int cnt = mdp5_kms->smp_blk_cnt;
struct mdp5_kms *mdp5_kms = get_kms(smp);
uint32_t blk, val;
int cnt = smp->blk_cnt;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
...@@ -135,22 +259,31 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
}
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int cnt = smp->blk_cnt;
int cnt = mdp5_kms->smp_blk_cnt;
mdp5_smp_state_t assigned;
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
enum mdp5_client_id cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
-update_smp_state(mdp5_kms, cid, &assigned);
+update_smp_state(smp, cid, &assigned);
}
}
/* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
-struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-int cnt = mdp5_kms->smp_blk_cnt;
+int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
enum mdp5_client_id cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
/*
* Figure out if there are any blocks we were previously
@@ -160,14 +293,46 @@ void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
unsigned long flags;
-spin_lock_irqsave(&smp_lock, flags);
+spin_lock_irqsave(&smp->state_lock, flags);
/* clear released blocks: */
-bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
-released, cnt);
-spin_unlock_irqrestore(&smp_lock, flags);
+bitmap_andnot(smp->state, smp->state, released, cnt);
+spin_unlock_irqrestore(&smp->state_lock, flags);
-update_smp_state(mdp5_kms, CID_UNUSED, &released);
+update_smp_state(smp, CID_UNUSED, &released);
}
bitmap_copy(ps->inuse, ps->pending, cnt);
}
}
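The pending/inuse split above is classic double-buffered bookkeeping: configure programs the union so both the on-screen frame and the about-to-latch frame have their blocks, and commit retires the difference once vblank has made the new allocation live. A toy userspace model of the same protocol on a single 32-bit bitmap (all names here are illustrative, not driver API):

#include <stdio.h>
#include <stdint.h>

/* toy model of one client's SMP bookkeeping: bit n = MMB n */
struct client_state {
    uint32_t inuse;     /* blocks the current frame scans out of */
    uint32_t pending;   /* blocks reserved for the next frame */
};

/* step #2: hw must hold blocks of *both* frames in flight */
static void configure(const struct client_state *ps)
{
    printf("program hw: %#x\n", ps->inuse | ps->pending);
}

/* step #3 (after vblank): retire blocks that are no longer pending */
static void commit(struct client_state *ps, uint32_t *pool)
{
    uint32_t released = ps->inuse & ~ps->pending;

    if (released) {
        *pool &= ~released; /* hand them back to the global pool */
        printf("released: %#x\n", released);
    }
    ps->inuse = ps->pending;
}

int main(void)
{
    uint32_t pool = 0x7;    /* blocks 0-2 currently allocated */
    struct client_state ps = { .inuse = 0x7, .pending = 0x3 };

    configure(&ps);     /* -> program hw: 0x7 */
    commit(&ps, &pool); /* -> released: 0x4, pool now 0x3 */
    return 0;
}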
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
kfree(smp);
}
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
struct mdp5_smp *smp = NULL;
int ret;
smp = kzalloc(sizeof(*smp), GFP_KERNEL);
if (unlikely(!smp)) {
ret = -ENOMEM;
goto fail;
}
smp->dev = dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
/* statically tied MMBs cannot be re-allocated: */
bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
spin_lock_init(&smp->state_lock);
return smp;
fail:
if (smp)
mdp5_smp_destroy(smp);
return ERR_PTR(ret);
}
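A sketch of the intended setup path from the KMS init code (hedged: the hw_cfg->smp field and surrounding names are assumed from the mdp5_cfg module introduced earlier in this series, and error unwinding is trimmed):

/* in mdp5_kms init -- illustrative, not the literal hunk: */
mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &hw_cfg->smp);
if (IS_ERR(mdp5_kms->smp)) {
    ret = PTR_ERR(mdp5_kms->smp);
    mdp5_kms->smp = NULL;
    goto fail;
}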
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -20,22 +21,26 @@
#include "msm_drv.h"
-#define MAX_SMP_BLOCKS 22
-#define SMP_BLK_SIZE 4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t pending;
};
struct mdp5_kms;
struct mdp5_smp;
/*
* SMP module prototypes:
* mdp5_smp_init() returns an SMP @handler,
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
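Inferred call order for a pipe, going by the prototypes above (a sketch of the contract, not a verbatim caller; src_w is a stand-in for the plane's source width):

/* plane update: reserve blocks for the new scanout parameters */
ret = mdp5_smp_request(smp, pipe, fb->pixel_format, src_w);
if (ret)
    return ret;

mdp5_smp_configure(smp, pipe);  /* pre-flush: program union(old, new) */
/* ... flush, wait for vblank ... */
mdp5_smp_commit(smp, pipe);     /* post-vblank: retire the old blocks */

/* on plane disable, return everything: */
mdp5_smp_release(smp, pipe);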
@@ -117,7 +117,7 @@ int msm_atomic_commit(struct drm_device *dev,
if (!plane)
continue;
-if (plane->state->fb != new_state->fb)
+if ((plane->state->fb != new_state->fb) && new_state->fb)
add_fb(c, new_state->fb);
}
...
@@ -215,7 +215,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
-irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
...
@@ -120,6 +120,8 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
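/* formats like RGB populate only planes[0], so guard against a
 * NULL gem object in the unused plane slots: */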
if (!msm_fb->planes[plane])
return 0;
return msm_gem_iova(msm_fb->planes[plane], id);
}
...
@@ -68,6 +68,24 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev);
/* TODO move these helper iterator macros somewhere common: */
#define for_each_plane_on_crtc(_crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
-if ((_plane)->crtc == (_crtc))
+if ((_plane)->state->crtc == (_crtc))
static inline bool
__plane_will_be_attached_to_crtc(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc)
{
int idx = drm_plane_index(plane);
/* if plane is modified in incoming state, use the new state: */
if (state->plane_states[idx])
return state->plane_states[idx]->crtc == crtc;
/* otherwise, current state: */
return plane->state->crtc == crtc;
}
#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \
list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc)))
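An illustrative consumer of the new iterator (hypothetical helper, mirroring how a CRTC's atomic_check can size its mixer setup from the planes that *will* be attached once the state commits):

static int count_pending_planes(struct drm_atomic_state *state,
        struct drm_crtc *crtc)
{
    struct drm_plane *plane;
    int cnt = 0;

    /* walks the new state where one exists, else the current state */
    for_each_pending_plane_on_crtc(state, crtc, plane)
        cnt++;

    return cnt;
}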
#endif /* __MSM_KMS_H__ */