Commit 4526903a authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Main thing this time around is DSI support for msm8960/apq8064, which
should be helpful for getting an upstream kernel working on
nexus7/nexus4/etc.

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (29 commits)
  drm/msm/mdp: fix a problematic usage of WARN_ON()
  drm/msm/dsi: Added missing mutex_unlock
  drm/msm: ratelimit error irq msgs
  drm/msm: Use unlocked gem unreferencing
  drm/msm: trivial whitespace fix
  dt-bindings: msm/dsi: Add DSIv2 documentation
  dt-bindings: msm/dsi: Fix the order in which clocks are listed
  drm/msm/dsi: Enable MMSS SPFB port via syscon
  drm/msm/dsi: Don't use iommu for command TX buffer for DSIv2
  drm/msm/dsi: Add dsi_cfg for APQ8064
  drm/msm/dsi: Set up link clocks for DSIv2
  drm/msm/dsi: Parse bus clocks from a list
  drm/msm/dsi: Delay dsi_clk_init
  drm/msm/dsi: Use a better way to figure out DSI version
  drm/msm/dsi: Add DSI PLL for 28nm 8960 PHY
  drm/msm/dsi: Add support for 28nm PHY on 8960
  drm/msm/dsi: Don't get byte/pixel source clocks from DT
  drm/msm/mdp4: Initialize DSI encoders
  drm/msm/mdp4: Call custom round_pixclk helper only if the encoder type is TMDS
  drm/msm/dsi: Add a mdp4 encoder for DSI
  ...
parents 663a233e 2abd1c88
......@@ -14,17 +14,20 @@ Required properties:
- clocks: device clocks
See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
* "mdp_core_clk"
* "iface_clk"
* "bus_clk"
* "byte_clk"
* "core_clk"
* "core_mmss_clk"
* "iface_clk"
* "mdp_core_clk"
* "byte_clk"
* "pixel_clk"
* "core_clk"
For DSIv2, we need an additional clock:
* "src_clk"
- vdd-supply: phandle to vdd regulator device node
- vddio-supply: phandle to vdd-io regulator device node
- vdda-supply: phandle to vdda regulator device node
- qcom,dsi-phy: phandle to DSI PHY device node
- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
Optional properties:
- panel@0: Node of panel connected to this DSI controller.
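(An illustrative sketch, not part of this change: a DSIv2 controller node on
APQ8064 might use the properties above roughly as follows. The unit address,
regulator and phandle labels are placeholders, and the clocks entries that
pair with clock-names are omitted:)

	dsi0: dsi@4700000 {
		reg = <0x04700000 0x200>;
		clock-names = "mdp_core_clk", "iface_clk", "bus_clk",
			      "core_mmss_clk", "byte_clk", "pixel_clk",
			      "core_clk", "src_clk";
		vdd-supply = <&vdd_reg>;
		vddio-supply = <&vddio_reg>;
		vdda-supply = <&vdda_reg>;
		qcom,dsi-phy = <&dsi_phy0>;
		syscon-sfpb = <&mmss_sfpb>;
	};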
......@@ -51,6 +54,7 @@ Required properties:
* "qcom,dsi-phy-28nm-hpm"
* "qcom,dsi-phy-28nm-lp"
* "qcom,dsi-phy-20nm"
* "qcom,dsi-phy-28nm-8960"
- reg: Physical base address and length of the registers of PLL, PHY and PHY
regulator
- reg-names: The names of register regions. The following regions are required:
......
......@@ -2,18 +2,28 @@ Qualcomm adreno/snapdragon display controller
Required properties:
- compatible:
* "qcom,mdp" - mdp4
* "qcom,mdp4" - mdp4
* "qcom,mdp5" - mdp5
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the display controller.
- connectors: array of phandles for output device(s)
- clocks: device clocks
See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
* "core_clk"
* "iface_clk"
* "src_clk"
* "hdmi_clk"
* "mpd_clk"
- clock-names: the following clocks are required.
For MDP4:
* "core_clk"
* "iface_clk"
* "lut_clk"
* "src_clk"
* "hdmi_clk"
* "mdp_clk"
For MDP5:
* "bus_clk"
* "iface_clk"
* "core_clk_src"
* "core_clk"
* "lut_clk" (some MDP5 versions may not need this)
* "vsync_clk"
Optional properties:
- gpus: phandle for gpu device
......@@ -26,7 +36,7 @@ Example:
...
mdp: qcom,mdp@5100000 {
compatible = "qcom,mdp";
compatible = "qcom,mdp4";
reg = <0x05100000 0xf0000>;
interrupts = <GIC_SPI 75 0>;
connectors = <&hdmi>;
......
......@@ -54,3 +54,11 @@ config DRM_MSM_DSI_20NM_PHY
default y
help
Choose this option if the 20nm DSI PHY is used on the platform.
config DRM_MSM_DSI_28NM_8960_PHY
bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
depends on DRM_MSM_DSI
default y
help
Choose this option if the 28nm DSI PHY 8960 variant is used on the
platform.
......@@ -54,6 +54,7 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
mdp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
......@@ -62,10 +63,12 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
......@@ -19,10 +19,6 @@
#include "adreno_gpu.h"
#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
# include <mach/kgsl.h>
#endif
#define ANY_ID 0xff
bool hang_debug = false;
......@@ -168,7 +164,6 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
......@@ -205,53 +200,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return -ENXIO;
}
#else
struct kgsl_device_platform_data *pdata = dev->platform_data;
uint32_t version = socinfo_get_version();
if (cpu_is_apq8064ab()) {
config.fast_rate = 450000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
config.rev = ADRENO_REV(3, 2, 1, 0);
} else if (cpu_is_apq8064()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
if (SOCINFO_VERSION_MAJOR(version) == 2)
config.rev = ADRENO_REV(3, 2, 0, 2);
else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
(SOCINFO_VERSION_MINOR(version) == 1))
config.rev = ADRENO_REV(3, 2, 0, 1);
else
config.rev = ADRENO_REV(3, 2, 0, 0);
} else if (cpu_is_msm8960ab()) {
config.fast_rate = 400000000;
config.slow_rate = 320000000;
config.bus_freq = 4;
if (SOCINFO_VERSION_MINOR(version) == 0)
config.rev = ADRENO_REV(3, 2, 1, 0);
else
config.rev = ADRENO_REV(3, 2, 1, 1);
} else if (cpu_is_msm8930()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
config.bus_freq = 3;
if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
(SOCINFO_VERSION_MINOR(version) == 2))
config.rev = ADRENO_REV(3, 0, 5, 2);
else
config.rev = ADRENO_REV(3, 0, 5, 0);
}
# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
config.bus_scale_table = pdata->bus_scale_table;
# endif
#endif
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
......
......@@ -31,10 +31,12 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_MAX
};
#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
/* Regulators for DSI devices */
struct dsi_reg_entry {
......@@ -89,7 +91,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
......@@ -143,7 +145,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 iova, u32 len);
u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
......
......@@ -13,9 +13,26 @@
#include "dsi_cfg.h"
/* DSI v2 has not been supported by now */
static const struct msm_dsi_config dsi_v2_cfg = {
static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss_clk", "iface_clk", "bus_clk",
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
.reg_cfg = {
.num = 3,
.regs = {
{"vdda", 1200000, 1200000, 100000, 100},
{"avdd", 3000000, 3000000, 110000, 100},
{"vddio", 1800000, 1800000, 100000, 100},
},
},
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
};
static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
......@@ -29,6 +46,12 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
{"vddio", 1800000, 1800000, 100000, 100},
},
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
};
static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk",
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
......@@ -42,6 +65,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
{"vddio", 1800000, 1800000, 100000, 100},
},
},
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
......@@ -57,11 +82,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
{"lab_reg", -1, -1, -1, -1},
{"ibb_reg", -1, -1, -1, -1},
},
}
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
&msm8974_apq8084_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
......
......@@ -25,11 +25,15 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
#define DSI_6G_REG_SHIFT 4
struct msm_dsi_config {
u32 io_offset;
struct dsi_reg_config reg_cfg;
const char * const *bus_clk_names;
const int num_bus_clks;
};
struct msm_dsi_cfg_handler {
......
......@@ -24,26 +24,36 @@
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <video/mipi_display.h>
#include "dsi.h"
#include "dsi.xml.h"
#include "sfpb.xml.h"
#include "dsi_cfg.h"
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
u32 ver_6g;
if (!major || !minor)
return -EINVAL;
/* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
/*
* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
* makes all other registers 4-byte shifted down.
*
* In order to identify between DSI6G(v3) and beyond, and DSIv2 and
* older, we read the DSI_VERSION register without any shift(offset
* 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
* the case of DSI6G, this has to be zero (the offset points to a
* scratch register which we never touch)
*/
ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
if (ver_6g == 0) {
ver = msm_readl(base + REG_DSI_VERSION);
ver = msm_readl(base + REG_DSI_VERSION);
if (ver) {
/* older dsi host, there is no register shift */
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver <= MSM_DSI_VER_MAJOR_V2) {
/* old versions */
......@@ -54,12 +64,17 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
return -EINVAL;
}
} else {
/*
* newer host, offset 0 has 6G_HW_VERSION, the rest of the
* registers are shifted down, read DSI_VERSION again with
* the shifted offset
*/
ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver == MSM_DSI_VER_MAJOR_6G) {
/* 6G version */
*major = ver;
*minor = ver_6g;
*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
return 0;
} else {
return -EINVAL;
......@@ -91,10 +106,9 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
struct clk *mdp_core_clk;
struct clk *ahb_clk;
struct clk *axi_clk;
struct clk *mmss_misc_ahb_clk;
struct clk *bus_clks[DSI_BUS_CLK_MAX];
struct clk *byte_clk;
struct clk *esc_clk;
struct clk *pixel_clk;
......@@ -102,6 +116,14 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
u32 byte_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
struct clk *src_clk;
struct clk *esc_clk_src;
struct clk *dsi_clk_src;
u32 src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
......@@ -119,9 +141,19 @@ struct msm_dsi_host {
struct work_struct err_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
struct drm_gem_object *tx_gem_obj;
/* DSI v2 TX buffer */
void *tx_buf;
dma_addr_t tx_buf_paddr;
int tx_size;
u8 *rx_buf;
struct regmap *sfpb;
struct drm_display_mode *mode;
/* connected device info */
......@@ -165,21 +197,31 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
struct regulator *gdsc_reg;
struct clk *ahb_clk;
int ret;
u32 major = 0, minor = 0;
gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
gdsc_reg = regulator_get(dev, "gdsc");
if (IS_ERR(gdsc_reg)) {
pr_err("%s: cannot get gdsc\n", __func__);
goto exit;
}
ahb_clk = clk_get(dev, "iface_clk");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
goto put_gdsc;
}
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
goto put_gdsc;
goto put_clk;
}
ret = clk_prepare_enable(msm_host->ahb_clk);
ret = clk_prepare_enable(ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
goto disable_gdsc;
......@@ -196,9 +238,11 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
DBG("%s: Version %x:%x\n", __func__, major, minor);
disable_clks:
clk_disable_unprepare(msm_host->ahb_clk);
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
put_clk:
clk_put(ahb_clk);
put_gdsc:
regulator_put(gdsc_reg);
exit:
......@@ -295,40 +339,23 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
int ret = 0;
msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
if (IS_ERR(msm_host->mdp_core_clk)) {
ret = PTR_ERR(msm_host->mdp_core_clk);
pr_err("%s: Unable to get mdp core clk. ret=%d\n",
__func__, ret);
goto exit;
}
msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
if (IS_ERR(msm_host->ahb_clk)) {
ret = PTR_ERR(msm_host->ahb_clk);
pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
__func__, ret);
goto exit;
}
msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
if (IS_ERR(msm_host->axi_clk)) {
ret = PTR_ERR(msm_host->axi_clk);
pr_err("%s: Unable to get axi bus clk. ret=%d\n",
__func__, ret);
goto exit;
}
msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
__func__, ret);
goto exit;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0;
/* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++) {
msm_host->bus_clks[i] = devm_clk_get(dev,
cfg->bus_clk_names[i]);
if (IS_ERR(msm_host->bus_clks[i])) {
ret = PTR_ERR(msm_host->bus_clks[i]);
pr_err("%s: Unable to get %s, ret = %d\n",
__func__, cfg->bus_clk_names[i], ret);
goto exit;
}
}
/* get link and source clocks */
msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
......@@ -356,80 +383,85 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
if (IS_ERR(msm_host->byte_clk_src)) {
ret = PTR_ERR(msm_host->byte_clk_src);
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
msm_host->byte_clk_src = NULL;
goto exit;
}
msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
if (IS_ERR(msm_host->pixel_clk_src)) {
ret = PTR_ERR(msm_host->pixel_clk_src);
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
if (!msm_host->pixel_clk_src) {
ret = -ENODEV;
pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
msm_host->pixel_clk_src = NULL;
goto exit;
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
msm_host->src_clk = devm_clk_get(dev, "src_clk");
if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk);
pr_err("%s: can't find dsi_src_clk. ret=%d\n",
__func__, ret);
msm_host->src_clk = NULL;
goto exit;
}
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
if (!msm_host->esc_clk_src) {
ret = -ENODEV;
pr_err("%s: can't get esc_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
if (!msm_host->dsi_clk_src) {
ret = -ENODEV;
pr_err("%s: can't get dsi_clk_src. ret=%d\n",
__func__, ret);
}
}
exit:
return ret;
}
static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
{
int ret;
const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
int i, ret;
DBG("id=%d", msm_host->id);
ret = clk_prepare_enable(msm_host->mdp_core_clk);
if (ret) {
pr_err("%s: failed to enable mdp_core_clock, %d\n",
__func__, ret);
goto core_clk_err;
}
ret = clk_prepare_enable(msm_host->ahb_clk);
if (ret) {
pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
goto ahb_clk_err;
}
ret = clk_prepare_enable(msm_host->axi_clk);
if (ret) {
pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
goto axi_clk_err;
}
ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
if (ret) {
pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
__func__, ret);
goto misc_ahb_clk_err;
for (i = 0; i < cfg->num_bus_clks; i++) {
ret = clk_prepare_enable(msm_host->bus_clks[i]);
if (ret) {
pr_err("%s: failed to enable bus clock %d ret %d\n",
__func__, i, ret);
goto err;
}
}
return 0;
err:
for (; i > 0; i--)
clk_disable_unprepare(msm_host->bus_clks[i]);
misc_ahb_clk_err:
clk_disable_unprepare(msm_host->axi_clk);
axi_clk_err:
clk_disable_unprepare(msm_host->ahb_clk);
ahb_clk_err:
clk_disable_unprepare(msm_host->mdp_core_clk);
core_clk_err:
return ret;
}
static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
int i;
DBG("");
clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
clk_disable_unprepare(msm_host->axi_clk);
clk_disable_unprepare(msm_host->ahb_clk);
clk_disable_unprepare(msm_host->mdp_core_clk);
for (i = cfg->num_bus_clks - 1; i >= 0; i--)
clk_disable_unprepare(msm_host->bus_clks[i]);
}
static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
......@@ -476,11 +508,98 @@ static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
return ret;
}
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
msm_host->mode->clock, msm_host->byte_clk_rate,
msm_host->esc_clk_rate, msm_host->src_clk_rate);
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
goto error;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
goto error;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
goto error;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
}
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
goto error;
}
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
goto esc_clk_err;
}
ret = clk_prepare_enable(msm_host->src_clk);
if (ret) {
pr_err("%s: Failed to enable dsi src clk\n", __func__);
goto src_clk_err;
}
ret = clk_prepare_enable(msm_host->pixel_clk);
if (ret) {
pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
goto pixel_clk_err;
}
return 0;
pixel_clk_err:
clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
error:
return ret;
}
static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
return dsi_link_clk_enable_6g(msm_host);
else
return dsi_link_clk_enable_v2(msm_host);
}
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
clk_disable_unprepare(msm_host->byte_clk);
} else {
clk_disable_unprepare(msm_host->pixel_clk);
clk_disable_unprepare(msm_host->src_clk);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->byte_clk);
}
}
static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
......@@ -515,6 +634,7 @@ static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
struct drm_display_mode *mode = msm_host->mode;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
u32 pclk_rate;
......@@ -534,6 +654,47 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
unsigned int esc_mhz, esc_div;
unsigned long byte_mhz;
msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
/*
* esc clock is byte clock followed by a 4 bit divider,
* we need to find an escape clock frequency within the
* mipi DSI spec range, within the maximum divider limit.
* We iterate here over escape clock frequencies from
* 20 MHz down to 5 MHz and pick the first one that
* can be supported by our divider
*/
byte_mhz = msm_host->byte_clk_rate / 1000000;
for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
/*
* TODO: Ideally, we shouldn't know what sort of divider
* is available in mmss_cc, we're just assuming that
* it'll always be a 4 bit divider. Need to come up with
* a better way here.
*/
if (esc_div >= 1 && esc_div <= 16)
break;
}
if (esc_mhz < 5)
return -EINVAL;
msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
msm_host->src_clk_rate);
}
return 0;
}
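/*
 * Illustrative numbers for the DSIv2 branch of dsi_calc_clk_rate() above
 * (not from this patch): a 4-lane, 24 bpp panel with a 72 MHz pixel clock
 * gives byte_clk_rate = 72 MHz * 24 / (8 * 4) = 54 MHz and
 * src_clk_rate = 72 MHz * 24 / 8 = 216 MHz. The loop then tries
 * esc_mhz = 20: DIV_ROUND_UP(54, 20) = 3 fits the assumed 4-bit divider,
 * so esc_clk_rate = 54 MHz / 3 = 18 MHz.
 */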
......@@ -835,29 +996,46 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
u32 iova;
mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
if (IS_ERR(msm_host->tx_gem_obj)) {
ret = PTR_ERR(msm_host->tx_gem_obj);
pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
msm_host->tx_gem_obj = NULL;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
if (IS_ERR(msm_host->tx_gem_obj)) {
ret = PTR_ERR(msm_host->tx_gem_obj);
pr_err("%s: failed to allocate gem, %d\n",
__func__, ret);
msm_host->tx_gem_obj = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret);
return ret;
}
ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret);
return ret;
}
mutex_unlock(&dev->struct_mutex);
if (iova & 0x07) {
pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
return -EINVAL;
}
if (iova & 0x07) {
pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
return -EINVAL;
msm_host->tx_size = msm_host->tx_gem_obj->size;
} else {
msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
if (!msm_host->tx_buf) {
ret = -ENOMEM;
pr_err("%s: failed to allocate tx buf, %d\n",
__func__, ret);
return ret;
}
msm_host->tx_size = size;
}
return 0;
......@@ -874,14 +1052,19 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_gem_obj = NULL;
mutex_unlock(&dev->struct_mutex);
}
if (msm_host->tx_buf)
dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
msm_host->tx_buf_paddr);
}
/*
* prepare cmd buffer to be txed
*/
static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
const struct mipi_dsi_msg *msg)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct mipi_dsi_packet packet;
int len;
int ret;
......@@ -894,17 +1077,20 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
}
len = (packet.size + 3) & (~0x3);
if (len > tx_gem->size) {
if (len > msm_host->tx_size) {
pr_err("%s: packet size is too big\n", __func__);
return -EINVAL;
}
data = msm_gem_vaddr(tx_gem);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
return ret;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
data = msm_gem_vaddr(msm_host->tx_gem_obj);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
return ret;
}
} else {
data = msm_host->tx_buf;
}
/* MSM specific command format in memory */
......@@ -970,17 +1156,21 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
u32 iova;
u32 dma_base;
bool triggered;
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
if (ret) {
pr_err("%s: failed to get iova: %d\n", __func__, ret);
return ret;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
if (ret) {
pr_err("%s: failed to get iova: %d\n", __func__, ret);
return ret;
}
} else {
dma_base = msm_host->tx_buf_paddr;
}
reinit_completion(&msm_host->dma_comp);
......@@ -988,7 +1178,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
dsi_wait4video_eng_busy(msm_host);
triggered = msm_dsi_manager_cmd_xfer_trigger(
msm_host->id, iova, len);
msm_host->id, dma_base, len);
if (triggered) {
ret = wait_for_completion_timeout(&msm_host->dma_comp,
msecs_to_jiffies(200));
......@@ -1060,7 +1250,7 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
int bllp_len = msm_host->mode->hdisplay *
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
len = dsi_cmd_dma_add(msm_host, msg);
if (!len) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
......@@ -1383,6 +1573,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->device_node = device_node;
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
dev_err(dev, "%s: failed to get sfpb regmap\n",
__func__);
return PTR_ERR(msm_host->sfpb);
}
}
return 0;
}
......@@ -1408,12 +1608,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
ret = dsi_clk_init(msm_host);
if (ret) {
pr_err("%s: unable to initialize dsi clks\n", __func__);
goto fail;
}
msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
......@@ -1437,6 +1631,12 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
ret = dsi_clk_init(msm_host);
if (ret) {
pr_err("%s: unable to initialize dsi clks\n", __func__);
goto fail;
}
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
if (!msm_host->rx_buf) {
pr_err("%s: alloc rx temp buf failed\n", __func__);
......@@ -1750,11 +1950,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
......@@ -1766,6 +1967,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
......@@ -1791,6 +1993,22 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
}
exit:
return ret;
}
......@@ -1828,6 +2046,20 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
return 0;
}
static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
{
enum sfpb_ahb_arb_master_port_en en;
if (!msm_host->sfpb)
return;
en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
SFPB_GPREG_MASTER_PORT_EN__MASK,
SFPB_GPREG_MASTER_PORT_EN(en));
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
......@@ -1840,6 +2072,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto unlock_ret;
}
msm_dsi_sfpb_config(msm_host, true);
ret = dsi_calc_clk_rate(msm_host);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
......@@ -1862,7 +2096,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_phy_sw_reset(msm_host);
ret = msm_dsi_manager_phy_enable(msm_host->id,
msm_host->byte_clk_rate * 8,
clk_get_rate(msm_host->esc_clk),
msm_host->esc_clk_rate,
&clk_pre, &clk_post);
dsi_bus_clk_disable(msm_host);
if (ret) {
......@@ -1927,6 +2161,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
dsi_host_regulator_disable(msm_host);
msm_dsi_sfpb_config(msm_host, false);
DBG("-");
msm_host->power_on = false;
......
......@@ -774,7 +774,7 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
return ret;
}
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
......@@ -784,9 +784,9 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
return false;
if (IS_SYNC_NEEDED() && msm_dsi0)
msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
msm_dsi_host_cmd_xfer_commit(host, iova, len);
msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
return true;
}
......
......@@ -276,6 +276,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
{ .compatible = "qcom,dsi-phy-20nm",
.data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
{ .compatible = "qcom,dsi-phy-28nm-8960",
.data = &dsi_phy_28nm_8960_cfgs },
#endif
{}
};
......
......@@ -43,6 +43,7 @@ struct msm_dsi_phy_cfg {
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
......
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dsi_phy.h"
#include "dsi.xml.h"
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing)
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
}
static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
u32 status;
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
usleep_range(5000, 6000);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
do {
status = dsi_phy_read(base +
REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
break;
udelay(1);
} while (--i > 0);
}
static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
int i;
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
DBG("");
if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
dsi_28nm_phy_regulator_init(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
/* strength control */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
/* phy ctrl */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
dsi_28nm_phy_regulator_ctrl(phy);
dsi_28nm_phy_calibration(phy);
dsi_28nm_phy_lane_config(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
dsi_28nm_dphy_set_timing(phy, timing);
return 0;
}
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
/*
* Wait for the registers writes to complete in order to
* ensure that the phy is completely disabled
*/
wmb();
}
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.type = MSM_DSI_PHY_28NM_8960,
.src_pll_truthtable = { {true, true}, {false, true} },
.reg_cfg = {
.num = 1,
.regs = {
{"vddio", 1800000, 1800000, 100000, 100},
},
},
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
};
......@@ -151,6 +151,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_28NM_LP:
pll = msm_dsi_pll_28nm_init(pdev, type, id);
break;
case MSM_DSI_PHY_28NM_8960:
pll = msm_dsi_pll_28nm_8960_init(pdev, id);
break;
default:
pll = ERR_PTR(-ENXIO);
break;
......
......@@ -93,6 +93,16 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
return ERR_PTR(-ENODEV);
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
int id);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
int id)
{
return ERR_PTR(-ENODEV);
}
#endif
#endif /* __DSI_PLL_H__ */
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
#include "dsi_pll.h"
#include "dsi.xml.h"
/*
 * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
 *
 *
 *                        +------+
 *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
 *  F * byte_clk    |     +------+
 *                  | bit clock divider (F / 8)
 *                  |
 *                  |     +------+
 *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
 *                  |     +------+                 | (sets parent rate)
 *                  | byte clock divider (F)       |
 *                  |                              |
 *                  |                              o---> To esc RCG
 *                  |                                (doesn't set parent rate)
 *                  |
 *                  |     +------+
 *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG
 *                        +------+                 | (sets parent rate)
 *                   dsi clock divider (F * magic) |
 *                                                 |
 *                                                 o---> To pixel rcg
 *                                                   (doesn't set parent rate)
 */
#define POLL_MAX_READS 8000
#define POLL_TIMEOUT_US 1
#define NUM_PROVIDED_CLKS 2
#define VCO_REF_CLK_RATE 27000000
#define VCO_MIN_RATE 600000000
#define VCO_MAX_RATE 1200000000
#define DSI_BYTE_PLL_CLK 0
#define DSI_PIXEL_PLL_CLK 1
#define VCO_PREF_DIV_RATIO 27
struct pll_28nm_cached_state {
unsigned long vco_rate;
u8 postdiv3;
u8 postdiv2;
u8 postdiv1;
};
struct clk_bytediv {
struct clk_hw hw;
void __iomem *reg;
};
struct dsi_pll_28nm {
struct msm_dsi_pll base;
int id;
struct platform_device *pdev;
void __iomem *mmio;
/* custom byte clock divider */
struct clk_bytediv *bytediv;
/* private clocks: */
struct clk *clks[NUM_DSI_CLOCKS_MAX];
u32 num_clks;
/* clock-provider: */
struct clk *provided_clks[NUM_PROVIDED_CLKS];
struct clk_onecell_data clk_data;
struct pll_28nm_cached_state cached_state;
};
#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
int nb_tries, int timeout_us)
{
bool pll_locked = false;
u32 val;
while (nb_tries--) {
val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
if (pll_locked)
break;
udelay(timeout_us);
}
DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
return pll_locked;
}
/*
* Clock Callbacks
*/
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
void __iomem *base = pll_28nm->mmio;
u32 val, temp, fb_divider;
DBG("rate=%lu, parent's=%lu", rate, parent_rate);
temp = rate / 10;
val = VCO_REF_CLK_RATE / 10;
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
fb_divider & 0xff);
val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
val);
val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
val);
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
0xf);
val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
val);
return 0;
}
static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
POLL_TIMEOUT_US);
}
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
void __iomem *base = pll_28nm->mmio;
unsigned long vco_rate;
u32 status, fb_divider, temp, ref_divider;
VERB("parent_rate=%lu", parent_rate);
status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
fb_divider &= 0xff;
temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
fb_divider = (temp << 8) | fb_divider;
fb_divider += 1;
ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
ref_divider &= 0x3f;
ref_divider += 1;
/* multiply by 2 */
vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
} else {
vco_rate = 0;
}
DBG("returning vco rate = %lu", vco_rate);
return vco_rate;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
.round_rate = msm_dsi_pll_helper_clk_round_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = msm_dsi_pll_helper_clk_prepare,
.unprepare = msm_dsi_pll_helper_clk_unprepare,
.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
/*
* Custom byte clock divider clk_ops
*
* This clock is the entry point to configuring the PLL. The user (dsi host)
* will set this clock's rate to the desired byte clock rate. The VCO lock
* frequency is a multiple of the byte clock rate. The multiplication factor
* (shown as F in the diagram above) is a function of the byte clock rate.
*
* This custom divider clock ensures that its parent (VCO) is set to the
* desired rate, and that the byte clock postdivider (POSTDIV2) is configured
* accordingly
*/
#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_bytediv *bytediv = to_clk_bytediv(hw);
unsigned int div;
div = pll_read(bytediv->reg) & 0xff;
return parent_rate / (div + 1);
}
/* find multiplication factor(wrt byte clock) at which the VCO should be set */
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
unsigned long bit_mhz;
/* convert to bit clock in Mhz */
bit_mhz = (byte_clk_rate * 8) / 1000000;
if (bit_mhz < 125)
return 64;
else if (bit_mhz < 250)
return 32;
else if (bit_mhz < 600)
return 16;
else
return 8;
}
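/*
 * Worked example with assumed numbers (not from this patch): a byte clock
 * request of 56.25 MHz is a 450 MHz bit clock (8 * 56.25), so
 * get_vco_mul_factor() returns 16 and clk_bytediv_round_rate() below rounds
 * the VCO to 56.25 MHz * 16 = 900 MHz, inside the 600-1200 MHz range set by
 * VCO_MIN_RATE/VCO_MAX_RATE.
 */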
static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long best_parent;
unsigned int factor;
factor = get_vco_mul_factor(rate);
best_parent = rate * factor;
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
return *prate / factor;
}
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_bytediv *bytediv = to_clk_bytediv(hw);
u32 val;
unsigned int factor;
factor = get_vco_mul_factor(rate);
val = pll_read(bytediv->reg);
val |= (factor - 1) & 0xff;
pll_write(bytediv->reg, val);
return 0;
}
/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
.round_rate = clk_bytediv_round_rate,
.set_rate = clk_bytediv_set_rate,
.recalc_rate = clk_bytediv_recalc_rate,
};
/*
* PLL Callbacks
*/
static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
struct device *dev = &pll_28nm->pdev->dev;
void __iomem *base = pll_28nm->mmio;
bool locked;
unsigned int bit_div, byte_div;
int max_reads = 1000, timeout_us = 100;
u32 val;
DBG("id=%d", pll_28nm->id);
/*
* before enabling the PLL, configure the bit clock divider since we
* don't expose it as a clock to the outside world
* 1: read back the byte clock divider that should already be set
* 2: divide by 8 to get bit clock divider
* 3: write it to POSTDIV1
*/
val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
byte_div = val + 1;
bit_div = byte_div / 8;
val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val &= ~0xf;
val |= (bit_div - 1);
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
/* enable the PLL */
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
dev_err(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
return locked ? 0 : -EINVAL;
}
static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
DBG("id=%d", pll_28nm->id);
pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
}
static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->mmio;
cached_state->postdiv3 =
pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
cached_state->postdiv2 =
pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
cached_state->postdiv1 =
pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}
static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
void __iomem *base = pll_28nm->mmio;
int ret;
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
dev_err(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
cached_state->postdiv3);
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
cached_state->postdiv2);
pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
cached_state->postdiv1);
return 0;
}
static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider,
struct clk **pixel_clk_provider)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
if (byte_clk_provider)
*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
if (pixel_clk_provider)
*pixel_clk_provider =
pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
return 0;
}
static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
pll_28nm->clks, pll_28nm->num_clks);
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
{
char *clk_name, *parent_name, *vco_name;
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.num_parents = 1,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
struct clk **clks = pll_28nm->clks;
struct clk **provided_clks = pll_28nm->provided_clks;
struct clk_bytediv *bytediv;
struct clk_init_data bytediv_init = { };
int ret, num = 0;
DBG("%d", pll_28nm->id);
bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
if (!bytediv)
return -ENOMEM;
vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
if (!vco_name)
return -ENOMEM;
parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
if (!parent_name)
return -ENOMEM;
clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
if (!clk_name)
return -ENOMEM;
pll_28nm->bytediv = bytediv;
snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
vco_init.name = vco_name;
pll_28nm->base.clk_hw.init = &vco_init;
clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
/* prepare and register bytediv */
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
bytediv_init.parent_names = (const char * const *) &parent_name;
bytediv_init.num_parents = 1;
/* DIV2 */
clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
clk_register(dev, &bytediv->hw);
snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
/* DIV3 */
clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
clk_register_divider(dev, clk_name,
parent_name, 0, pll_28nm->mmio +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
0, 8, 0, NULL);
pll_28nm->num_clks = num;
pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
pll_28nm->clk_data.clks = provided_clks;
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
dev_err(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
return 0;
}
struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
int id)
{
struct dsi_pll_28nm *pll_28nm;
struct msm_dsi_pll *pll;
int ret;
if (!pdev)
return ERR_PTR(-ENODEV);
pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
if (!pll_28nm)
return ERR_PTR(-ENOMEM);
pll_28nm->pdev = pdev;
pll_28nm->id = id + 1;
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
pll = &pll_28nm->base;
pll->min_rate = VCO_MIN_RATE;
pll->max_rate = VCO_MAX_RATE;
pll->get_provider = dsi_pll_28nm_get_provider;
pll->destroy = dsi_pll_28nm_destroy;
pll->disable_seq = dsi_pll_28nm_disable_seq;
pll->save_state = dsi_pll_28nm_save_state;
pll->restore_state = dsi_pll_28nm_restore_state;
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
ret = pll_28nm_register(pll_28nm);
if (ret) {
dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
return pll;
}
......@@ -17,6 +17,8 @@
*/
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include "hdmi.h"
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
......@@ -322,8 +324,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
* The hdmi device:
*/
#include <linux/of_gpio.h>
#define HDMI_CFG(item, entry) \
.item ## _names = item ##_names_ ## entry, \
.item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
......@@ -388,17 +388,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
{}
};
#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
int gpio = of_get_named_gpio(of_node, name, 0);
......@@ -413,7 +402,6 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
}
return gpio;
}
#endif
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
......@@ -421,16 +409,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
const struct of_device_id *match;
match = of_match_node(dt_match, of_node);
if (match && match->data) {
hdmi_cfg = (struct hdmi_platform_config *)match->data;
DBG("hdmi phy: %s", match->compatible);
} else {
dev_err(dev, "unknown phy: %s\n", of_node->name);
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
return -ENXIO;
}
......@@ -443,55 +427,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 70;
config.ddc_data_gpio = 71;
config.hpd_gpio = 72;
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 100;
config.ddc_data_gpio = 101;
config.hpd_gpio = 102;
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
} else if (cpu_is_msm8x60()) {
static const char *hpd_reg_names[] = {
"8901_hdmi_mvs", "8901_mpp0"
};
config.phy_init = hdmi_phy_8x60_init;
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 170;
config.ddc_data_gpio = 171;
config.hpd_gpio = 172;
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
}
config.mmio_name = "hdmi_msm_hdmi_addr";
config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
hdmi_cfg = &config;
#endif
dev->platform_data = hdmi_cfg;
hdmi = hdmi_init(to_platform_device(dev));
......@@ -529,6 +464,16 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
{}
};
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
.remove = hdmi_dev_remove,
......
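The hdmi_bind() change above drops the open-coded of_match_node() lookup in favour of of_device_get_match_data(), which simply hands back the .data pointer of whichever dt_match entry matched the probing device. For reference, a minimal self-contained sketch of that pattern; the driver name, compatible strings and config struct below are hypothetical, not part of the msm driver:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* hypothetical per-SoC configuration, selected via the compatible string */
struct demo_cfg {
    unsigned long max_clk;
};

static const struct demo_cfg demo_cfg_a = { .max_clk = 266667000 };
static const struct demo_cfg demo_cfg_b = { .max_clk = 200000000 };

static const struct of_device_id demo_dt_match[] = {
    { .compatible = "vendor,demo-a", .data = &demo_cfg_a },
    { .compatible = "vendor,demo-b", .data = &demo_cfg_b },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_dt_match);

static int demo_probe(struct platform_device *pdev)
{
    /* returns the .data of the matching of_device_id, or NULL if none matched */
    const struct demo_cfg *cfg = of_device_get_match_data(&pdev->dev);

    if (!cfg)
        return -ENODEV;

    dev_info(&pdev->dev, "max_clk = %lu\n", cfg->max_clk);
    return 0;
}

static struct platform_driver demo_driver = {
    .probe = demo_probe,
    .driver = {
        .name = "demo",
        .of_match_table = demo_dt_match,
    },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");

This is also why the legacy non-DT fallback above can be deleted: every supported platform now gets its hdmi_platform_config from the match table.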
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, Inforce Computing. All rights reserved.
*
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp4_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
struct mdp4_dsi_encoder {
struct drm_encoder base;
struct drm_panel *panel;
bool enabled;
};
#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_dsi_encoder);
}
static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
.destroy = mdp4_dsi_encoder_destroy,
};
static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp4_kms *mdp4_kms = get_kms(encoder);
uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
mode = adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
/* probably need to get DATA_EN polarity from panel.. */
dsi_hsync_skew = 0; /* get this from panel? */
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
MDP4_DSI_ACTIVE_HCTL_START(0) |
MDP4_DSI_ACTIVE_HCTL_END(0));
mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
}
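To make the timing arithmetic in mdp4_dsi_encoder_mode_set() concrete, here is a small standalone sketch evaluating the same expressions for a made-up 480x800 panel mode; all numbers are illustrative, not taken from a real panel:

#include <stdio.h>
#include <stdint.h>

/* hypothetical 480x800 video-mode timings (pixels / lines) */
#define HDISPLAY    480
#define HSYNC_START 500
#define HSYNC_END   510
#define HTOTAL      530
#define VDISPLAY    800
#define VSYNC_START 803
#define VSYNC_END   805
#define VTOTAL      810

int main(void)
{
    uint32_t dsi_hsync_skew = 0;

    /* horizontal positions are counted from the start of HSYNC */
    uint32_t hsync_start_x = HTOTAL - HSYNC_START;                  /* 30  */
    uint32_t hsync_end_x = HTOTAL - (HSYNC_START - HDISPLAY) - 1;   /* 509 */

    /* vertical values are in pixel-clock ticks, i.e. lines * htotal */
    uint32_t vsync_period = VTOTAL * HTOTAL;                        /* 429300 */
    uint32_t vsync_len = (VSYNC_END - VSYNC_START) * HTOTAL;        /* 1060   */
    uint32_t display_v_start =
        (VTOTAL - VSYNC_START) * HTOTAL + dsi_hsync_skew;           /* 3710   */
    uint32_t display_v_end = vsync_period -
        ((VSYNC_START - VDISPLAY) * HTOTAL) + dsi_hsync_skew - 1;   /* 427709 */

    printf("hsync: start_x=%u end_x=%u\n", hsync_start_x, hsync_end_x);
    printf("vsync: period=%u len=%u\n", vsync_period, vsync_len);
    printf("active: v_start=%u v_end=%u\n", display_v_start, display_v_end);
    return 0;
}

The vertical register values come out much larger than the raw line counts because, as in the code above, they are expressed in pixel-clock ticks (lines multiplied by htotal).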
static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
if (!mdp4_dsi_encoder->enabled)
return;
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
* otherwise we end up in a funny state if we re-enable
* before the disable latches, which results that some of
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
mdp4_dsi_encoder->enabled = false;
}
static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
if (mdp4_dsi_encoder->enabled)
return;
mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
MDP4_DMA_CONFIG_R_BPC(BPC8) |
MDP4_DMA_CONFIG_G_BPC(BPC8) |
MDP4_DMA_CONFIG_B_BPC(BPC8) |
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
mdp4_dsi_encoder->enabled = true;
}
static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
.mode_fixup = mdp4_dsi_encoder_mode_fixup,
.mode_set = mdp4_dsi_encoder_mode_set,
.disable = mdp4_dsi_encoder_disable,
.enable = mdp4_dsi_encoder_enable,
};
/* initialize encoder */
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder = NULL;
struct mdp4_dsi_encoder *mdp4_dsi_encoder;
int ret;
mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
if (!mdp4_dsi_encoder) {
ret = -ENOMEM;
goto fail;
}
encoder = &mdp4_dsi_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
return encoder;
fail:
if (encoder)
mdp4_dsi_encoder_destroy(encoder);
return ERR_PTR(ret);
}
......@@ -29,7 +29,7 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
DRM_ERROR("errors: %08x\n", irqstatus);
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
}
void mdp4_irq_preinstall(struct msm_kms *kms)
......
......@@ -169,7 +169,14 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
/* if we had >1 encoder, we'd need something more clever: */
return mdp4_dtv_round_pixclk(encoder, rate);
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_TMDS:
return mdp4_dtv_round_pixclk(encoder, rate);
case DRM_MODE_ENCODER_LVDS:
case DRM_MODE_ENCODER_DSI:
default:
return rate;
}
}
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
......@@ -240,19 +247,18 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
return 0;
}
#ifdef CONFIG_OF
static struct drm_panel *detect_panel(struct drm_device *dev)
static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
{
struct device_node *endpoint, *panel_node;
struct device_node *np = dev->dev->of_node;
struct drm_panel *panel = NULL;
endpoint = of_graph_get_next_endpoint(np, NULL);
if (!endpoint) {
dev_err(dev->dev, "no valid endpoint\n");
return ERR_PTR(-ENODEV);
DBG("no endpoint in MDP4 to fetch LVDS panel\n");
return NULL;
}
/* don't proceed if we have an endpoint but no panel_node tied to it */
panel_node = of_graph_get_remote_port_parent(endpoint);
if (!panel_node) {
dev_err(dev->dev, "no valid panel node\n");
......@@ -262,132 +268,185 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
of_node_put(endpoint);
panel = of_drm_find_panel(panel_node);
if (!panel) {
of_node_put(panel_node);
return ERR_PTR(-EPROBE_DEFER);
}
return panel;
return panel_node;
}
#else
static struct drm_panel *detect_panel(struct drm_device *dev)
{
// ??? maybe use a module param to specify which panel is attached?
}
#endif
static int modeset_init(struct mdp4_kms *mdp4_kms)
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
int intf_type)
{
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_panel *panel;
struct device_node *panel_node;
struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
int i, dsi_id;
int ret;
/* construct non-private planes: */
plane = mdp4_plane_init(dev, VG1, false);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for VG1\n");
ret = PTR_ERR(plane);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
switch (intf_type) {
case DRM_MODE_ENCODER_LVDS:
/*
* bail out early if:
* - there is no panel node (no need to initialize lcdc
* encoder and lvds connector), or
* - panel node is a bad pointer
*/
panel_node = mdp4_detect_lcdc_panel(dev);
if (IS_ERR_OR_NULL(panel_node))
return PTR_ERR(panel_node);
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
plane = mdp4_plane_init(dev, VG2, false);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for VG2\n");
ret = PTR_ERR(plane);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
/*
* Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
*/
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
dev_err(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
panel = detect_panel(dev);
if (IS_ERR(panel)) {
ret = PTR_ERR(panel);
dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
goto fail;
}
priv->encoders[priv->num_encoders++] = encoder;
priv->connectors[priv->num_connectors++] = connector;
plane = mdp4_plane_init(dev, RGB2, true);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for RGB2\n");
ret = PTR_ERR(plane);
goto fail;
}
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
if (IS_ERR(crtc)) {
dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
ret = PTR_ERR(crtc);
goto fail;
}
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << 1;
encoder = mdp4_lcdc_encoder_init(dev, panel);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct LCDC encoder\n");
ret = PTR_ERR(encoder);
goto fail;
}
if (priv->hdmi) {
/* Construct bridge/connector for HDMI: */
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
/* LCDC can be hooked to DMA_P: */
encoder->possible_crtcs = 1 << priv->num_crtcs;
priv->encoders[priv->num_encoders++] = encoder;
priv->crtcs[priv->num_crtcs++] = crtc;
priv->encoders[priv->num_encoders++] = encoder;
break;
case DRM_MODE_ENCODER_DSI:
/* only DSI1 supported for now */
dsi_id = 0;
connector = mdp4_lvds_connector_init(dev, panel, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
goto fail;
}
if (!priv->dsi[dsi_id])
break;
priv->connectors[priv->num_connectors++] = connector;
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
dsi_encs[i] = mdp4_dsi_encoder_init(dev);
if (IS_ERR(dsi_encs[i])) {
ret = PTR_ERR(dsi_encs[i]);
dev_err(dev->dev,
"failed to construct DSI encoder: %d\n",
ret);
return ret;
}
/*
* Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
*/
/* TODO: Add DMA_S later? */
dsi_encs[i]->possible_crtcs = 1 << DMA_P;
priv->encoders[priv->num_encoders++] = dsi_encs[i];
}
plane = mdp4_plane_init(dev, RGB1, true);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for RGB1\n");
ret = PTR_ERR(plane);
goto fail;
}
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
if (ret) {
dev_err(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
if (IS_ERR(crtc)) {
dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
ret = PTR_ERR(crtc);
goto fail;
break;
default:
dev_err(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct DTV encoder\n");
ret = PTR_ERR(encoder);
goto fail;
return 0;
}
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
static const enum mdp4_pipe rgb_planes[] = {
RGB1, RGB2,
};
static const enum mdp4_pipe vg_planes[] = {
VG1, VG2,
};
static const enum mdp4_dma mdp4_crtcs[] = {
DMA_P, DMA_E,
};
static const char * const mdp4_crtc_names[] = {
"DMA_P", "DMA_E",
};
static const int mdp4_intfs[] = {
DRM_MODE_ENCODER_LVDS,
DRM_MODE_ENCODER_DSI,
DRM_MODE_ENCODER_TMDS,
};
/* construct non-private planes: */
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
dev_err(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
}
/* DTV can be hooked to DMA_E: */
encoder->possible_crtcs = 1 << priv->num_crtcs;
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
dev_err(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
}
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
dev_err(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
priv->crtcs[priv->num_crtcs++] = crtc;
priv->encoders[priv->num_encoders++] = encoder;
/*
* we currently set up two relatively fixed paths:
*
* LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
* or
* DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
*
* DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
*/
if (priv->hdmi) {
/* Construct bridge/connector for HDMI: */
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
}
......@@ -558,17 +617,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
else
config.max_clk = 200000000;
config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
return &config;
}
......@@ -157,7 +157,7 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
break;
default:
WARN_ON("invalid pipe");
WARN(1, "invalid pipe");
break;
}
......@@ -212,10 +212,19 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct drm_panel *panel);
struct device_node *panel_node);
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
struct drm_panel *panel, struct drm_encoder *encoder);
struct device_node *panel_node, struct drm_encoder *encoder);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
#else
static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
return ERR_PTR(-ENODEV);
}
#endif
#ifdef CONFIG_COMMON_CLK
struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
......
......@@ -23,6 +23,7 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
......@@ -338,7 +339,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel = mdp4_lcdc_encoder->panel;
struct drm_panel *panel;
int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
......@@ -346,6 +347,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (panel) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
......@@ -381,7 +383,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel = mdp4_lcdc_encoder->panel;
struct drm_panel *panel;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
......@@ -414,6 +416,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (panel) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
......@@ -442,7 +445,7 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
/* initialize encoder */
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct drm_panel *panel)
struct device_node *panel_node)
{
struct drm_encoder *encoder = NULL;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
......@@ -455,7 +458,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
goto fail;
}
mdp4_lcdc_encoder->panel = panel;
mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
......
......@@ -23,6 +23,7 @@
struct mdp4_lvds_connector {
struct drm_connector base;
struct drm_encoder *encoder;
struct device_node *panel_node;
struct drm_panel *panel;
};
#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
......@@ -33,6 +34,10 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
if (!mdp4_lvds_connector->panel)
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
return mdp4_lvds_connector->panel ?
connector_status_connected :
connector_status_disconnected;
......@@ -42,10 +47,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
struct drm_panel *panel = mdp4_lvds_connector->panel;
if (panel)
drm_panel_detach(panel);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
......@@ -60,9 +61,14 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
struct drm_panel *panel = mdp4_lvds_connector->panel;
int ret = 0;
if (panel)
if (panel) {
drm_panel_attach(panel, connector);
ret = panel->funcs->get_modes(panel);
drm_panel_detach(panel);
}
return ret;
}
......@@ -111,7 +117,7 @@ static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs
/* initialize connector */
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
struct drm_panel *panel, struct drm_encoder *encoder)
struct device_node *panel_node, struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
......@@ -124,7 +130,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
}
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel = panel;
mdp4_lvds_connector->panel_node = panel_node;
connector = &mdp4_lvds_connector->base;
......@@ -141,9 +147,6 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, encoder);
if (panel)
drm_panel_attach(panel, connector);
return connector;
fail:
......
......@@ -553,9 +553,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_cfg_platform config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
config.iommu = iommu_domain_alloc(&platform_bus_type);
return &config;
......
......@@ -293,6 +293,24 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.enable = mdp5_encoder_enable,
};
int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf.num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf.num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
......
......@@ -31,7 +31,7 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
DRM_ERROR("errors: %08x\n", irqstatus);
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
}
void mdp5_irq_preinstall(struct msm_kms *kms)
......
......@@ -468,6 +468,127 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc)
return encoder;
return NULL;
}
static int mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
unsigned int flags, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
int ret = 0;
crtc = priv->crtcs[pipe];
if (!crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return 0;
}
encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", pipe);
return 0;
}
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
/*
* the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
* the end of VFP. Translate the porch values relative to the line
* counter positions.
*/
vactive_start = vsw + vbp + 1;
vactive_end = vactive_start + mode->crtc_vdisplay;
/* last scan line before VSYNC */
vfp_end = mode->crtc_vtotal;
if (stime)
*stime = ktime_get();
line = mdp5_encoder_get_linecount(encoder);
if (line < vactive_start) {
line -= vactive_start;
ret |= DRM_SCANOUTPOS_IN_VBLANK;
} else if (line > vactive_end) {
line = line - vfp_end - vactive_start;
ret |= DRM_SCANOUTPOS_IN_VBLANK;
} else {
line -= vactive_start;
}
*vpos = line;
*hpos = 0;
if (etime)
*etime = ktime_get();
return ret;
}
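A quick way to sanity-check the line-counter translation above is to run the same branches over made-up timings. The sketch below assumes a hypothetical mode with a 5-line sync pulse, 36-line back porch, 1080 active lines and 1125 total lines; the sample counter readings are invented for illustration:

#include <stdio.h>

/* hypothetical vertical timings: 5-line sync, 36-line back porch,
 * 1080 active lines, 1125 total lines (so a 4-line front porch) */
#define VSYNC_START 1084
#define VSYNC_END   1089
#define VDISPLAY    1080
#define VTOTAL      1125

static int translate_line(int line)
{
    int vsw = VSYNC_END - VSYNC_START;          /* 5    */
    int vbp = VTOTAL - VSYNC_END;               /* 36   */
    int vactive_start = vsw + vbp + 1;          /* 42   */
    int vactive_end = vactive_start + VDISPLAY; /* 1122 */
    int vfp_end = VTOTAL;                       /* 1125 */

    if (line < vactive_start)
        return line - vactive_start;            /* vsync/back porch: in vblank */
    else if (line > vactive_end)
        return line - vfp_end - vactive_start;  /* front porch: also in vblank */
    return line - vactive_start;                /* inside the active region    */
}

int main(void)
{
    printf("line 10   -> vpos %d\n", translate_line(10));   /* -32 */
    printf("line 600  -> vpos %d\n", translate_line(600));  /* 558 */
    printf("line 1124 -> vpos %d\n", translate_line(1124)); /* -43 */
    return 0;
}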
static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc;
if (pipe < 0 || pipe >= priv->num_crtcs) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
crtc = priv->crtcs[pipe];
if (!crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
&crtc->mode);
}
static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
if (pipe < 0 || pipe >= priv->num_crtcs)
return 0;
crtc = priv->crtcs[pipe];
if (!crtc)
return 0;
encoder = get_encoder_from_crtc(crtc);
if (!encoder)
return 0;
return mdp5_encoder_get_framecount(encoder);
}
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
......@@ -590,6 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
!config->hw->intf.base[i])
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
mdp5_disable(mdp5_kms);
mdelay(16);
......@@ -635,6 +758,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = config->hw->lm.max_width;
dev->mode_config.max_height = config->hw->lm.max_height;
dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
dev->max_vblank_count = 0xffffffff;
dev->vblank_disable_immediate = true;
return kms;
fail:
......
......@@ -222,6 +222,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
......
......@@ -237,20 +237,9 @@ static int msm_unload(struct drm_device *dev)
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
.data = (void *)5,
}, {
/* end node */
} };
struct device *dev = &pdev->dev;
const struct of_device_id *match;
match = of_match_node(match_types, dev->of_node);
if (match)
return (int)(unsigned long)match->data;
#endif
return 4;
return (int) (unsigned long) of_device_get_match_data(dev);
}
#include <linux/of_address.h>
......@@ -258,10 +247,10 @@ static int get_mdp_ver(struct platform_device *pdev)
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct device_node *node;
unsigned long size = 0;
int ret = 0;
#ifdef CONFIG_OF
/* In the device-tree world, we could have a 'memory-region'
* phandle, which gives us a link to our "vram". Allocating
* is all nicely abstracted behind the dma api, but we need
......@@ -278,7 +267,6 @@ static int msm_init_vram(struct drm_device *dev)
* as corruption on screen before we have a chance to
* load and do initial modeset)
*/
struct device_node *node;
node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
if (node) {
......@@ -288,14 +276,12 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
size = r.end - r.start;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
} else
#endif
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
if (!iommu_present(&platform_bus_type)) {
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
} else if (!iommu_present(&platform_bus_type)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
......@@ -1035,9 +1021,9 @@ static const struct dev_pm_ops msm_pm_ops = {
* Componentized driver support:
*/
#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
* (or probably any other).. so probably some room for some helpers
/*
* NOTE: duplication of the same code as exynos or imx (or probably any other).
* so probably some room for some helpers
*/
static int compare_of(struct device *dev, void *data)
{
......@@ -1062,12 +1048,6 @@ static int add_components(struct device *dev, struct component_match **matchptr,
return 0;
}
#else
static int compare_dev(struct device *dev, void *data)
{
return dev == data;
}
#endif
static int msm_drm_bind(struct device *dev)
{
......@@ -1091,35 +1071,9 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
add_components(&pdev->dev, &match, "gpus");
#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
* they are simply not present). But for non-DT we only need to
* care about apq8064/apq8060/etc (all mdp4/a3xx):
*/
static const char *devnames[] = {
"hdmi_msm.0", "kgsl-3d0.0",
};
int i;
DBG("Adding components..");
for (i = 0; i < ARRAY_SIZE(devnames); i++) {
struct device *dev;
dev = bus_find_device_by_name(&platform_bus_type,
NULL, devnames[i]);
if (!dev) {
dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
return -EPROBE_DEFER;
}
component_match_add(&pdev->dev, &match, compare_dev, dev);
}
#endif
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
......@@ -1138,8 +1092,10 @@ static const struct platform_device_id msm_id[] = {
};
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
{ .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
{ .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
......
......@@ -31,14 +31,9 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
#include <mach/iommu_domains.h>
#endif
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
......
......@@ -121,7 +121,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_unreference(fbdev->bo);
drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
......
......@@ -121,7 +121,7 @@ struct drm_msm_gem_cpu_fini {
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
__u32 or; /* in, value OR'd with result */
__s32 shift; /* in, amount of left shift (can be negative) */
__s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
};
......
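For context on how the reloc fields are meant to combine, here is a rough userspace-eye sketch of the math implied by the field comments above (the authoritative behaviour is whatever the kernel submit path implements; apply_reloc() and the addresses below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Sketch of one relocation: take the GPU address of reloc_bo, add
 * reloc_offset, shift (a negative shift meaning shift right), OR in 'or',
 * and that 32-bit value is what gets patched at submit_offset. */
static uint32_t apply_reloc(uint64_t reloc_bo_iova, uint64_t reloc_offset,
                            int32_t shift, uint32_t or_mask)
{
    uint64_t addr = reloc_bo_iova + reloc_offset;

    if (shift < 0)
        addr >>= -shift;
    else
        addr <<= shift;

    return (uint32_t)addr | or_mask;
}

int main(void)
{
    /* hypothetical buffer at GPU address 0x10000000, sub-allocation at
     * offset 0x100, no shift, low bit OR'd in */
    printf("relocated word: 0x%08x\n",
           apply_reloc(0x10000000, 0x100, 0, 0x1)); /* 0x10000101 */
    return 0;
}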