Commit af8352f1 authored by Daniel Vetter

Merge tag 'drm-msm-next-2021-04-11' of https://gitlab.freedesktop.org/drm/msm into drm-next

msm-next from Rob:
* Big DSI phy/pll cleanup. Includes some clk patches, acked by
  maintainer
* Initial support for sc7280
* compatible string fixes for sm8150/sm8250
* cleanups for all dpu gens to use same bandwidth scaling paths (\o/)
* various shrinker path lock contention optimizations
* unpin/swap support for GEM objects (disabled by default, enable with
  msm.enable_eviction=1 .. due to various combinations of iommu drivers
  with older gens I want to get more testing on hw I don't have in front
  of me before enabling by default)
* The usual assortment of misc fixes and cleanups
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvL=4aw15qoY8fbKG9FCgnx8Y-dCtf7xiFwTQSHopwSQg@mail.gmail.com
parents 213cc929 a29c8c02
......@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
......@@ -206,6 +207,40 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
static void devm_clk_hw_release_mux(struct device *dev, void *res)
{
clk_hw_unregister_mux(*(struct clk_hw **)res);
}
struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
const char *name, u8 num_parents,
const char * const *parent_names,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
parent_data, flags, reg, shift, mask,
clk_mux_flags, table, lock);
if (!IS_ERR(hw)) {
*ptr = hw;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
......
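For context, a minimal sketch (not part of this series) of how a clock driver could pick up the new device-managed mux registration. The probe function, register offset, and parent names below are hypothetical, and the register block is assumed to have been ioremapped by the caller; the argument list follows the __devm_clk_hw_register_mux() prototype shown above.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

static const char * const example_mux_parents[] = { "xo", "pll_out" };

static int example_register_mux(struct platform_device *pdev, void __iomem *base)
{
	struct clk_hw *hw;

	/* Lifetime is tied to the device: no clk_hw_unregister_mux() needed. */
	hw = __devm_clk_hw_register_mux(&pdev->dev, NULL, "example_mux",
					ARRAY_SIZE(example_mux_parents),
					example_mux_parents, NULL, NULL,
					0,		/* flags */
					base + 0x10,	/* mux register (hypothetical offset) */
					0,		/* shift */
					0x1,		/* mask: one select bit for two parents */
					0, NULL, NULL);	/* clk_mux_flags, table, lock */

	return PTR_ERR_OR_ZERO(hw);
}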
......@@ -20,6 +20,7 @@ config DRM_MSM
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
select NVMEM
help
DRM/KMS driver for MSM/snapdragon.
......@@ -76,14 +77,6 @@ config DRM_MSM_DSI
Choose this option if you have a need for MIPI DSI connector
support.
config DRM_MSM_DSI_PLL
bool "Enable DSI PLL driver in MSM DRM"
depends on DRM_MSM_DSI && COMMON_CLK
default y
help
Choose this option to enable DSI PLL driver which provides DSI
source clocks under common clock framework.
config DRM_MSM_DSI_28NM_PHY
bool "Enable DSI 28nm PHY driver in MSM DRM"
depends on DRM_MSM_DSI
......
......@@ -136,13 +136,4 @@ msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
......@@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
}
struct a6xx_gmu_oob_bits {
int set, ack, set_new, ack_new;
int set, ack, set_new, ack_new, clear, clear_new;
const char *name;
};
......@@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
.ack = 24,
.set_new = 30,
.ack_new = 31,
.clear = 24,
.clear_new = 31,
},
[GMU_OOB_PERFCOUNTER_SET] = {
......@@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
.ack = 25,
.set_new = 28,
.ack_new = 30,
.clear = 25,
.clear_new = 29,
},
[GMU_OOB_BOOT_SLUMBER] = {
.name = "BOOT_SLUMBER",
.set = 22,
.ack = 30,
.clear = 30,
},
[GMU_OOB_DCVS_SET] = {
.name = "GPU_DCVS",
.set = 23,
.ack = 31,
.clear = 31,
},
};
......@@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
return;
if (gmu->legacy)
bit = a6xx_gmu_oob_bits[state].ack;
bit = a6xx_gmu_oob_bits[state].clear;
else
bit = a6xx_gmu_oob_bits[state].ack_new;
bit = a6xx_gmu_oob_bits[state].clear_new;
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
......
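The practical effect of the new clear/clear_new fields, going by the table above: clearing the PERFCOUNTER OOB on non-legacy firmware now writes bit 29 (clear_new) to HOST2GMU_INTR_SET, where the old code reused the ack_new bit 30; for the other OOB entries the clear bit happens to match the ack bit, so their behaviour is unchanged.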
......@@ -273,6 +273,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_FAULTS:
*value = gpu->global_faults;
return 0;
case MSM_PARAM_SUSPENDS:
*value = gpu->suspend_count;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
......
......@@ -58,8 +58,8 @@ int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
instance_idx);
return dpu_kms->hw_intr->ops.irq_idx_lookup(dpu_kms->hw_intr,
intr_type, instance_idx);
}
/**
......
......@@ -380,7 +380,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
} else {
DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
memset(new, 0, sizeof(*new));
update_bus = true;
update_clk = true;
}
......
......@@ -66,6 +66,83 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
kfree(dpu_crtc);
}
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc)
return encoder;
return NULL;
}
static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
return false;
}
return dpu_encoder_get_frame_count(encoder);
}
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
unsigned int pipe = crtc->index;
struct drm_encoder *encoder;
int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", pipe);
return false;
}
vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
/*
* the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
* the end of VFP. Translate the porch values relative to the line
* counter positions.
*/
vactive_start = vsw + vbp + 1;
vactive_end = vactive_start + mode->crtc_vdisplay;
/* last scan line before VSYNC */
vfp_end = mode->crtc_vtotal;
if (stime)
*stime = ktime_get();
line = dpu_encoder_get_linecount(encoder);
if (line < vactive_start)
line -= vactive_start;
else if (line > vactive_end)
line = line - vfp_end - vactive_start;
else
line -= vactive_start;
*vpos = line;
*hpos = 0;
if (etime)
*etime = ktime_get();
return true;
}
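As a worked example with hypothetical 1080p timings (vsync width 5, vertical back porch 36, vdisplay 1080): vactive_start = 5 + 36 + 1 = 42 and vactive_end = 1122, so a raw line counter of 42 reports vpos 0 (first active line), 600 reports 558, and a counter of 20 (still inside the sync/back porch) reports -22, i.e. 22 lines before active video starts.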
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
struct dpu_plane_state *pstate, struct dpu_format *format)
{
......@@ -130,7 +207,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
DECLARE_BITMAP(fetch_active, SSPP_MAX);
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
......@@ -140,7 +219,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
fb = state->fb;
dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
set_bit(dpu_plane_pipe(plane), fetch_active);
DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
......@@ -180,6 +259,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
if (ctl->ops.set_active_pipes)
ctl->ops.set_active_pipes(ctl, fetch_active);
_dpu_crtc_program_lm_output_roi(crtc);
}
......@@ -839,6 +921,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
crtc_state->active);
memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
goto end;
}
......@@ -1247,6 +1330,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.early_unregister = dpu_crtc_early_unregister,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.get_vblank_counter = dpu_crtc_get_vblank_counter,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
......@@ -1255,6 +1340,7 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
.atomic_flush = dpu_crtc_atomic_flush,
.get_scanout_position = dpu_crtc_get_scanout_position,
};
/* initialize crtc */
......
......@@ -426,6 +426,36 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
return 0;
}
int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
int framecount = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
phys = dpu_enc ? dpu_enc->cur_master : NULL;
if (phys && phys->ops.get_frame_count)
framecount = phys->ops.get_frame_count(phys);
return framecount;
}
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
int linecount = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
phys = dpu_enc ? dpu_enc->cur_master : NULL;
if (phys && phys->ops.get_line_count)
linecount = phys->ops.get_line_count(phys);
return linecount;
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct dpu_encoder_hw_resources *hw_res)
{
......
......@@ -156,5 +156,16 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
*/
void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
u32 idle_timeout);
/**
* dpu_encoder_get_linecount - get interface line count for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
*/
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
/**
* dpu_encoder_get_frame_count - get interface frame count for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
*/
int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
......@@ -143,6 +143,7 @@ struct dpu_encoder_phys_ops {
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
int (*get_frame_count)(struct dpu_encoder_phys *phys);
};
/**
......
......@@ -658,6 +658,31 @@ static int dpu_encoder_phys_vid_get_line_count(
return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static int dpu_encoder_phys_vid_get_frame_count(
struct dpu_encoder_phys *phys_enc)
{
struct intf_status s = {0};
u32 fetch_start = 0;
struct drm_display_mode mode = phys_enc->cached_mode;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
return -EINVAL;
phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);
if (s.is_prog_fetch_en && s.is_en) {
fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
if ((s.line_count > fetch_start) &&
(s.line_count <= mode.vtotal))
return s.frame_count + 1;
}
return s.frame_count;
}
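A rough example with hypothetical timings (vtotal 1125, vsync_start 1084, vdisplay 1080): fetch_start = 1125 - (1084 - 1080) = 1121, so when programmable fetch is enabled and the line counter reads 1122..1125 the hardware has already started fetching the next frame, and the reported frame count is bumped by one to stay consistent with the vblank counter.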
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
......@@ -676,6 +701,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
......
......@@ -41,7 +41,7 @@
#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
......@@ -49,7 +49,7 @@
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
#define IS_SC7280_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_720)
#define DPU_HW_BLK_NAME_LEN 16
......@@ -185,6 +185,7 @@ enum {
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
DPU_CTL_ACTIVE_CFG,
DPU_CTL_FETCH_ACTIVE,
DPU_CTL_MAX
};
......@@ -193,11 +194,14 @@ enum {
* @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
* pixel data arrives to this INTF
* @DPU_INTF_TE INTF block has TE configuration support
* @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
*				video timing
* @DPU_INTF_MAX
*/
enum {
DPU_INTF_INPUT_CTRL = 0x1,
DPU_INTF_TE,
DPU_DATA_HCTL_EN,
DPU_INTF_MAX
};
......@@ -719,6 +723,7 @@ struct dpu_perf_cfg {
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
* @mdss_irqs: Bitmap with the irqs supported by the target
* @obsolete_irq: Irq types that are obsolete for a particular target
*/
struct dpu_mdss_cfg {
u32 hwversion;
......@@ -765,6 +770,7 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *vig_formats;
unsigned long mdss_irqs;
unsigned long obsolete_irq;
};
struct dpu_mdss_hw_cfg_handler {
......
......@@ -27,6 +27,7 @@
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
......@@ -34,6 +35,11 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define INTF_IDX 31
#define CTL_INVALID_BIT 0xffff
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
const struct dpu_mdss_cfg *m,
......@@ -344,6 +350,8 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
}
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
......@@ -531,6 +539,23 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
int i;
u32 val = 0;
if (fetch_active) {
for (i = 0; i < SSPP_MAX; i++) {
if (test_bit(i, fetch_active) &&
fetch_tbl[i] != CTL_INVALID_BIT)
val |= BIT(fetch_tbl[i]);
}
}
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
......@@ -560,6 +585,8 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
......
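For illustration, assuming the standard dpu_sspp enum ordering (SSPP_NONE, four VIG pipes, four RGB pipes, four DMA pipes, two cursors), a fetch_active bitmap with SSPP_VIG0 and SSPP_DMA0 set would translate through fetch_tbl to BIT(16) | BIT(0) in CTL_FETCH_PIPE_ACTIVE, while RGB and cursor pipes map to CTL_INVALID_BIT and are skipped.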
......@@ -167,6 +167,9 @@ struct dpu_hw_ctl_ops {
*/
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
};
/**
......
......@@ -83,11 +83,12 @@ struct dpu_hw_intr_ops {
/**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops
* @intr: HW interrupt handle
* @intr_type: Interrupt type defined in dpu_intr_type
* @instance_idx: HW interrupt block instance
* @return: irq_idx or -EINVAL for lookup fail
*/
int (*irq_idx_lookup)(
int (*irq_idx_lookup)(struct dpu_hw_intr *intr,
enum dpu_intr_type intr_type,
u32 instance_idx);
......@@ -179,6 +180,7 @@ struct dpu_hw_intr_ops {
* @save_irq_status: array of IRQ status reg storage created during init
* @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
* @obsolete_irq: irq types that are obsolete for a particular target
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
......@@ -188,6 +190,7 @@ struct dpu_hw_intr {
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
unsigned long irq_mask;
unsigned long obsolete_irq;
};
/**
......
......@@ -31,6 +31,8 @@
#define INTF_TEST_CTL 0x054
#define INTF_TP_COLOR0 0x058
#define INTF_TP_COLOR1 0x05C
#define INTF_CONFIG2 0x060
#define INTF_DISPLAY_DATA_HCTL 0x064
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
......@@ -93,7 +95,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
u32 panel_format;
u32 intf_cfg;
u32 intf_cfg, intf_cfg2 = 0, display_data_hctl = 0;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
......@@ -178,6 +180,13 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
(COLOR_8BIT << 4) |
(0x21 << 8));
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
intf_cfg2 |= BIT(4);
display_data_hctl = display_hctl;
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
}
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
......@@ -256,6 +265,7 @@ static void dpu_hw_intf_get_status(
struct dpu_hw_blk_reg_map *c = &intf->hw;
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
if (s->is_en) {
s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
......
......@@ -40,6 +40,7 @@ struct intf_prog_fetch {
struct intf_status {
u8 is_en; /* interface timing engine is enabled or not */
u8 is_prog_fetch_en; /* interface prog fetch counter is enabled or not */
u32 frame_count; /* frame count since timing engine enabled */
u32 line_count; /* current line count including blanking */
};
......
......@@ -30,7 +30,7 @@ struct traffic_shaper_cfg {
/**
* struct split_pipe_cfg - pipe configuration for dual display panels
* @en : Enable/disable dual pipe confguration
* @en : Enable/disable dual pipe configuration
* @mode : Panel interface mode
* @intf : Interface id for main control path
* @split_flush_en: Allows both the paths to be flushed when master path is
......@@ -76,7 +76,7 @@ struct dpu_vsync_source_cfg {
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
/** setup_split_pipe() : Regsiters are not double buffered, thisk
/** setup_split_pipe() : Registers are not double buffered, thisk
* function should be called before timing control enable
* @mdp : mdp top context driver
* @cfg : upper and lower part of pipe configuration
......
......@@ -14,6 +14,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_mmu.h"
......@@ -933,8 +934,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
DPU_DEBUG("REG_DMA is not defined");
}
if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
dpu_kms_parse_data_bus_icc_path(dpu_kms);
dpu_kms_parse_data_bus_icc_path(dpu_kms);
pm_runtime_get_sync(&dpu_kms->pdev->dev);
......@@ -1025,6 +1025,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
dev->max_vblank_count = 0xffffffff;
/* Disable vblank irqs aggressively for power-saving */
dev->vblank_disable_immediate = true;
/*
* _dpu_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
......@@ -1221,6 +1225,9 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
{ .compatible = "qcom,sm8150-dpu", },
{ .compatible = "qcom,sm8250-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
......
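These two settings work together: max_vblank_count = 0xffffffff advertises the full 32-bit hardware frame counter now exposed through dpu_crtc_get_vblank_counter, and with accurate counter and timestamp sources in place, vblank_disable_immediate lets the DRM core switch vblank interrupts off as soon as the last user drops its reference instead of waiting for the disable timer.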
......@@ -31,40 +31,8 @@ struct dpu_mdss {
void __iomem *mmio;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
struct icc_path *path[2];
u32 num_paths;
};
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
dpu_mdss->path[0] = path0;
dpu_mdss->num_paths = 1;
if (!IS_ERR_OR_NULL(path1)) {
dpu_mdss->path[1] = path1;
dpu_mdss->num_paths++;
}
return 0;
}
static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
int i;
u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
}
static void dpu_mdss_irq(struct irq_desc *desc)
{
struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
......@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
dpu_mdss_icc_request_bw(mdss);
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (ret) {
DPU_ERROR("clock enable failed, ret:%d\n", ret);
......@@ -204,6 +170,9 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
case DPU_HW_VER_620:
writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
break;
case DPU_HW_VER_720:
writel_relaxed(0x101e, dpu_mdss->mmio + UBWC_STATIC);
break;
}
return ret;
......@@ -213,15 +182,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret, i;
int ret;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (ret)
DPU_ERROR("clock disable failed, ret:%d\n", ret);
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_set_bw(dpu_mdss->path[i], 0, 0);
return ret;
}
......@@ -232,7 +198,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int irq;
int i;
pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
......@@ -242,9 +207,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_put(dpu_mdss->path[i]);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
......@@ -276,12 +238,6 @@ int dpu_mdss_init(struct drm_device *dev)
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
if (ret)
return ret;
}
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
......@@ -307,8 +263,6 @@ int dpu_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
dpu_mdss_icc_request_bw(priv->mdss);
return ret;
irq_error:
......
......@@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
u32 total_lines, vclks_line, cfg;
long vsync_clk_speed;
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
......@@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
total_lines = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
......@@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
vsync_clk_speed);
return -EINVAL;
}
vclks_line = vsync_clk_speed * 100 / total_lines_x100;
vclks_line = vsync_clk_speed / total_lines;
cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
/*
* Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
* the vsync_clk equating to roughly half the desired panel refresh rate.
* This is only necessary as stability fallback if interrupts from the
* panel arrive too late or not at all, but is currently used by default
* because these panel interrupts are not wired up yet.
*/
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
mdp5_write(mdp5_kms,
REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
mdp5_write(mdp5_kms,
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
......@@ -59,6 +67,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
MDP5_PP_SYNC_THRESH_START(4) |
MDP5_PP_SYNC_THRESH_CONTINUE(4));
mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0);
return 0;
}
......
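A rough sanity check with hypothetical numbers: for a 60 Hz panel with vtotal 2000 and a 19.2 MHz vsync clock, total_lines = 2000 * 60 = 120000 and vclks_line = 19200000 / 120000 = 160 ticks per line; with SYNC_CONFIG_HEIGHT now set to 2 * vtotal = 4000 lines, the counter wraps every 160 * 4000 = 640000 ticks, about 33 ms or roughly 30 Hz, i.e. half the panel refresh rate, as the new comment describes.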
......@@ -226,7 +226,7 @@ static int dp_test_data_show(struct seq_file *m, void *data)
debug->link->test_video.test_h_width);
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
seq_printf(m, "bpc: %u\n",
dp_link_bit_depth_to_bpc(bpc));
} else
seq_puts(m, "0");
......@@ -368,44 +368,21 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
int rc = 0;
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
struct dentry *file;
struct dentry *test_active;
struct dentry *test_data, *test_type;
file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
debug, &dp_debug_fops);
if (IS_ERR_OR_NULL(file)) {
rc = PTR_ERR(file);
DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_active = debugfs_create_file("msm_dp_test_active", 0444,
debugfs_create_file("msm_dp_test_active", 0444,
minor->debugfs_root,
debug, &test_active_fops);
if (IS_ERR_OR_NULL(test_active)) {
rc = PTR_ERR(test_active);
DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_data = debugfs_create_file("msm_dp_test_data", 0444,
debugfs_create_file("msm_dp_test_data", 0444,
minor->debugfs_root,
debug, &dp_test_data_fops);
if (IS_ERR_OR_NULL(test_data)) {
rc = PTR_ERR(test_data);
DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
DEBUG_NAME, rc);
}
test_type = debugfs_create_file("msm_dp_test_type", 0444,
debugfs_create_file("msm_dp_test_type", 0444,
minor->debugfs_root,
debug, &dp_test_type_fops);
if (IS_ERR_OR_NULL(test_type)) {
rc = PTR_ERR(test_type);
DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
DEBUG_NAME, rc);
}
debug->root = minor->debugfs_root;
......
......@@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
dp_usbpd->hpd_high = hpd;
if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
&& !hpd_priv->dp_cb->disconnect) {
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|| !hpd_priv->dp_cb->disconnect) {
pr_err("hpd dp_cb not initialized\n");
return -EINVAL;
}
......
......@@ -269,7 +269,7 @@ int dp_power_clk_enable(struct dp_power *dp_power,
DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type), rc);
return rc;
return rc;
}
if (pm_type == DP_CORE_PM)
......
......@@ -23,18 +23,6 @@
struct msm_dsi_phy_shared_timings;
struct msm_dsi_phy_clk_request;
enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
MSM_DSI_PHY_10NM,
MSM_DSI_PHY_7NM,
MSM_DSI_PHY_7NM_V4_1,
MSM_DSI_PHY_MAX
};
enum msm_dsi_phy_usecase {
MSM_DSI_PHY_STANDALONE,
MSM_DSI_PHY_MASTER,
......@@ -104,45 +92,6 @@ static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
/* dsi pll */
struct msm_dsi_pll;
#ifdef CONFIG_DRM_MSM_DSI_PLL
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int dsi_id);
void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) {
return ERR_PTR(-ENODEV);
}
static inline void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
{
}
static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
return -ENODEV;
}
static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
{
}
static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
{
return 0;
}
static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc)
{
return -ENODEV;
}
#endif
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
......@@ -169,7 +118,7 @@ struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
struct msm_dsi_phy *src_phy);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
......@@ -213,14 +162,17 @@ struct msm_dsi_phy_clk_request {
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
struct msm_dsi_phy_shared_timings *shared_timing);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
#endif /* __DSI_CONNECTOR_H__ */
......@@ -106,12 +106,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
.num_dsi = 2,
};
/*
* TODO: core_mmss_clk fails to enable for some reason, but things work fine
* without it too. Figure out why it doesn't enable and uncomment below
*/
static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core", "iface", "bus", /* "core_mmss", */
"mdp_core", "iface", "bus", "core_mmss",
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
......
......@@ -1826,8 +1826,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
if (!msm_host) {
pr_err("%s: FAILED: cannot alloc dsi host\n",
__func__);
ret = -ENOMEM;
goto fail;
}
......@@ -2226,13 +2224,13 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
}
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
struct msm_dsi_phy *src_phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
ret = msm_dsi_pll_get_clk_provider(src_pll,
ret = msm_dsi_phy_get_clk_provider(src_phy,
&byte_clk_provider, &pixel_clk_provider);
if (ret) {
pr_info("%s: can't get provider from pll, don't set parent\n",
......
......@@ -70,7 +70,6 @@ static int dsi_mgr_setup_components(int id)
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_pll *src_pll;
int ret;
if (!IS_DUAL_DSI()) {
......@@ -79,10 +78,7 @@ static int dsi_mgr_setup_components(int id)
return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
if (IS_ERR(src_pll))
return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy);
} else if (!other_dsi) {
ret = 0;
} else {
......@@ -109,19 +105,16 @@ static int dsi_mgr_setup_components(int id)
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
if (IS_ERR(src_pll))
return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy);
if (ret)
return ret;
ret = msm_dsi_host_set_src_pll(other_dsi->host, src_pll);
ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy);
}
return ret;
}
static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
......@@ -130,7 +123,7 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
return ret;
......@@ -143,7 +136,6 @@ dsi_mgr_phy_enable(int id,
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
int ret;
/* In case of dual DSI, some registers in PHY1 have been programmed
......@@ -156,11 +148,11 @@ dsi_mgr_phy_enable(int id,
msm_dsi_host_reset_phy(mdsi->host);
msm_dsi_host_reset_phy(sdsi->host);
ret = enable_phy(mdsi, src_pll_id,
ret = enable_phy(mdsi,
&shared_timings[DSI_CLOCK_MASTER]);
if (ret)
return ret;
ret = enable_phy(sdsi, src_pll_id,
ret = enable_phy(sdsi,
&shared_timings[DSI_CLOCK_SLAVE]);
if (ret) {
msm_dsi_phy_disable(mdsi->phy);
......@@ -169,7 +161,7 @@ dsi_mgr_phy_enable(int id,
}
} else {
msm_dsi_host_reset_phy(msm_dsi->host);
ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
ret = enable_phy(msm_dsi, &shared_timings[id]);
if (ret)
return ret;
}
......@@ -505,7 +497,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_pll *src_pll;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
......@@ -539,9 +530,8 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
/* Save PLL status if it is a clock source */
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
msm_dsi_pll_save_state(src_pll);
/* Save PHY status if it is a clock source */
msm_dsi_phy_pll_save_state(msm_dsi->phy);
ret = msm_dsi_host_power_off(host);
if (ret)
......
......@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include "dsi_phy.h"
......@@ -460,23 +461,6 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
int phy_id = phy->id;
u32 val;
if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
return;
val = dsi_phy_read(phy->base + reg);
if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
dsi_phy_write(phy->base + reg, val | bit_mask);
else
dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
......@@ -637,24 +621,6 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy)
return -EINVAL;
}
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
int ret = 0;
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
}
fail:
return ret;
}
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
......@@ -670,6 +636,14 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (!match)
return -ENODEV;
phy->provided_clocks = devm_kzalloc(dev,
struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
GFP_KERNEL);
if (!phy->provided_clocks)
return -ENOMEM;
phy->provided_clocks->num = NUM_PROVIDED_CLKS;
phy->cfg = match->data;
phy->pdev = pdev;
......@@ -691,6 +665,31 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
phy->pll_base = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR(phy->pll_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
ret = -ENOMEM;
goto fail;
}
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", "DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
ret = -ENOMEM;
goto fail;
}
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
ret = -ENOMEM;
goto fail;
}
}
ret = dsi_phy_regulator_init(phy);
if (ret)
goto fail;
......@@ -702,12 +701,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
if (phy->cfg->ops.init) {
ret = phy->cfg->ops.init(phy);
if (ret)
goto fail;
}
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
......@@ -715,12 +708,21 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (ret)
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll)) {
DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
phy->pll = NULL;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret) {
DRM_DEV_INFO(dev,
"%s: pll init failed: %d, need separate pll clk driver\n",
__func__, ret);
goto fail;
}
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
goto fail;
}
dsi_phy_disable_resource(phy);
......@@ -733,23 +735,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return ret;
}
static int dsi_phy_driver_remove(struct platform_device *pdev)
{
struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
if (phy && phy->pll) {
msm_dsi_pll_destroy(phy->pll);
phy->pll = NULL;
}
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.remove = dsi_phy_driver_remove,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
......@@ -766,7 +753,7 @@ void __exit msm_dsi_phy_driver_unregister(void)
platform_driver_unregister(&dsi_phy_platform_driver);
}
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct device *dev = &phy->pdev->dev;
......@@ -789,7 +776,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
ret = phy->cfg->ops.enable(phy, clk_req);
if (ret) {
DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
......@@ -802,9 +789,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
* source.
*/
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
ret = msm_dsi_phy_pll_restore_state(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
......@@ -841,17 +828,43 @@ void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
sizeof(*shared_timings));
}
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
if (!phy)
return NULL;
return phy->pll;
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc)
{
if (phy)
phy->usecase = uc;
}
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
if (byte_clk_provider)
*byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
if (pixel_clk_provider)
*pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
return -EINVAL;
}
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
if (phy->cfg->ops.save_pll_state) {
phy->cfg->ops.save_pll_state(phy);
phy->state_saved = true;
}
}
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
{
int ret;
if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
ret = phy->cfg->ops.restore_pll_state(phy);
if (ret)
return ret;
phy->state_saved = false;
}
return 0;
}
......@@ -6,37 +6,38 @@
#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include "dsi.h"
#define dsi_phy_read(offset) msm_readl((offset))
#define dsi_phy_write(offset, data) msm_writel((data), (offset))
/* v3.0.0 10nm implementation that requires the old timings settings */
#define V3_0_0_10NM_OLD_TIMINGS_QUIRK BIT(0)
#define dsi_phy_write_udelay(offset, data, delay_us) { msm_writel((data), (offset)); udelay(delay_us); }
#define dsi_phy_write_ndelay(offset, data, delay_ns) { msm_writel((data), (offset)); ndelay(delay_ns); }
struct msm_dsi_phy_ops {
int (*init) (struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
int (*pll_init)(struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req);
void (*disable)(struct msm_dsi_phy *phy);
void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy);
};
struct msm_dsi_phy_cfg {
enum msm_dsi_phy_type type;
struct dsi_reg_config reg_cfg;
struct msm_dsi_phy_ops ops;
/*
* Each cell {phy_id, pll_id} of the truth table indicates
* if the source PLL selection bit should be set for each PHY.
* Fill default H/W values in illegal cells, eg. cell {0, 1}.
*/
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
unsigned long min_pll_rate;
unsigned long max_pll_rate;
const resource_size_t io_start[DSI_MAX];
const int num_dsi_phy;
const int quirks;
bool has_phy_regulator;
bool has_phy_lane;
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
......@@ -74,9 +75,14 @@ struct msm_dsi_dphy_timing {
u8 hs_halfbyte_en_ckln;
};
#define DSI_BYTE_PLL_CLK 0
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
struct msm_dsi_phy {
struct platform_device *pdev;
void __iomem *base;
void __iomem *pll_base;
void __iomem *reg_base;
void __iomem *lane_base;
int id;
......@@ -90,7 +96,12 @@ struct msm_dsi_phy {
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
struct msm_dsi_pll *pll;
struct clk_hw *vco_hw;
bool pll_on;
struct clk_hw_onecell_data *provided_clocks;
bool state_saved;
};
/*
......@@ -104,9 +115,5 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
#endif /* __DSI_PHY_H__ */
......@@ -63,13 +63,14 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
}
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
int i;
void __iomem *base = phy->base;
u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
u32 val;
DBG("");
......@@ -83,9 +84,12 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
msm_dsi_phy_set_src_pll(phy, src_pll_id,
REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
......@@ -125,8 +129,7 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
}
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.type = MSM_DSI_PHY_20NM,
.src_pll_truthtable = { {false, true}, {false, true} },
.has_phy_regulator = true,
.reg_cfg = {
.num = 2,
.regs = {
......@@ -137,7 +140,6 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
.io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
#include "dsi_pll.h"
static int dsi_pll_enable(struct msm_dsi_pll *pll)
{
int i, ret = 0;
/*
* Certain PLLs do not allow VCO rate update when it is on.
* Keep track of their status to turn on/off after set rate success.
*/
if (unlikely(pll->pll_on))
return 0;
/* Try all enable sequences until one succeeds */
for (i = 0; i < pll->en_seq_cnt; i++) {
ret = pll->enable_seqs[i](pll);
DBG("DSI PLL %s after sequence #%d",
ret ? "unlocked" : "locked", i + 1);
if (!ret)
break;
}
if (ret) {
DRM_ERROR("DSI PLL failed to lock\n");
return ret;
}
pll->pll_on = true;
return 0;
}
static void dsi_pll_disable(struct msm_dsi_pll *pll)
{
if (unlikely(!pll->pll_on))
return;
pll->disable_seq(pll);
pll->pll_on = false;
}
/*
* DSI PLL Helper functions
*/
long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
if (rate < pll->min_rate)
return pll->min_rate;
else if (rate > pll->max_rate)
return pll->max_rate;
else
return rate;
}
int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
return dsi_pll_enable(pll);
}
void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
dsi_pll_disable(pll);
}
void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
struct clk **clks, u32 num_clks)
{
of_clk_del_provider(pdev->dev.of_node);
if (!num_clks || !clks)
return;
do {
clk_unregister(clks[--num_clks]);
clks[num_clks] = NULL;
} while (num_clks);
}
/*
* DSI PLL API
*/
int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
if (pll->get_provider)
return pll->get_provider(pll,
byte_clk_provider,
pixel_clk_provider);
return -EINVAL;
}
void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
{
if (pll->destroy)
pll->destroy(pll);
}
void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
{
if (pll->save_state) {
pll->save_state(pll);
pll->state_saved = true;
}
}
int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
{
int ret;
if (pll->restore_state && pll->state_saved) {
ret = pll->restore_state(pll);
if (ret)
return ret;
pll->state_saved = false;
}
return 0;
}
int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc)
{
if (pll->set_usecase)
return pll->set_usecase(pll, uc);
return 0;
}
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
struct device *dev = &pdev->dev;
struct msm_dsi_pll *pll;
switch (type) {
case MSM_DSI_PHY_28NM_HPM:
case MSM_DSI_PHY_28NM_LP:
pll = msm_dsi_pll_28nm_init(pdev, type, id);
break;
case MSM_DSI_PHY_28NM_8960:
pll = msm_dsi_pll_28nm_8960_init(pdev, id);
break;
case MSM_DSI_PHY_14NM:
pll = msm_dsi_pll_14nm_init(pdev, id);
break;
case MSM_DSI_PHY_10NM:
pll = msm_dsi_pll_10nm_init(pdev, id);
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
pll = msm_dsi_pll_7nm_init(pdev, type, id);
break;
default:
pll = ERR_PTR(-ENXIO);
break;
}
if (IS_ERR(pll)) {
DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}
pll->type = type;
DBG("DSI:%d PLL registered", id);
return pll;
}
......@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
int ret;
ret = mutex_lock_interruptible(&priv->mm_lock);
ret = mutex_lock_interruptible(&priv->obj_lock);
if (ret)
return ret;
if (gpu) {
seq_printf(m, "Active Objects (%s):\n", gpu->name);
msm_gem_describe_objects(&gpu->active_list, m);
}
seq_printf(m, "Inactive Objects:\n");
msm_gem_describe_objects(&priv->inactive_dontneed, m);
msm_gem_describe_objects(&priv->inactive_willneed, m);
msm_gem_describe_objects(&priv->objects, m);
mutex_unlock(&priv->mm_lock);
mutex_unlock(&priv->obj_lock);
return 0;
}
......
......@@ -33,6 +33,7 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct msm_gem_stats stats = {};
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
......@@ -42,7 +43,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
msm_gem_describe(fb->obj[i], m);
msm_gem_describe(fb->obj[i], m, &stats);
}
}
#endif
......
......@@ -251,6 +251,8 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
gpu->suspend_count++;
return 0;
}
......
......@@ -152,6 +152,8 @@ struct msm_gpu {
ktime_t time;
} devfreq;
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
......
......@@ -76,6 +76,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_NR_RINGS 0x07
#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
#define MSM_PARAM_FAULTS 0x09
#define MSM_PARAM_SUSPENDS 0x0a
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
......