Commit f83493f7 authored by Dave Airlie

Merge tag 'drm-msm-next-2022-05-09' of https://gitlab.freedesktop.org/drm/msm into drm-next

- Fourcc modifier for tiled but not compressed layouts
- Support for userspace allocated IOVA (GPU virtual address)
- Devfreq clamp_to_idle fix
- DPU: DSC (Display Stream Compression) support
- DPU: inline rotation support on SC7280
- DPU: update DP timings to follow vendor recommendations
- DP, DPU: add support for wide bus (on newer chipsets)
- DP: eDP support
- Merge DPU1 and MDP5 MDSS driver, make dpu/mdp device the master
  component
- MDSS: optionally reset the IP block at bootup to drop
  bootloader state
- Properly register and unregister internal bridges in the DRM framework
- Complete DPU IRQ cleanup
- DP: conversion to use drm_bridge and drm_bridge_connector
- eDP: drop old eDP parts again
- DPU: writeback support
- Misc small fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvJCr_1D8d0dgmyQC5HD4gmXeZw=bFV_CNCfceZbpMxRw@mail.gmail.com
parents d53b8e19 24df1201
......@@ -66,6 +66,10 @@ properties:
interconnect-names:
const: mdp0-mem
resets:
items:
- description: MDSS_CORE reset
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
......
......@@ -65,6 +65,10 @@ properties:
interconnect-names:
const: mdp0-mem
resets:
items:
- description: MDSS_CORE reset
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
......
......@@ -64,6 +64,10 @@ properties:
interconnect-names:
const: mdp0-mem
resets:
items:
- description: MDSS_CORE reset
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
......
......@@ -57,6 +57,10 @@ properties:
ranges: true
resets:
items:
- description: MDSS_CORE reset
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
......
......@@ -6258,8 +6258,9 @@ F: drivers/gpu/drm/tiny/panel-mipi-dbi.c
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
M: Sean Paul <sean@poorly.run>
R: Abhinav Kumar <quic_abhinavk@quicinc.com>
M: Abhinav Kumar <quic_abhinavk@quicinc.com>
M: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
R: Sean Paul <sean@poorly.run>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
......
......@@ -155,7 +155,6 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
kwb_conn->wb_layer = kcrtc->master->wb_layer;
wb_conn = &kwb_conn->base;
wb_conn->encoder.possible_crtcs = BIT(drm_crtc_index(&kcrtc->base));
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
......@@ -164,7 +163,8 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
&komeda_wb_encoder_helper_funcs,
formats, n_formats);
formats, n_formats,
BIT(drm_crtc_index(&kcrtc->base)));
komeda_put_fourcc_list(formats);
if (err) {
kfree(kwb_conn);
......
......@@ -212,7 +212,6 @@ int malidp_mw_connector_init(struct drm_device *drm)
if (!malidp->dev->hw->enable_memwrite)
return 0;
malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
drm_connector_helper_add(&malidp->mw_connector.base,
&malidp_mw_connector_helper_funcs);
......@@ -223,7 +222,8 @@ int malidp_mw_connector_init(struct drm_device *drm)
ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
&malidp_mw_connector_funcs,
&malidp_mw_encoder_helper_funcs,
formats, n_formats);
formats, n_formats,
1 << drm_crtc_index(&malidp->crtc));
kfree(formats);
if (ret)
return ret;
......
......@@ -157,6 +157,7 @@ static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
* @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
* @possible_crtcs: possible crtcs for the internal writeback encoder
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
......@@ -174,7 +175,64 @@ int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
const u32 *formats, int n_formats)
const u32 *formats, int n_formats,
u32 possible_crtcs)
{
int ret = 0;
drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
wb_connector->encoder.possible_crtcs = possible_crtcs;
ret = drm_encoder_init(dev, &wb_connector->encoder,
&drm_writeback_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
ret = drm_writeback_connector_init_with_encoder(dev, wb_connector, &wb_connector->encoder,
con_funcs, formats, n_formats);
if (ret)
drm_encoder_cleanup(&wb_connector->encoder);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init);
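For drivers that keep using the combined helper, the only change is the new trailing possible_crtcs argument, which the helper now applies to the internal encoder itself. A minimal sketch, assuming a driver context with one CRTC (all my_-prefixed names are placeholders):

	static const u32 my_wb_formats[] = { DRM_FORMAT_XRGB8888 };

	/* The helper creates and manages the internal virtual encoder. */
	ret = drm_writeback_connector_init(drm, &my_ctx->wb_conn,
					   &my_wb_connector_funcs,
					   &my_wb_enc_helper_funcs,
					   my_wb_formats,
					   ARRAY_SIZE(my_wb_formats),
					   BIT(drm_crtc_index(&my_ctx->crtc)));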
/**
* drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
* a custom encoder
*
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @enc: handle to the already initialized drm encoder
* @con_funcs: Connector funcs vtable
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
* type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
* values.
*
* This function assumes that the drm_writeback_connector's encoder has already been
* created and initialized before invoking this function.
*
* In addition, this function also assumes that callers of this API will manage
* assigning the encoder helper functions, possible_crtcs and any other encoder
* specific operation.
*
* Drivers should always use this function instead of drm_connector_init() to
* set up writeback connectors if they want to manage themselves the lifetime of the
* associated encoder.
*
* Returns: 0 on success, or a negative error code
*/
int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
const struct drm_connector_funcs *con_funcs, const u32 *formats,
int n_formats)
{
struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
......@@ -189,12 +247,6 @@ int drm_writeback_connector_init(struct drm_device *dev,
if (IS_ERR(blob))
return PTR_ERR(blob);
drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
ret = drm_encoder_init(dev, &wb_connector->encoder,
&drm_writeback_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
goto fail;
connector->interlace_allowed = 0;
......@@ -203,8 +255,7 @@ int drm_writeback_connector_init(struct drm_device *dev,
if (ret)
goto connector_fail;
ret = drm_connector_attach_encoder(connector,
&wb_connector->encoder);
ret = drm_connector_attach_encoder(connector, enc);
if (ret)
goto attach_fail;
......@@ -233,12 +284,10 @@ int drm_writeback_connector_init(struct drm_device *dev,
attach_fail:
drm_connector_cleanup(connector);
connector_fail:
drm_encoder_cleanup(&wb_connector->encoder);
fail:
drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init);
EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
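Drivers that need full control over the encoder (as the DPU writeback support added later in this series does) initialize and configure their own encoder first, then attach it through the new entry point. A hedged sketch, again with placeholder my_-prefixed names:

	ret = drm_encoder_init(drm, &my_enc, &my_enc_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret)
		return ret;
	drm_encoder_helper_add(&my_enc, &my_enc_helper_funcs);
	my_enc.possible_crtcs = BIT(drm_crtc_index(&my_crtc));

	/* The caller keeps ownership of the encoder's lifetime. */
	ret = drm_writeback_connector_init_with_encoder(drm, &my_wb_conn,
							&my_enc, &my_conn_funcs,
							my_wb_formats,
							ARRAY_SIZE(my_wb_formats));
	if (ret)
		drm_encoder_cleanup(&my_enc);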
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
......
......@@ -12,6 +12,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
......@@ -48,12 +49,39 @@ config DRM_MSM_GPU_SUDO
Only use this if you are a driver developer. This should *not*
be enabled for production kernels. If unsure, say N.
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
config DRM_MSM_MDSS
bool
depends on DRM_MSM
default n
config DRM_MSM_MDP4
bool "Enable MDP4 support in MSM DRM driver"
depends on DRM_MSM
default y
help
Choose this option to enable HDCP state machine
Compile in support for the Mobile Display Processor v4 (MDP4) in
the MSM DRM driver. It is the older display controller found in
devices using APQ8064/MSM8960/MSM8x60 platforms.
config DRM_MSM_MDP5
bool "Enable MDP5 support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
default y
help
Compile in support for the Mobile Display Processor v5 (MDP5) in
the MSM DRM driver. It is the display controller found in devices
using e.g. APQ8016/MSM8916/APQ8096/MSM8996/MSM8974/SDM6x0 platforms.
config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
default y
help
Compile in support for the Display Processing Unit in
the MSM DRM driver. It is the display controller found in devices
using e.g. SDM845 and newer platforms.
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
......@@ -118,3 +146,20 @@ config DRM_MSM_DSI_7NM_PHY
help
Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
the platform.
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
default y
help
Compile in support for the HDMI output in the MSM DRM driver. It can
be a primary or a secondary display on the device. Note that this is used
only for the direct HDMI output. If the device outputs HDMI data
through some kind of DSI-to-HDMI bridge, this option can be disabled.
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && DRM_MSM_HDMI
default y
help
Choose this option to enable HDCP state machine
......@@ -16,6 +16,8 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
msm-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
......@@ -27,9 +29,10 @@ msm-y := \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
disp/mdp_format.o \
disp/mdp_kms.o \
msm-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_crtc.o \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
......@@ -37,25 +40,31 @@ msm-y := \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
msm-$(CONFIG_DRM_MSM_MDP5) += \
disp/mdp5/mdp5_cfg.o \
disp/mdp5/mdp5_cmd_encoder.o \
disp/mdp5/mdp5_ctl.o \
disp/mdp5/mdp5_crtc.o \
disp/mdp5/mdp5_encoder.o \
disp/mdp5/mdp5_irq.o \
disp/mdp5/mdp5_mdss.o \
disp/mdp5/mdp5_kms.o \
disp/mdp5/mdp5_pipe.o \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
msm-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
disp/dpu1/dpu_encoder_phys_cmd.o \
disp/dpu1/dpu_encoder_phys_vid.o \
disp/dpu1/dpu_encoder_phys_wb.o \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
disp/dpu1/dpu_hw_lm.o \
......@@ -66,11 +75,19 @@ msm-y := \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_hw_wb.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
disp/dpu1/dpu_writeback.o
msm-$(CONFIG_DRM_MSM_MDSS) += \
msm_mdss.o \
msm-y += \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
msm_atomic.o \
......@@ -118,12 +135,10 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
disp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/phy/dsi_phy.o \
disp/mdp5/mdp5_cmd_encoder.o
dsi/phy/dsi_phy.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
......
......@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
return;
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
......@@ -1662,28 +1662,23 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles, busy_time;
u64 busy_cycles;
/* Only read the gpu busy if the hardware is already active */
if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) {
*out_sample_rate = 1;
return 0;
}
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
busy_time = busy_cycles - gpu->devfreq.busy_cycles;
do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);
gpu->devfreq.busy_cycles = busy_cycles;
*out_sample_rate = clk_get_rate(gpu->core_clk);
pm_runtime_put(&gpu->pdev->dev);
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
return (unsigned long)busy_time;
return busy_cycles;
}
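The gpu_busy() contract change means the backend no longer tracks busy_cycles state or converts counts to time; it returns the raw counter plus the counter's clock rate, and the devfreq core differentiates successive samples. Roughly, as a sketch (not the exact core code):

	/* Convert two busy-cycle samples into busy time in microseconds. */
	static u64 busy_time_us(u64 cycles, u64 prev_cycles,
				unsigned long sample_rate)
	{
		u64 busy_time = cycles - prev_cycles;

		do_div(busy_time, sample_rate / 1000000);
		return busy_time;
	}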
static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
......
......@@ -1172,7 +1172,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return PTR_ERR(bo->obj);
ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
return ret;
......
......@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
......@@ -1649,12 +1649,14 @@ static void a6xx_destroy(struct msm_gpu *gpu)
kfree(a6xx_gpu);
}
static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles, busy_time;
u64 busy_cycles;
/* 19.2MHz */
*out_sample_rate = 19200000;
/* Only read the gpu busy if the hardware is already active */
if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
......@@ -1664,17 +1666,10 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
do_div(busy_time, 192);
gpu->devfreq.busy_cycles = busy_cycles;
pm_runtime_put(a6xx_gpu->gmu.dev);
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
return (unsigned long)busy_time;
return busy_cycles;
}
static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
......
......@@ -229,10 +229,14 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value)
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* No pointer params yet */
if (*len != 0)
return -EINVAL;
switch (param) {
case MSM_PARAM_GPU_ID:
*value = adreno_gpu->info->revn;
......@@ -272,11 +276,24 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
if (ctx->aspace)
*value = gpu->global_faults + ctx->aspace->faults;
else
*value = gpu->global_faults;
return 0;
case MSM_PARAM_SUSPENDS:
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_size;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
......@@ -284,9 +301,50 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value)
uint32_t param, uint64_t value, uint32_t len)
{
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
/* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
return -EINVAL;
break;
default:
if (len != 0)
return -EINVAL;
}
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE: {
char *str, **paramp;
str = kmalloc(len + 1, GFP_KERNEL);
if (!str)
return -ENOMEM;
if (copy_from_user(str, u64_to_user_ptr(value), len)) {
kfree(str);
return -EFAULT;
}
/* Ensure string is null terminated: */
str[len] = '\0';
if (param == MSM_PARAM_COMM) {
paramp = &ctx->comm;
} else {
paramp = &ctx->cmdline;
}
kfree(*paramp);
*paramp = str;
return 0;
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
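From userspace, the string params are passed as a pointer/length pair. A hypothetical sketch of overriding the comm string for a GPU context; the len field of struct drm_msm_param is assumed from the kernel-side plumbing above, so check the uapi header before relying on it:

	const char *comm = "myapp:renderer";
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_COMM,
		.value = (uintptr_t)comm,	/* assumed: user pointer in .value */
		.len   = strlen(comm),		/* assumed field name */
	};

	ioctl(drm_fd, DRM_IOCTL_MSM_SET_PARAM, &req);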
......@@ -533,7 +591,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova;
state->ring[i].seqno = gpu->rb[i]->seqno;
state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]);
......@@ -783,7 +841,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence,
ring->seqno);
ring->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring));
......
......@@ -281,9 +281,9 @@ static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value);
uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value);
uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
......
......@@ -10,46 +10,42 @@
/**
* dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
* @dpu_kms: DPU handle
* @kms: MSM KMS handle
* @return: none
*/
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
void dpu_core_irq_preinstall(struct msm_kms *kms);
/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @kms: MSM KMS handle
* @return: none
*/
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
void dpu_core_irq_uninstall(struct msm_kms *kms);
/**
* dpu_core_irq - core IRQ handler
* @dpu_kms: DPU handle
* @kms: MSM KMS handle
* @return: interrupt handling status
*/
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
irqreturn_t dpu_core_irq(struct msm_kms *kms);
/**
* dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @clear: True to clear the irq after read
* @return: non-zero if irq detected; otherwise no irq detected
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
int irq_idx,
bool clear);
int irq_idx);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must exist until un-registration.
* @irq_cb: IRQ callback function.
* @irq_arg: IRQ callback argument.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
......@@ -57,25 +53,21 @@ u32 dpu_core_irq_read(
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg);
/**
* dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must match with registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
int irq_idx);
/**
* dpu_debugfs_core_irq_init - register core irq debugfs
......
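The net effect of this header change: instead of embedding a struct dpu_irq_callback whose list node had to stay alive for the whole registration, callers now pass a function pointer and argument directly. A sketch of the new pattern with placeholder names:

	static void my_vsync_irq(void *arg, int irq_idx)
	{
		struct dpu_encoder_phys *phys = arg;
		/* count the vblank, wake waiters, etc. */
	}

	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
					     my_vsync_irq, phys);
	if (ret)
		return ret;

	/* later, on teardown: */
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);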
......@@ -204,6 +204,7 @@ static int dpu_crtc_get_crc(struct drm_crtc *crtc)
rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
if (rc) {
if (rc != -ENODATA)
DRM_DEBUG_DRIVER("MISR read failed\n");
return rc;
}
......@@ -869,6 +870,13 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) {
if (!dpu_encoder_is_valid_for_commit(encoder)) {
DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
goto end;
}
}
/*
* Encoder will flush/start now, unless it has a tx pending. If so, it
* may delay and flush at an irq event (e.g. ppdone)
......@@ -891,6 +899,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_encoder_kickoff(encoder);
reinit_completion(&dpu_crtc->frame_done_comp);
end:
DPU_ATRACE_END("crtc_commit");
}
......
......@@ -27,6 +27,7 @@
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
* @dsc: DSC configuration data for DSC-enabled displays
*/
struct msm_display_info {
int intf_type;
......@@ -34,6 +35,7 @@ struct msm_display_info {
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
struct msm_display_dsc_config *dsc;
};
/**
......@@ -170,4 +172,34 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
*/
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
/**
* dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
* @drm_enc: Pointer to previously created drm encoder structure
*/
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
/**
* dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
* @job: Pointer to the current drm writeback job
*/
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
/**
* dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
* @job: Pointer to the current drm writeback job
*/
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
/**
* dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit.
* @drm_enc: Pointer to drm encoder structure
*/
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
#ifndef __DPU_ENCODER_PHYS_H__
#define __DPU_ENCODER_PHYS_H__
#include <drm/drm_writeback.h>
#include <linux/jiffies.h>
#include "dpu_kms.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
......@@ -135,6 +138,11 @@ struct dpu_encoder_phys_ops {
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
int (*get_frame_count)(struct dpu_encoder_phys *phys);
void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job);
void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job);
bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc);
};
/**
......@@ -143,6 +151,7 @@ struct dpu_encoder_phys_ops {
* @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
* @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
* @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
* @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector
*/
enum dpu_intr_idx {
INTR_IDX_VSYNC,
......@@ -150,24 +159,10 @@ enum dpu_intr_idx {
INTR_IDX_UNDERRUN,
INTR_IDX_CTL_START,
INTR_IDX_RDPTR,
INTR_IDX_WB_DONE,
INTR_IDX_MAX,
};
/**
* dpu_encoder_irq - tracking structure for interrupts
* @name: string name of interrupt
* @intr_idx: Encoder interrupt enumeration
* @irq_idx: IRQ interface lookup index from DPU IRQ framework
* will be -EINVAL if IRQ is not registered
* @irq_cb: interrupt callback
*/
struct dpu_encoder_irq {
const char *name;
enum dpu_intr_idx intr_idx;
int irq_idx;
struct dpu_irq_callback cb;
};
/**
* struct dpu_encoder_phys - physical encoder that drives a single INTF block
* tied to a specific panel / sub-panel. Abstract type, sub-classed by
......@@ -179,12 +174,14 @@ struct dpu_encoder_irq {
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
* @hw_intf: Hardware interface to the intf registers
* @hw_wb: Hardware interface to the wb registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
* @wb_idx: Writeback index on dpu hardware
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
......@@ -197,7 +194,7 @@ struct dpu_encoder_irq {
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
* @irq: IRQ tracking structures
* @irq: IRQ indices
*/
struct dpu_encoder_phys {
struct drm_encoder *parent;
......@@ -207,11 +204,13 @@ struct dpu_encoder_phys {
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
struct dpu_hw_wb *hw_wb;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
......@@ -220,7 +219,7 @@ struct dpu_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
struct dpu_encoder_irq irq[INTR_IDX_MAX];
int irq[INTR_IDX_MAX];
};
static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
......@@ -229,6 +228,27 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
return atomic_inc_return(&phys->pending_kickoff_cnt);
}
/**
* struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle
* writeback specific operations
* @base: Baseclass physical encoder structure
* @wbirq_refcount: Reference count of writeback interrupt
* @wb_done_timeout_cnt: number of wb done irq timeout errors
* @wb_cfg: writeback block config to store fb related details
* @wb_conn: backpointer to writeback connector
* @wb_job: backpointer to current writeback job
* @dest: dpu buffer layout for current writeback output buffer
*/
struct dpu_encoder_phys_wb {
struct dpu_encoder_phys base;
atomic_t wbirq_refcount;
int wb_done_timeout_cnt;
struct dpu_hw_wb_cfg wb_cfg;
struct drm_writeback_connector *wb_conn;
struct drm_writeback_job *wb_job;
struct dpu_hw_fmt_layout dest;
};
/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
......@@ -257,6 +277,7 @@ struct dpu_encoder_phys_cmd {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @split_role: Role to play in a split-panel configuration
* @intf_idx: Interface index this phys_enc will control
* @wb_idx: Writeback index this phys_enc will control
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
*/
struct dpu_enc_phys_init_params {
......@@ -265,6 +286,7 @@ struct dpu_enc_phys_init_params {
const struct dpu_encoder_virt_ops *parent_ops;
enum dpu_enc_split_role split_role;
enum dpu_intf intf_idx;
enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
};
......@@ -296,6 +318,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
* @init: Pointer to init info structure with initialization params
*/
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_helper_trigger_start - control start helper function
* This helper function may be optionally specified by physical
......@@ -314,13 +343,23 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
/* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
!dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
}
/**
* dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
* This helper function is used by physical encoder to get DSC blocks mask
* used for this encoder.
* @phys_enc: Pointer to physical encoder structure
*/
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
/**
* dpu_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
......@@ -345,30 +384,20 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
* dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
* note: will call dpu_encoder_helper_report_irq_timeout on timeout
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @irq: IRQ index
* @func: IRQ callback to be called in case of timeout
* @wait_info: wait info struct
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx,
int irq,
void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info);
/**
* dpu_encoder_helper_register_irq - register and enable an irq
* dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
/**
* dpu_encoder_helper_unregister_irq - unregister and disable an irq
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
#endif /* __dpu_encoder_phys_H__ */
......@@ -62,6 +62,13 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
true,
phys_enc->hw_pp->idx);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
......@@ -140,19 +147,13 @@ static void dpu_encoder_phys_cmd_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_irq *irq;
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->irq_idx = phys_enc->hw_pp->caps->intr_done;
phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->irq_idx = phys_enc->hw_pp->caps->intr_rdptr;
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
......@@ -192,7 +193,8 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
msm_disp_snapshot_state(drm_enc->dev);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR]);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
......@@ -219,7 +221,9 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_PINGPONG],
dpu_encoder_phys_cmd_pp_tx_done_irq,
&wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
......@@ -258,10 +262,13 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
enable ? "true" : "false", refcount);
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR],
dpu_encoder_phys_cmd_pp_rd_ptr_irq,
phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_RDPTR]);
end:
if (ret) {
......@@ -282,21 +289,31 @@ static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
enable, atomic_read(&phys_enc->vblank_refcount));
if (enable) {
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_PINGPONG],
dpu_encoder_phys_cmd_pp_tx_done_irq,
phys_enc);
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN],
dpu_encoder_phys_cmd_underrun_irq,
phys_enc);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_encoder_helper_register_irq(phys_enc,
INTR_IDX_CTL_START);
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START],
dpu_encoder_phys_cmd_ctl_start_irq,
phys_enc);
} else {
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_CTL_START);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START]);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN]);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_PINGPONG]);
}
}
......@@ -488,6 +505,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_ctl *ctl;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
......@@ -504,6 +522,17 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp->ops.enable_tearcheck)
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
phys_enc->hw_intf->ops.bind_pingpong_blk(
phys_enc->hw_intf,
false,
phys_enc->hw_pp->idx);
ctl = phys_enc->hw_ctl;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
}
phys_enc->enable_state = DPU_ENC_DISABLED;
}
......@@ -623,7 +652,9 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_CTL_START],
dpu_encoder_phys_cmd_ctl_start_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
......@@ -681,7 +712,9 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
atomic_inc(&cmd_enc->pending_vblank_cnt);
rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
rc = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_RDPTR],
dpu_encoder_phys_cmd_pp_rd_ptr_irq,
&wait_info);
return rc;
......@@ -731,7 +764,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
struct dpu_encoder_irq *irq;
int i, ret = 0;
DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
......@@ -755,32 +787,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->enc_spinlock = p->enc_spinlock;
cmd_enc->stream_sel = 0;
phys_enc->enable_state = DPU_ENC_DISABLED;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->name = "ctl_start";
irq->intr_idx = INTR_IDX_CTL_START;
irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->name = "pp_done";
irq->intr_idx = INTR_IDX_PINGPONG;
irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->name = "pp_rd_ptr";
irq->intr_idx = INTR_IDX_RDPTR;
irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
......
......@@ -91,25 +91,27 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
/*
* if (vid_enc->hw->cap->type == INTF_EDP) {
* display_v_start += mode->htotal - mode->hsync_start;
* display_v_end -= mode->hsync_start - mode->hdisplay;
* }
*/
/* for DP/EDP, Shift timings to align it to bottom right */
if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
(phys_enc->hw_intf->cap->type == INTF_EDP)) {
if (phys_enc->hw_intf->cap->type == INTF_DP) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
/*
* for DP, divide the horizontal parameters by 2 when
* widebus is enabled
*/
if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
timing->width = timing->width >> 1;
timing->xres = timing->xres >> 1;
timing->h_back_porch = timing->h_back_porch >> 1;
timing->h_front_porch = timing->h_front_porch >> 1;
timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
}
}
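With wide bus the interface clocks out two pixels per cycle, so the horizontal timing registers are programmed in units of pixel pairs. An illustrative example with invented numbers:

	/*
	 * 4096x2160 DP mode, wide bus enabled:
	 *   width             4096 -> 2048
	 *   h_back_porch        80 ->   40
	 *   h_front_porch       88 ->   44
	 *   hsync_pulse_width   32 ->   16
	 * Vertical timings are unchanged.
	 */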
static u32 get_horizontal_total(const struct intf_timing_params *timing)
......@@ -353,13 +355,9 @@ static void dpu_encoder_phys_vid_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_irq *irq;
irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
......@@ -385,10 +383,13 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC],
dpu_encoder_phys_vid_vblank_irq,
phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_VSYNC);
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC]);
end:
if (ret) {
......@@ -461,7 +462,9 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
}
/* Wait for kickoff to complete */
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
ret = dpu_encoder_helper_wait_for_irq(phys_enc,
phys_enc->irq[INTR_IDX_VSYNC],
dpu_encoder_phys_vid_vblank_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
......@@ -513,7 +516,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
msm_disp_snapshot_state(drm_enc->dev);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_VSYNC]);
}
}
......@@ -602,10 +606,14 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
if (WARN_ON(ret))
return;
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN],
dpu_encoder_phys_vid_underrun_irq,
phys_enc);
} else {
dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_UNDERRUN]);
}
}
......@@ -669,7 +677,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_irq *irq;
int i;
if (!p) {
......@@ -695,22 +702,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_VIDEO;
phys_enc->enc_spinlock = p->enc_spinlock;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->name = "vsync_irq";
irq->intr_idx = INTR_IDX_VSYNC;
irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
......
......@@ -20,6 +20,28 @@ const struct dpu_format *dpu_get_dpu_format_ext(
#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
/**
* dpu_find_format - validate if the pixel format is supported
* @format: dpu format
* @supported_formats: supported formats by dpu HW
* @num_formats: total number of formats
*
* Return: false if not valid format, true on success
*/
static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
size_t num_formats)
{
int i;
for (i = 0; i < num_formats; i++) {
/* check for valid formats supported */
if (format == supported_formats[i])
return true;
}
return false;
}
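A typical use of this helper, as a sketch, is rejecting an unsupported fourcc early against a catalog entry's format list (wb_cfg here stands for any block config carrying a format_list/num_formats pair):

	if (!dpu_find_format(fb->format->format,
			     wb_cfg->format_list, wb_cfg->num_formats))
		return -EINVAL;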
/**
* dpu_get_msm_format - get an dpu_format by its msm_format base
* callback function registers with the msm_kms layer
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
......@@ -35,6 +36,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
(VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
......@@ -117,6 +121,16 @@
BIT(MDP_AD4_0_INTR) | \
BIT(MDP_AD4_1_INTR))
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP) | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
......@@ -203,6 +217,45 @@ static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_YVU420,
};
static const u32 rotation_v2_formats[] = {
DRM_FORMAT_NV12,
/* TODO add formats after validation */
};
static const uint32_t wb2_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_XBGR4444,
};
/*************************************************************
* DPU sub blocks config
*************************************************************/
......@@ -223,6 +276,17 @@ static const struct dpu_caps msm8998_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
static const struct dpu_caps qcm2290_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
.ubwc_version = DPU_HW_UBWC_VER_20,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
......@@ -338,17 +402,6 @@ static const struct dpu_mdp_cfg msm8998_mdp[] = {
},
};
static const struct dpu_caps qcm2290_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
.ubwc_version = DPU_HW_UBWC_VER_20,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
......@@ -440,6 +493,8 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2BC, .bit_off = 20},
.clk_ctrls[DPU_CLK_CTRL_WB2] = {
.reg_off = 0x3B8, .bit_off = 24},
},
};
......@@ -642,7 +697,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
*************************************************************/
/* SSPP common configuration */
#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
......@@ -660,6 +714,27 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
.id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = STRCAT("sspp_csc", num), \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = rot_cfg, \
}
#define _DMA_SBLK(num, sdma_pri) \
......@@ -684,6 +759,12 @@ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
_VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
.rot_format_list = rotation_v2_formats,
};
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
......@@ -751,6 +832,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
_VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
_VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
static const struct dpu_sspp_cfg sc7180_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
......@@ -791,8 +875,8 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
};
static const struct dpu_sspp_cfg sc7280_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK,
sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
......@@ -1117,6 +1201,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
/*************************************************************
* DSC sub blocks config
*************************************************************/
#define DSC_BLK(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x140, \
.features = 0, \
}
static struct dpu_dsc_cfg sdm845_dsc[] = {
DSC_BLK("dsc_0", DSC_0, 0x80000),
DSC_BLK("dsc_1", DSC_1, 0x80400),
DSC_BLK("dsc_2", DSC_2, 0x80800),
DSC_BLK("dsc_3", DSC_3, 0x80c00),
};
/*************************************************************
* INTF sub blocks config
*************************************************************/
......@@ -1179,6 +1281,29 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
};
/*************************************************************
* Writeback blocks config
*************************************************************/
#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
__xin_id, vbif_id, _reg, _wb_done_bit) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x2c8, \
.features = _features, \
.format_list = wb2_formats, \
.num_formats = ARRAY_SIZE(wb2_formats), \
.clk_ctrl = _clk_ctrl, \
.xin_id = __xin_id, \
.vbif_idx = vbif_id, \
.maxlinewidth = DEFAULT_DPU_LINE_WIDTH, \
.intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
}
static const struct dpu_wb_cfg sm8250_wb[] = {
WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
VBIF_RT, MDP_SSPP_TOP0_INTR, 4),
};
/*************************************************************
* VBIF sub blocks config
*************************************************************/
......@@ -1643,6 +1768,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mixer = sdm845_lm,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.dsc_count = ARRAY_SIZE(sdm845_dsc),
.dsc = sdm845_dsc,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
......@@ -1775,6 +1902,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.wb_count = ARRAY_SIZE(sm8250_wb),
.wb = sm8250_wb,
.reg_dma_count = 1,
.dma_cfg = sm8250_regdma,
.perf = sm8250_perf_data,
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
/*
* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_CATALOG_H
......@@ -112,6 +114,7 @@ enum {
* @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
* @DPU_SSPP_CDP Supports client driven prefetch
* @DPU_SSPP_INLINE_ROTATION Supports inline rotation
* @DPU_SSPP_MAX maximum value
*/
enum {
......@@ -132,6 +135,7 @@ enum {
DPU_SSPP_TS_PREFILL,
DPU_SSPP_TS_PREFILL_REC1,
DPU_SSPP_CDP,
DPU_SSPP_INLINE_ROTATION,
DPU_SSPP_MAX
};
......@@ -211,6 +215,42 @@ enum {
DPU_INTF_MAX
};
/**
* WB sub-blocks and features
* @DPU_WB_LINE_MODE Writeback module supports line/linear mode
* @DPU_WB_BLOCK_MODE Writeback module supports block mode read
* @DPU_WB_CHROMA_DOWN, Writeback chroma down block,
* @DPU_WB_DOWNSCALE, Writeback integer downscaler,
* @DPU_WB_DITHER, Dither block
* @DPU_WB_TRAFFIC_SHAPER, Writeback traffic shaper block
* @DPU_WB_UBWC, Writeback Universal bandwidth compression
* @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace
* @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha
* @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
* the destination image
* @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
* @DPU_WB_CDP Writeback supports client driven prefetch
* @DPU_WB_INPUT_CTRL Writeback supports selecting which pp block its
* input pixel data arrives from.
* @DPU_WB_CROP CWB supports cropping
* @DPU_WB_MAX maximum value
*/
enum {
DPU_WB_LINE_MODE = 0x1,
DPU_WB_BLOCK_MODE,
DPU_WB_UBWC,
DPU_WB_YUV_CONFIG,
DPU_WB_PIPE_ALPHA,
DPU_WB_XY_ROI_OFFSET,
DPU_WB_QOS,
DPU_WB_QOS_8LVL,
DPU_WB_CDP,
DPU_WB_INPUT_CTRL,
DPU_WB_CROP,
DPU_WB_MAX
};
/**
* VBIF sub-blocks and features
* @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
......@@ -314,6 +354,18 @@ struct dpu_qos_lut_tbl {
const struct dpu_qos_lut_entry *entries;
};
/**
* struct dpu_rotation_cfg - define inline rotation config
* @rot_maxheight: max pre rotated height allowed for rotation
* @rot_num_formats: number of elements in @rot_format_list
* @rot_format_list: list of supported rotator formats
*/
struct dpu_rotation_cfg {
u32 rot_maxheight;
size_t rot_num_formats;
const u32 *rot_format_list;
};
/**
* struct dpu_caps - define DPU capabilities
* @max_mixer_width max layer mixer line width support.
......@@ -369,6 +421,7 @@ struct dpu_caps {
* @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
* @virt_num_formats: Number of supported formats for virtual planes
* @dpu_rotation_cfg: inline rotation configuration
*/
struct dpu_sspp_sub_blks {
u32 creq_vblank;
......@@ -390,6 +443,7 @@ struct dpu_sspp_sub_blks {
u32 num_formats;
const u32 *virt_format_list;
u32 virt_num_formats;
const struct dpu_rotation_cfg *rotation_cfg;
};
/**
......@@ -444,6 +498,7 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
DPU_CLK_CTRL_REG_DMA,
DPU_CLK_CTRL_WB2,
DPU_CLK_CTRL_MAX,
};
......@@ -561,6 +616,16 @@ struct dpu_merge_3d_cfg {
const struct dpu_merge_3d_sub_blks *sblk;
};
/**
* struct dpu_dsc_cfg - information of DSC blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
*/
struct dpu_dsc_cfg {
DPU_HW_BLK_INFO;
};
/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
......@@ -581,6 +646,28 @@ struct dpu_intf_cfg {
s32 intr_vsync;
};
/**
* struct dpu_wb_cfg - information of writeback blocks
* @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
* @vbif_idx: vbif client index
* @maxlinewidth: max line width supported by writeback block
* @xin_id: bus client identifier
* @intr_wb_done: interrupt index for WB_DONE
* @format_list: list of formats supported by this writeback block
* @num_formats: number of formats supported by this writeback block
* @clk_ctrl: clock control identifier
*/
struct dpu_wb_cfg {
DPU_HW_BLK_INFO;
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
s32 intr_wb_done;
const u32 *format_list;
u32 num_formats;
enum dpu_clk_ctrl_type clk_ctrl;
};
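A hedged catalog sketch for a single writeback instance wired to the new DPU_CLK_CTRL_WB2; every numeric value below is a placeholder:

static const struct dpu_wb_cfg demo_wb[] = {
	{
		.name = "wb_2", .id = WB_2,
		.base = 0x65000, .len = 0x2c8,	/* placeholder offsets */
		.features = BIT(DPU_WB_LINE_MODE) | BIT(DPU_WB_UBWC) |
			    BIT(DPU_WB_QOS) | BIT(DPU_WB_INPUT_CTRL),
		.clk_ctrl = DPU_CLK_CTRL_WB2,
		.xin_id = 6,			/* placeholder bus client */
		.vbif_idx = 0,			/* VBIF_RT in a real catalog */
		.maxlinewidth = 4096,
		.intr_wb_done = -1,		/* taken from the SoC IRQ map */
	},
};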
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
......@@ -757,12 +844,18 @@ struct dpu_mdss_cfg {
u32 merge_3d_count;
const struct dpu_merge_3d_cfg *merge_3d;
u32 dsc_count;
struct dpu_dsc_cfg *dsc;
u32 intf_count;
const struct dpu_intf_cfg *intf;
u32 vbif_count;
const struct dpu_vbif_cfg *vbif;
u32 wb_count;
const struct dpu_wb_cfg *wb;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
......@@ -23,8 +24,12 @@
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
......@@ -34,7 +39,9 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define DSC_IDX 22
#define INTF_IDX 31
#define WB_IDX 16
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
......@@ -126,13 +133,15 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
......@@ -253,6 +262,27 @@ static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
}
}
static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
switch (wb) {
case WB_0:
case WB_1:
case WB_2:
ctx->pending_flush_mask |= BIT(WB_IDX);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
ctx->pending_flush_mask |= BIT(WB_IDX);
}
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
......@@ -502,6 +532,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
......@@ -511,17 +542,32 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->dsc)
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
if (cfg->dsc) {
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
......@@ -537,6 +583,9 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
if (cfg->wb)
intf_cfg |= (cfg->wb & 0x3) + 2;
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
......@@ -554,6 +603,44 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 merge3d_active = 0;
/*
 * This API resets each portion of the CTL path, namely clearing
 * the sspps staged on the lm, the merge_3d block, interfaces,
 * writeback etc., to ensure a clean teardown of the pipeline.
 * To begin with, this is used by writeback to get a proper
 * teardown of the writeback session; upon further validation it
 * can be extended to all interfaces.
 */
if (cfg->merge_3d) {
merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
merge3d_active);
}
dpu_hw_ctl_clear_all_blendstages(ctx);
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
}
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
......@@ -577,15 +664,18 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
......
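For orientation, a hedged caller-side sketch of the new v1 writeback flush/teardown flow wired up above; demo_wb_kickoff and demo_wb_teardown are illustrative helpers, not code from this series:

static void demo_wb_kickoff(struct dpu_hw_ctl *ctl, enum dpu_wb wb)
{
	if (ctl->ops.update_pending_flush_wb)
		ctl->ops.update_pending_flush_wb(ctl, wb);	/* cache the mask bits */
	ctl->ops.trigger_flush(ctl);		/* writes CTL_WB_FLUSH, then CTL_FLUSH */
}

static void demo_wb_teardown(struct dpu_hw_ctl *ctl, enum dpu_wb wb)
{
	struct dpu_hw_intf_cfg cfg = { .wb = wb };

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &cfg);	/* clears the CTL_WB_ACTIVE bit */
}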
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
......@@ -40,13 +41,16 @@ struct dpu_hw_stage_cfg {
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
int stream_sel;
unsigned int dsc;
};
/**
......@@ -99,6 +103,15 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
u32 flushbits);
/**
* OR in the given flushbits to the cached pending_(wb_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : writeback block index
*/
void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
enum dpu_wb blk);
/**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
......@@ -138,6 +151,14 @@ struct dpu_hw_ctl_ops {
void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
/**
* reset ctl_path interface config
* @ctx : ctl path ctx pointer
* @cfg : interface config structure pointer
*/
void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
int (*reset)(struct dpu_hw_ctl *c);
/*
......@@ -189,6 +210,7 @@ struct dpu_hw_ctl_ops {
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @ops: operation list
*/
struct dpu_hw_ctl {
......@@ -202,6 +224,7 @@ struct dpu_hw_ctl {
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
u32 pending_merge_3d_flush_mask;
/* ops */
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2022, Linaro Limited
*/
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_dsc.h"
#define DSC_COMMON_MODE 0x000
#define DSC_ENC 0x004
#define DSC_PICTURE 0x008
#define DSC_SLICE 0x00C
#define DSC_CHUNK_SIZE 0x010
#define DSC_DELAY 0x014
#define DSC_SCALE_INITIAL 0x018
#define DSC_SCALE_DEC_INTERVAL 0x01C
#define DSC_SCALE_INC_INTERVAL 0x020
#define DSC_FIRST_LINE_BPG_OFFSET 0x024
#define DSC_BPG_OFFSET 0x028
#define DSC_DSC_OFFSET 0x02C
#define DSC_FLATNESS 0x030
#define DSC_RC_MODEL_SIZE 0x034
#define DSC_RC 0x038
#define DSC_RC_BUF_THRESH 0x03C
#define DSC_RANGE_MIN_QP 0x074
#define DSC_RANGE_MAX_QP 0x0B0
#define DSC_RANGE_BPG_OFFSET 0x0EC
static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
{
struct dpu_hw_blk_reg_map *c = &dsc->hw;
DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 data, lsb, bpp;
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
if (is_cmd_mode)
initial_lines += 1;
slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is in 6.4 fixed-point format; the 4 LSBs carry the fractional part */
data |= dsc->drm->bits_per_pixel << 12;
lsb = dsc->drm->bits_per_pixel % 4;
bpp = dsc->drm->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
data |= (dsc->drm->block_pred_enable << 7);
data |= (dsc->drm->line_buf_depth << 3);
data |= (dsc->drm->simple_422 << 2);
data |= (dsc->drm->convert_rgb << 1);
data |= dsc->drm->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
data = dsc->drm->pic_width << 16;
data |= dsc->drm->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
data = dsc->drm->slice_width << 16;
data |= dsc->drm->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
data = dsc->drm->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
data = dsc->drm->initial_dec_delay << 16;
data |= dsc->drm->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
data = dsc->drm->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
data = dsc->drm->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
data = dsc->drm->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
data = dsc->drm->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
data = dsc->drm->nfl_bpg_offset << 16;
data |= dsc->drm->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
data = dsc->drm->initial_offset << 16;
data |= dsc->drm->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
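/* e.g. 7 for 8 bpc and 11 for 10 bpc (flatness determination threshold) */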
data = det_thresh_flatness << 10;
data |= dsc->drm->flatness_max_qp << 5;
data |= dsc->drm->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
data = dsc->drm->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
data = dsc->drm->rc_tgt_offset_low << 18;
data |= dsc->drm->rc_tgt_offset_high << 14;
data |= dsc->drm->rc_quant_incr_limit1 << 9;
data |= dsc->drm->rc_quant_incr_limit0 << 4;
data |= dsc->drm->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc)
{
struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
off += 4;
}
off = DSC_RANGE_MIN_QP;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_min_qp);
off += 4;
}
off = DSC_RANGE_MAX_QP;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_max_qp);
off += 4;
}
off = DSC_RANGE_BPG_OFFSET;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
off += 4;
}
}
static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->dsc_count; i++) {
if (dsc == m->dsc[i].id) {
b->base_off = addr;
b->blk_off = m->dsc[i].base;
b->length = m->dsc[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_DSC;
return &m->dsc[i];
}
}
return NULL;
}
static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
unsigned long cap)
{
ops->dsc_disable = dpu_hw_dsc_disable;
ops->dsc_config = dpu_hw_dsc_config;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
};
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_dsc *c;
struct dpu_dsc_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _dsc_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
c->idx = idx;
c->caps = cfg;
_setup_dsc_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
{
kfree(dsc);
}
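For orientation, a hedged sketch of the bring-up/teardown order this API implies; demo_dsc_enable, the DSC_MODE_VIDEO choice, and the zero initial_lines value are illustrative:

static int demo_dsc_enable(struct dpu_mdss_cfg *m, void __iomem *mmio,
			   struct msm_display_dsc_config *cfg)
{
	struct dpu_hw_dsc *hw_dsc = dpu_hw_dsc_init(DSC_0, mmio, m);

	if (IS_ERR(hw_dsc))
		return PTR_ERR(hw_dsc);

	/* program the encoder core, then the rate-control thresholds */
	hw_dsc->ops.dsc_config(hw_dsc, cfg, DSC_MODE_VIDEO, 0);
	hw_dsc->ops.dsc_config_thresh(hw_dsc, cfg);

	/* ... stream runs ... */

	hw_dsc->ops.dsc_disable(hw_dsc);
	dpu_hw_dsc_destroy(hw_dsc);
	return 0;
}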
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020-2022, Linaro Limited */
#ifndef _DPU_HW_DSC_H
#define _DPU_HW_DSC_H
#include <drm/display/drm_dsc.h>
#define DSC_MODE_SPLIT_PANEL BIT(0)
#define DSC_MODE_MULTIPLEX BIT(1)
#define DSC_MODE_VIDEO BIT(2)
struct dpu_hw_dsc;
/**
* struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_dsc_ops {
/**
* dsc_disable - disable dsc
* @hw_dsc: Pointer to dsc context
*/
void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
/**
* dsc_config - configures dsc encoder
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
* @mode: dsc topology mode to be set
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
u32 mode,
u32 initial_lines);
/**
* dsc_config_thresh - programs panel thresholds
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc);
};
struct dpu_hw_dsc {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* dsc */
enum dpu_dsc idx;
const struct dpu_dsc_cfg *caps;
/* ops */
struct dpu_hw_dsc_ops ops;
};
/**
* dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
* @idx: DSC index for which driver object is required
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
* Returns: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_dsc_destroy - destroys dsc driver context
* @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
*/
void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_dsc, base);
}
#endif /* _DPU_HW_DSC_H */
......@@ -151,25 +151,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_irq_callback *cb;
VERB("irq_idx=%d\n", irq_idx);
if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
/*
* Perform registered function callback
*/
list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
......@@ -362,7 +359,7 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
......@@ -389,7 +386,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
intr_status = DPU_REG_READ(&intr->hw,
dpu_intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
if (intr_status && clear)
if (intr_status)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
intr_status);
......@@ -413,24 +410,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;
if (!addr || !m)
return ERR_PTR(-EINVAL);
intr = kzalloc(sizeof(*intr), GFP_KERNEL);
intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
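/* struct_size() sizes the trailing irq_tbl[] flexible array in one overflow-checked allocation */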
if (!intr)
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
GFP_KERNEL);
if (intr->cache_irq_mask == NULL) {
kfree(intr);
return ERR_PTR(-ENOMEM);
}
intr->total_irqs = nirq;
intr->irq_mask = m->mdss_irqs;
......@@ -441,31 +432,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
if (intr) {
kfree(intr->cache_irq_mask);
kfree(intr->irq_cb_tbl);
kfree(intr->irq_counts);
kfree(intr);
}
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg)
{
unsigned long irq_flags;
int ret;
if (!dpu_kms->hw_intr->irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
if (!irq_cb) {
DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
......@@ -477,41 +455,34 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
if (list_is_first(&register_irq_cb->list,
&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
int ret = dpu_hw_intr_enable_irq_locked(
if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return -EBUSY;
}
trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
}
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_register_success(irq_idx);
return 0;
}
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
if (!dpu_kms->hw_intr->irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
......@@ -521,20 +492,20 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */
if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
int ret = dpu_hw_intr_disable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
trace_dpu_core_irq_unregister_callback(irq_idx);
ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
irq_idx);
VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
}
DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
irq_idx, ret);
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_unregister_success(irq_idx);
return 0;
}
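A hedged sketch of the resulting contract, one callback per irq_idx; wb_done_irq and demo_irq are illustrative:

static void wb_done_irq(void *arg, int irq_idx)
{
	/* e.g. complete the writeback job carried in @arg */
}

static int demo_irq(struct dpu_kms *dpu_kms, int irq_idx, void *priv)
{
	int ret;

	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
					     wb_done_irq, priv);
	if (ret)
		return ret;	/* -EBUSY if a callback is already installed */

	/* ... IRQ fires: wb_done_irq(priv, irq_idx) runs ... */

	return dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}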
......@@ -542,24 +513,18 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
struct dpu_irq_callback *cb;
unsigned long irq_flags;
int i, irq_count, cb_count;
if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
return 0;
int i, irq_count;
void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
cb_count = 0;
irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
cb_count++;
irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (irq_count || cb_count)
seq_printf(s, "idx:%d irq:%d cb:%d\n",
i, irq_count, cb_count);
if (irq_count || cb)
seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
......@@ -575,8 +540,9 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
......@@ -584,24 +550,21 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
/* Create irq callbacks for all possible irq_idx */
dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
sizeof(struct list_head), GFP_KERNEL);
dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
}
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
if (!dpu_kms->hw_intr)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);
......
......@@ -44,19 +44,21 @@ enum dpu_hw_intr_reg {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
* @irq_cb_tbl: array of IRQ callbacks lists
* @irq_counts: array of IRQ counts
* @irq_tbl: array of IRQ descriptors: callback, argument and trigger count
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
u32 *cache_irq_mask;
u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
struct list_head *irq_cb_tbl;
atomic_t *irq_counts;
struct {
void (*cb)(void *arg, int irq_idx);
void *arg;
atomic_t count;
} irq_tbl[];
};
/**
......
......@@ -33,6 +33,7 @@
#define INTF_TP_COLOR1 0x05C
#define INTF_CONFIG2 0x060
#define INTF_DISPLAY_DATA_HCTL 0x064
#define INTF_ACTIVE_DATA_HCTL 0x068
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
......@@ -60,6 +61,12 @@
#define INTF_MUX 0x25C
#define INTF_CFG_ACTIVE_H_EN BIT(29)
#define INTF_CFG_ACTIVE_V_EN BIT(30)
#define INTF_CFG2_DATABUS_WIDEN BIT(0)
#define INTF_CFG2_DATA_HCTL_EN BIT(4)
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
......@@ -90,15 +97,23 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
u32 hsync_data_start_x, hsync_data_end_x;
u32 active_h_start, active_h_end;
u32 active_v_start, active_v_end;
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
u32 panel_format;
u32 intf_cfg, intf_cfg2 = 0, display_data_hctl = 0;
u32 intf_cfg, intf_cfg2 = 0;
u32 display_data_hctl = 0, active_data_hctl = 0;
u32 data_width;
bool dp_intf = false;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
if (ctx->cap->type == INTF_DP)
dp_intf = true;
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p->h_front_porch;
vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
......@@ -112,7 +127,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
if (p->width != p->xres) {
if (p->width != p->xres) { /* border fill added */
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
} else {
......@@ -120,7 +135,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
active_h_end = 0;
}
if (p->height != p->yres) {
if (p->height != p->yres) { /* border fill added */
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
} else {
......@@ -130,27 +145,46 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
if (active_h_end) {
active_hctl = (active_h_end << 16) | active_h_start;
intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
intf_cfg |= INTF_CFG_ACTIVE_H_EN;
} else {
active_hctl = 0;
}
if (active_v_end)
intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
intf_cfg |= INTF_CFG_ACTIVE_V_EN;
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
/*
* DATA_HCTL_EN controls data timing which can be different from
* video timing. It is recommended to enable it for all cases, except
* if compression is enabled in 1 pixel per clock mode
*/
if (p->wide_bus_en)
intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
data_width = p->width;
hsync_data_start_x = hsync_start_x;
hsync_data_end_x = hsync_start_x + data_width - 1;
display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
if (dp_intf) {
/* DP timing adjustment */
display_v_start += p->hsync_pulse_width + p->h_back_porch;
display_v_end -= p->h_front_porch;
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
display_v_start += p->hsync_pulse_width + p->h_back_porch;
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
}
den_polarity = 0;
......@@ -180,13 +214,6 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
(COLOR_8BIT << 4) |
(0x21 << 8));
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
intf_cfg2 |= BIT(4);
display_data_hctl = display_hctl;
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
}
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
......@@ -204,6 +231,11 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
}
}
static void dpu_hw_intf_enable_timing_engine(
......
......@@ -30,6 +30,8 @@ struct intf_timing_params {
u32 border_clr;
u32 underflow_clr;
u32 hsync_skew;
bool wide_bus_en;
};
struct intf_prog_fetch {
......
......@@ -138,7 +138,7 @@ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
if (!(ctrl & LM_MISR_CTRL_ENABLE))
return -EINVAL;
return -ENODATA;
if (!(ctrl & LM_MISR_CTRL_STATUS))
return -EINVAL;
......
......@@ -97,6 +97,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
DPU_HW_BLK_MAX,
};
......@@ -176,6 +177,17 @@ enum dpu_ctl {
CTL_MAX
};
enum dpu_dsc {
DSC_NONE = 0,
DSC_0,
DSC_1,
DSC_2,
DSC_3,
DSC_4,
DSC_5,
DSC_MAX
};
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
......@@ -205,14 +217,21 @@ enum dpu_intf {
INTF_MAX
};
/*
* Historically these values correspond to the values written to the
* DISP_INTF_SEL register, which had to be programmed manually. On newer MDP
* generations this register is a NOP, but we keep the values for historical
* reasons.
*/
enum dpu_intf_type {
INTF_NONE = 0x0,
INTF_DSI = 0x1,
INTF_HDMI = 0x3,
INTF_LCDC = 0x5,
/* old eDP found on 8x74 and 8x84 */
INTF_EDP = 0x9,
/* both DP and eDP, handled by the new DP driver */
INTF_DP = 0xa,
INTF_TYPE_MAX,
/* virtual interfaces */
INTF_WB = 0x100,
......@@ -437,5 +456,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
#endif /* _DPU_HW_MDSS_H */
......@@ -28,6 +28,9 @@
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
#define PP_DSC_MODE 0x0a0
#define PP_DCE_DATA_IN_SWAP 0x0ac
#define PP_DCE_DATA_OUT_SWAP 0x0c8
#define PP_DITHER_EN 0x000
#define PP_DITHER_BITDEPTH 0x004
......@@ -245,6 +248,32 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
return line;
}
static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
DPU_REG_WRITE(c, PP_DSC_MODE, 1);
return 0;
}
static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
DPU_REG_WRITE(c, PP_DSC_MODE, 0);
}
static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
int data;
data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
data |= BIT(18); /* endian flip */
DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
return 0;
}
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
unsigned long features)
{
......@@ -256,6 +285,9 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
if (test_bit(DPU_PINGPONG_DITHER, &features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
......
......@@ -124,6 +124,20 @@ struct dpu_hw_pingpong_ops {
*/
void (*setup_dither)(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg);
/**
* Enable DSC
*/
int (*enable_dsc)(struct dpu_hw_pingpong *pp);
/**
* Disable DSC
*/
void (*disable_dsc)(struct dpu_hw_pingpong *pp);
/**
* Setup DSC
*/
int (*setup_dsc)(struct dpu_hw_pingpong *pp);
};
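For orientation, a hedged sketch of the enable order these ops imply for DSC over a ping-pong block; demo_pp_dsc_on is illustrative:

static void demo_pp_dsc_on(struct dpu_hw_pingpong *pp)
{
	if (pp->ops.setup_dsc)
		pp->ops.setup_dsc(pp);	/* PP_DCE_DATA_OUT_SWAP endian flip */
	if (pp->ops.enable_dsc)
		pp->ops.enable_dsc(pp);	/* PP_DSC_MODE = 1 */
}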
struct dpu_hw_merge_3d;
......
......@@ -627,7 +627,7 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cdp_cfg *cfg,
struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index)
{
u32 idx;
......
......@@ -192,22 +192,6 @@ enum {
DPU_SSPP_CDP_PRELOAD_AHEAD_64
};
/**
* struct dpu_hw_pipe_cdp_cfg : CDP configuration
* @enable: true to enable CDP
* @ubwc_meta_enable: true to enable ubwc metadata preload
* @tile_amortize_enable: true to enable amortization control for tile format
* @preload_ahead: number of request to preload ahead
* DPU_SSPP_CDP_PRELOAD_AHEAD_32,
* DPU_SSPP_CDP_PRELOAD_AHEAD_64
*/
struct dpu_hw_pipe_cdp_cfg {
bool enable;
bool ubwc_meta_enable;
bool tile_amortize_enable;
u32 preload_ahead;
};
/**
* struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
* @size: size to prefill in bytes, or zero to disable
......@@ -359,7 +343,7 @@ struct dpu_hw_sspp_ops {
* @index: rectangle index in multirect
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cdp_cfg *cfg,
struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index);
};
......
......@@ -422,3 +422,28 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
}
/**
* _dpu_hw_get_qos_lut - get LUT mapping based on fill level
* @tbl: Pointer to LUT table
* @total_fl: fill level
* Return: LUT setting corresponding to the fill level
*/
u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
u32 total_fl)
{
int i;
if (!tbl || !tbl->nentry || !tbl->entries)
return 0;
for (i = 0; i < tbl->nentry; i++)
if (total_fl <= tbl->entries[i].fl)
return tbl->entries[i].lut;
/* if last fl is zero, use as default */
if (!tbl->entries[i-1].fl)
return tbl->entries[i-1].lut;
return 0;
}
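A hedged worked example of the lookup rule; the table below is fabricated:

static const struct dpu_qos_lut_entry demo_entries[] = {
	{ .fl = 4, .lut = 0x1fff },	/* matches fill levels <= 4 */
	{ .fl = 0, .lut = 0xffff },	/* fl == 0 in the last entry: default */
};

static const struct dpu_qos_lut_tbl demo_tbl = {
	.nentry = ARRAY_SIZE(demo_entries),
	.entries = demo_entries,
};

/*
 * _dpu_hw_get_qos_lut(&demo_tbl, 3)  -> 0x1fff (first matching entry)
 * _dpu_hw_get_qos_lut(&demo_tbl, 10) -> 0xffff (falls through to default)
 */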
......@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include "dpu_hw_mdss.h"
#include "dpu_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1)
......@@ -298,6 +299,21 @@ struct dpu_drm_scaler_v2 {
struct dpu_drm_de_v1 de;
};
/**
* struct dpu_hw_cdp_cfg : CDP configuration
* @enable: true to enable CDP
* @ubwc_meta_enable: true to enable ubwc metadata preload
* @tile_amortize_enable: true to enable amortization control for tile format
* @preload_ahead: number of requests to preload ahead
* DPU_*_CDP_PRELOAD_AHEAD_32,
* DPU_*_CDP_PRELOAD_AHEAD_64
*/
struct dpu_hw_cdp_cfg {
bool enable;
bool ubwc_meta_enable;
bool tile_amortize_enable;
u32 preload_ahead;
};
u32 *dpu_hw_util_get_log_mask_ptr(void);
......@@ -324,4 +340,7 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
u32 total_fl);
#endif /* _DPU_HW_UTIL_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_wb.h"
#include "dpu_formats.h"
#include "dpu_kms.h"
#define WB_DST_FORMAT 0x000
#define WB_DST_OP_MODE 0x004
#define WB_DST_PACK_PATTERN 0x008
#define WB_DST0_ADDR 0x00C
#define WB_DST1_ADDR 0x010
#define WB_DST2_ADDR 0x014
#define WB_DST3_ADDR 0x018
#define WB_DST_YSTRIDE0 0x01C
#define WB_DST_YSTRIDE1 0x020
#define WB_DST_DITHER_BITDEPTH 0x024
#define WB_DST_MATRIX_ROW0 0x030
#define WB_DST_MATRIX_ROW1 0x034
#define WB_DST_MATRIX_ROW2 0x038
#define WB_DST_MATRIX_ROW3 0x03C
#define WB_DST_WRITE_CONFIG 0x048
#define WB_ROTATION_DNSCALER 0x050
#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
#define WB_N16_INIT_PHASE_X_C03 0x060
#define WB_N16_INIT_PHASE_X_C12 0x064
#define WB_N16_INIT_PHASE_Y_C03 0x068
#define WB_N16_INIT_PHASE_Y_C12 0x06C
#define WB_OUT_SIZE 0x074
#define WB_ALPHA_X_VALUE 0x078
#define WB_DANGER_LUT 0x084
#define WB_SAFE_LUT 0x088
#define WB_QOS_CTRL 0x090
#define WB_CREQ_LUT_0 0x098
#define WB_CREQ_LUT_1 0x09C
#define WB_UBWC_STATIC_CTRL 0x144
#define WB_MUX 0x150
#define WB_CROP_CTRL 0x154
#define WB_CROP_OFFSET 0x158
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4
#define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4
/* WB_QOS_CTRL */
#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
const struct dpu_mdss_cfg *m, void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->wb_count; i++) {
if (wb == m->wb[i].id) {
b->base_off = addr;
b->blk_off = m->wb[i].base;
b->length = m->wb[i].len;
b->hwversion = m->hwversion;
return &m->wb[i];
}
}
return ERR_PTR(-EINVAL);
}
static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
}
static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
const struct dpu_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
u32 dst_addr_sw = 0;
chroma_samp = fmt->chroma_sample;
dst_format = (chroma_samp << 23) |
(fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) |
(fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C0_G_Y] << 0);
if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
dst_format |= BIT(8); /* DSTC3_EN */
if (!fmt->alpha_enable ||
!(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
dst_format |= BIT(14); /* DST_ALPHA_X */
}
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
(fmt->element[0] << 0);
dst_format |= (fmt->unpack_align_msb << 18) |
(fmt->unpack_tight << 17) |
((fmt->unpack_count - 1) << 12) |
((fmt->bpp - 1) << 9);
ystride0 = data->dest.plane_pitch[0] |
(data->dest.plane_pitch[1] << 16);
ystride1 = data->dest.plane_pitch[2] |
(data->dest.plane_pitch[3] << 16);
if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
else
outsize = (data->dest.height << 16) | data->dest.width;
DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
}
static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 image_size, out_size, out_xy;
image_size = (wb->dest.height << 16) | wb->dest.width;
out_xy = 0;
out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
}
static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_qos_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 qos_ctrl = 0;
if (!ctx || !cfg)
return;
DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
/*
* For chipsets that do not use DPU_WB_QOS_8LVL but still use the DPU
* driver, such as msm8998, the reset value of WB_CREQ_LUT is
* sufficient for writeback to work; SW doesn't need to explicitly
* program a value.
*/
if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
}
if (cfg->danger_safe_en)
qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
}
static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
struct dpu_hw_cdp_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 cdp_cntl = 0;
if (!ctx || !cfg)
return;
c = &ctx->hw;
if (cfg->enable)
cdp_cntl |= BIT(0);
if (cfg->ubwc_meta_enable)
cdp_cntl |= BIT(1);
if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
cdp_cntl |= BIT(3);
DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
}
static void dpu_hw_wb_bind_pingpong_blk(
struct dpu_hw_wb *ctx,
bool enable, const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c;
int mux_cfg;
if (!ctx)
return;
c = &ctx->hw;
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
if (enable)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
DPU_REG_WRITE(c, WB_MUX, mux_cfg);
}
static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
unsigned long features)
{
ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
ops->setup_outformat = dpu_hw_wb_setup_format;
if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
ops->setup_roi = dpu_hw_wb_roi;
if (test_bit(DPU_WB_QOS, &features))
ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
if (test_bit(DPU_WB_INPUT_CTRL, &features))
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
}
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
void __iomem *addr, const struct dpu_mdss_cfg *m)
{
struct dpu_hw_wb *c;
const struct dpu_wb_cfg *cfg;
if (!addr || !m)
return ERR_PTR(-EINVAL);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _wb_offset(idx, m, addr, &c->hw);
if (IS_ERR(cfg)) {
WARN(1, "Unable to find wb idx=%d\n", idx);
kfree(c);
return ERR_PTR(-EINVAL);
}
/* Assign ops */
c->mdp = &m->mdp[0];
c->idx = idx;
c->caps = cfg;
_setup_wb_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
{
kfree(hw_wb);
}
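A hedged sketch of the per-commit programming order the ops implemented above imply; demo_wb_commit and the PINGPONG_0 choice are illustrative:

static void demo_wb_commit(struct dpu_hw_wb *hw_wb, struct dpu_hw_wb_cfg *job)
{
	hw_wb->ops.setup_outaddress(hw_wb, job);
	hw_wb->ops.setup_outformat(hw_wb, job);
	if (hw_wb->ops.setup_roi)
		hw_wb->ops.setup_roi(hw_wb, job);
	if (hw_wb->ops.bind_pingpong_blk)
		hw_wb->ops.bind_pingpong_blk(hw_wb, true, PINGPONG_0);
}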
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
#ifndef _DPU_HW_WB_H
#define _DPU_HW_WB_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_top.h"
#include "dpu_hw_util.h"
#include "dpu_hw_pingpong.h"
struct dpu_hw_wb;
struct dpu_hw_wb_cfg {
struct dpu_hw_fmt_layout dest;
enum dpu_intf_mode intf_mode;
struct drm_rect roi;
struct drm_rect crop;
};
/**
* enum of CDP (client driven prefetch) preload-ahead address sizes
*/
enum {
DPU_WB_CDP_PRELOAD_AHEAD_32,
DPU_WB_CDP_PRELOAD_AHEAD_64
};
/**
* struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
* @danger_lut: LUT for generating the danger level based on fill level
* @safe_lut: LUT for generating the safe level based on fill level
* @creq_lut: LUT for generating the creq level based on fill level
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_wb_qos_cfg {
u32 danger_lut;
u32 safe_lut;
u64 creq_lut;
bool danger_safe_en;
};
/**
* struct dpu_hw_wb_ops: Interface to the wb hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_outaddress: setup output address from the writeback job
* @setup_outformat: setup output format of writeback block from writeback job
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
* @bind_pingpong_blk: enable/disable the connection with ping-pong block
*/
struct dpu_hw_wb_ops {
void (*setup_outaddress)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_outformat)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_roi)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_qos_cfg *cfg);
void (*setup_cdp)(struct dpu_hw_wb *ctx,
struct dpu_hw_cdp_cfg *cfg);
void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
bool enable, const enum dpu_pingpong pp);
};
/**
* struct dpu_hw_wb : WB driver object
* @hw: block hardware details
* @mdp: pointer to associated mdp portion of the catalog
* @idx: hardware index number within type
* @wb_hw_caps: hardware capabilities
* @ops: function pointers
* @hw_mdp: MDP top level hardware block
*/
struct dpu_hw_wb {
struct dpu_hw_blk_reg_map hw;
const struct dpu_mdp_cfg *mdp;
/* wb path */
int idx;
const struct dpu_wb_cfg *caps;
/* ops */
struct dpu_hw_wb_ops ops;
struct dpu_hw_mdp *hw_mdp;
};
/**
* dpu_hw_wb_init(): Initializes and returns the writeback hw driver object.
* @idx: wb_path index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
*/
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_wb_destroy(): Destroy writeback hw driver object.
* @hw_wb: Pointer to writeback hw driver object
*/
void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
#endif /*_DPU_HW_WB_H */
......@@ -65,18 +65,6 @@
#define DPU_NAME_SIZE 12
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
* @func: intr handler
* @arg: argument for the handler
*/
struct dpu_irq_callback {
struct list_head list;
void (*func)(void *arg, int irq_idx);
void *arg;
};
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
......@@ -145,6 +133,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
};
struct dpu_global_state
......
......@@ -26,6 +26,7 @@
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
* @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
* @rotation: simplified drm rotation hint
*/
struct dpu_plane_state {
struct drm_plane_state base;
......@@ -40,6 +41,7 @@ struct dpu_plane_state {
u64 plane_clk;
bool needs_dirtyfb;
unsigned int rotation;
};
/**
......
......@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
* @hw_wb: array of wb hardware resources
* @dspp_blks: array of dspp hardware resources
*/
struct dpu_rm {
......@@ -26,8 +27,10 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
};
/**
......@@ -95,5 +98,15 @@ static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_in
return rm->hw_intf[intf_idx - INTF_0];
}
/**
* dpu_rm_get_wb - Return a struct dpu_hw_wb instance given it's index.
* @rm: DPU Resource Manager handle
* @wb_idx: WB index
*/
static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
{
return rm->hw_wb[wb_idx - WB_0];
}
#endif /* __DPU_RM_H__ */
......@@ -11,6 +11,8 @@
#include "mdp4_kms.h"
#ifdef CONFIG_DRM_MSM_DSI
struct mdp4_dsi_encoder {
struct drm_encoder base;
struct drm_panel *panel;
......@@ -170,3 +172,4 @@ struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
return ERR_PTR(ret);
}
#endif /* CONFIG_DRM_MSM_DSI */
......@@ -8,6 +8,8 @@
#include "mdp5_kms.h"
#ifdef CONFIG_DRM_MSM_DSI
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
......@@ -198,3 +200,4 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return 0;
}
#endif /* CONFIG_DRM_MSM_DSI */