Commit 12458e35 authored by Dave Airlie

Merge tag 'drm-msm-next-2021-02-07' of https://gitlab.freedesktop.org/drm/msm into drm-next

* a6xx speedbin support
* a508, a509, a512 support
* various a5xx fixes
* various dpu fixes
* qseed3lite support for sm8250
* dsi fix for msm8994
* mdp5 fix for framerate bug with cmd mode panels
* a6xx GMU OOB race fixes that were showing up in CI
* various additions and removals of semicolons
* gem submit fix for legacy userspace relocs path
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvh3tvLz_xtk=4x9xUfo2h2s4xkniOvC7HyLO2jrXnXkw@mail.gmail.com
parents f730f39e 182b4a2d
@@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
 #define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
 #define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
 #define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
...
@@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
	int ret;

	/* Not all A5xx chips have a GPMU */
-	if (adreno_is_a510(adreno_gpu))
+	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
		return 0;

	/* Set up the limits management */
@@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

-	if (adreno_is_a510(adreno_gpu))
+	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
		return;

	if (a5xx_gpu->gpmu_bo)
...
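For context: the two hunks above invert the GPMU check, from excluding only a510 to allowing only a530/a540. A minimal helper expressing the new predicate (illustration only, not part of the patch):

	static bool a5xx_has_gpmu(struct adreno_gpu *gpu)
	{
		/* a530 and a540 are the only a5xx parts with a GPMU; the new
		 * a508/a509/a512 (added below) take the early-return path. */
		return adreno_is_a530(gpu) || adreno_is_a540(gpu);
	}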
@@ -245,38 +245,67 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
	return ret;
 }

+struct a6xx_gmu_oob_bits {
+	int set, ack, set_new, ack_new;
+	const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_clear_oob
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+	[GMU_OOB_GPU_SET] = {
+		.name = "GPU_SET",
+		.set = 16,
+		.ack = 24,
+		.set_new = 30,
+		.ack_new = 31,
+	},
+
+	[GMU_OOB_PERFCOUNTER_SET] = {
+		.name = "PERFCOUNTER",
+		.set = 17,
+		.ack = 25,
+		.set_new = 28,
+		.ack_new = 30,
+	},
+
+	[GMU_OOB_BOOT_SLUMBER] = {
+		.name = "BOOT_SLUMBER",
+		.set = 22,
+		.ack = 30,
+	},
+
+	[GMU_OOB_DCVS_SET] = {
+		.name = "GPU_DCVS",
+		.set = 23,
+		.ack = 31,
+	},
+};
+
 /* Trigger an OOB (out of band) request to the GMU */
 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
	int ret;
	u32 val;
	int request, ack;
-	const char *name;

-	switch (state) {
-	case GMU_OOB_GPU_SET:
-		if (gmu->legacy) {
-			request = GMU_OOB_GPU_SET_REQUEST;
-			ack = GMU_OOB_GPU_SET_ACK;
-		} else {
-			request = GMU_OOB_GPU_SET_REQUEST_NEW;
-			ack = GMU_OOB_GPU_SET_ACK_NEW;
-		}
-		name = "GPU_SET";
-		break;
-	case GMU_OOB_BOOT_SLUMBER:
-		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
-		ack = GMU_OOB_BOOT_SLUMBER_ACK;
-		name = "BOOT_SLUMBER";
-		break;
-	case GMU_OOB_DCVS_SET:
-		request = GMU_OOB_DCVS_REQUEST;
-		ack = GMU_OOB_DCVS_ACK;
-		name = "GPU_DCVS";
-		break;
-	default:
-		return -EINVAL;
+	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+		return -EINVAL;
+
+	if (gmu->legacy) {
+		request = a6xx_gmu_oob_bits[state].set;
+		ack = a6xx_gmu_oob_bits[state].ack;
+	} else {
+		request = a6xx_gmu_oob_bits[state].set_new;
+		ack = a6xx_gmu_oob_bits[state].ack_new;
+		if (!request || !ack) {
+			DRM_DEV_ERROR(gmu->dev,
+				"Invalid non-legacy GMU request %s\n",
+				a6xx_gmu_oob_bits[state].name);
+			return -EINVAL;
+		}
	}
	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
@@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
-				name,
+				a6xx_gmu_oob_bits[state].name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
@@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 /* Clear a pending OOB state in the GMU */
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
-	if (!gmu->legacy) {
-		WARN_ON(state != GMU_OOB_GPU_SET);
-		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
-			1 << GMU_OOB_GPU_SET_CLEAR_NEW);
-		return;
-	}
+	int bit;

-	switch (state) {
-	case GMU_OOB_GPU_SET:
-		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
-			1 << GMU_OOB_GPU_SET_CLEAR);
-		break;
-	case GMU_OOB_BOOT_SLUMBER:
-		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
-			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
-		break;
-	case GMU_OOB_DCVS_SET:
-		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
-			1 << GMU_OOB_DCVS_CLEAR);
-		break;
-	}
+	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+		return;
+
+	if (gmu->legacy)
+		bit = a6xx_gmu_oob_bits[state].ack;
+	else
+		bit = a6xx_gmu_oob_bits[state].ack_new;
+
+	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
 }

 /* Enable CPU control of SPTP power collapse */
...
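For context, a minimal sketch of how callers are expected to pair these helpers around a CPU-side critical section (illustrative only; the real callers live in a6xx_gpu.c, and the example_ name is hypothetical):

	static int example_read_with_gpu_held_on(struct a6xx_gpu *a6xx_gpu, u32 *out)
	{
		int ret;

		/* Ask the GMU to keep the GPU powered; fails with -EINVAL for
		 * an unknown request or with the ack-poll timeout. */
		ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
		if (ret)
			return ret;

		*out = gpu_read(&a6xx_gpu->base.base, REG_A6XX_RBBM_PERFCTR_CP_0_LO);

		/* Always drop the request so the GMU may power the GPU down again. */
		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
		return 0;
	}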
@@ -153,43 +153,26 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
  */
 enum a6xx_gmu_oob_state {
-	GMU_OOB_BOOT_SLUMBER = 0,
-	GMU_OOB_GPU_SET,
-	GMU_OOB_DCVS_SET,
-};
-
-/* These are the interrupt / ack bits for each OOB request that are set
- * in a6xx_gmu_set_oob and a6xx_clear_oob
- */
-
-/*
- * Let the GMU know that a boot or slumber operation has started. The value in
- * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
- * doing
- */
-#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
-#define GMU_OOB_BOOT_SLUMBER_ACK 30
-#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
-
-/*
- * Set a new power level for the GPU when the CPU is doing frequency scaling
- */
-#define GMU_OOB_DCVS_REQUEST 23
-#define GMU_OOB_DCVS_ACK 31
-#define GMU_OOB_DCVS_CLEAR 31
-
-/*
- * Let the GMU know to not turn off any GPU registers while the CPU is in a
- * critical section
- */
-#define GMU_OOB_GPU_SET_REQUEST 16
-#define GMU_OOB_GPU_SET_ACK 24
-#define GMU_OOB_GPU_SET_CLEAR 24
-
-#define GMU_OOB_GPU_SET_REQUEST_NEW 30
-#define GMU_OOB_GPU_SET_ACK_NEW 31
-#define GMU_OOB_GPU_SET_CLEAR_NEW 31
+	/*
+	 * Let the GMU know that a boot or slumber operation has started. The value in
+	 * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+	 * doing
+	 */
+	GMU_OOB_BOOT_SLUMBER = 0,
+	/*
+	 * Let the GMU know to not turn off any GPU registers while the CPU is in a
+	 * critical section
+	 */
+	GMU_OOB_GPU_SET,
+	/*
+	 * Set a new power level for the GPU when the CPU is doing frequency scaling
+	 */
+	GMU_OOB_DCVS_SET,
+	/*
+	 * Used to keep the GPU on for CPU-side reads of performance counters.
+	 */
+	GMU_OOB_PERFCOUNTER_SET,
+};

 void a6xx_hfi_init(struct a6xx_gmu *gmu);
 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
...
@@ -10,6 +10,7 @@
 #include <linux/bitfield.h>
 #include <linux/devfreq.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/soc/qcom/llcc-qcom.h>

 #define GPU_PAS_ID 13
@@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);

-	if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
+	if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
		a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
 }
@@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	static DEFINE_MUTEX(perfcounter_oob);
+
+	mutex_lock(&perfcounter_oob);

	/* Force the GPU power on so we can read this register */
-	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);

	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		REG_A6XX_RBBM_PERFCTR_CP_0_HI);

-	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+	mutex_unlock(&perfcounter_oob);

	return 0;
 }
@@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
	a6xx_gmu_remove(a6xx_gpu);

	adreno_gpu_cleanup(adreno_gpu);

+	if (a6xx_gpu->opp_table)
+		dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
+
	kfree(a6xx_gpu);
 }
@@ -1239,6 +1248,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
	return (unsigned long)busy_time;
 }

+static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct iommu_domain *iommu;
+	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
+	u64 start, size;
+
+	iommu = iommu_domain_alloc(&platform_bus_type);
+	if (!iommu)
+		return NULL;
+
+	/*
+	 * This allows GPU to set the bus attributes required to use system
+	 * cache on behalf of the iommu page table walker.
+	 */
+	if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+		adreno_set_llc_attributes(iommu);
+
+	mmu = msm_iommu_new(&pdev->dev, iommu);
+	if (IS_ERR(mmu)) {
+		iommu_domain_free(iommu);
+		return ERR_CAST(mmu);
+	}
+
+	/*
+	 * Use the aperture start or SZ_16M, whichever is greater. This will
+	 * ensure that we align with the allocated pagetable range while still
+	 * allowing room in the lower 32 bits for GMEM and whatnot
+	 */
+	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+	size = iommu->geometry.aperture_end - start + 1;
+
+	aspace = msm_gem_address_space_create(mmu, "gpu",
+		start & GENMASK_ULL(48, 0), size);
+
+	if (IS_ERR(aspace) && !IS_ERR(mmu))
+		mmu->funcs->destroy(mmu);
+
+	return aspace;
+}
+
 static struct msm_gem_address_space *
 a6xx_create_private_address_space(struct msm_gpu *gpu)
 {
@@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
	return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
 }

+static u32 a618_get_speed_bin(u32 fuse)
+{
+	if (fuse == 0)
+		return 0;
+	else if (fuse == 169)
+		return 1;
+	else if (fuse == 174)
+		return 2;
+
+	return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+{
+	u32 val = UINT_MAX;
+
+	if (revn == 618)
+		val = a618_get_speed_bin(fuse);
+
+	if (val == UINT_MAX) {
+		DRM_DEV_ERROR(dev,
+			"missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
+			fuse);
+		return UINT_MAX;
+	}
+
+	return (1 << val);
+}
+
+static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
+		u32 revn)
+{
+	struct opp_table *opp_table;
+	struct nvmem_cell *cell;
+	u32 supp_hw = UINT_MAX;
+	void *buf;
+
+	cell = nvmem_cell_get(dev, "speed_bin");
+	/*
+	 * -ENOENT means that the platform doesn't support speedbin which is
+	 * fine
+	 */
+	if (PTR_ERR(cell) == -ENOENT)
+		return 0;
+	else if (IS_ERR(cell)) {
+		DRM_DEV_ERROR(dev,
+			"failed to read speed-bin. Some OPPs may not be supported by hardware");
+		goto done;
+	}
+
+	buf = nvmem_cell_read(cell, NULL);
+	if (IS_ERR(buf)) {
+		nvmem_cell_put(cell);
+		DRM_DEV_ERROR(dev,
+			"failed to read speed-bin. Some OPPs may not be supported by hardware");
+		goto done;
+	}
+
+	supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
+
+	kfree(buf);
+	nvmem_cell_put(cell);
+
+done:
+	opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+	if (IS_ERR(opp_table))
+		return PTR_ERR(opp_table);
+
+	a6xx_gpu->opp_table = opp_table;
+	return 0;
+}
+
 static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
@@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = {
		.gpu_state_get = a6xx_gpu_state_get,
		.gpu_state_put = a6xx_gpu_state_put,
 #endif
-		.create_address_space = adreno_iommu_create_address_space,
+		.create_address_space = a6xx_create_address_space,
		.create_private_address_space = a6xx_create_private_address_space,
		.get_rptr = a6xx_get_rptr,
	},
@@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
	a6xx_llc_slices_init(pdev, a6xx_gpu);

+	ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+	if (ret) {
+		a6xx_destroy(&(a6xx_gpu->base.base));
+		return ERR_PTR(ret);
+	}
+
	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
...
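As a worked example of the speed-bin flow added above, using the a618 values from a618_get_speed_bin(): a fuse value of 169 selects bin 1, so the mask handed to the OPP core is BIT(1), and only OPPs whose device-tree opp-supported-hw value intersects that mask stay enabled. A standalone sketch (the example_ name is hypothetical; dev_pm_opp_set_supported_hw()/dev_pm_opp_put_supported_hw() are the real APIs the patch uses):

	static void example_speedbin_flow(struct device *dev)
	{
		u32 supp_hw = 1 << 1;		/* fuse 169 -> bin 1 -> mask 0x2 */
		struct opp_table *tbl;

		tbl = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
		if (!IS_ERR(tbl))
			dev_pm_opp_put_supported_hw(tbl);	/* on teardown */
	}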
@@ -33,6 +33,8 @@ struct a6xx_gpu {
	void *llc_slice;
	void *htw_llc_slice;
	bool have_mmu500;
+
+	struct opp_table *opp_table;
 };

 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
...
@@ -133,6 +133,41 @@ static const struct adreno_info gpulist[] = {
		.gmem = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a4xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 0, 8, ANY_ID),
+		.revn = 508,
+		.name = "A508",
+		.fw = {
+			[ADRENO_FW_PM4] = "a530_pm4.fw",
+			[ADRENO_FW_PFP] = "a530_pfp.fw",
+		},
+		.gmem = (SZ_128K + SZ_8K),
+		/*
+		 * Increase inactive period to 250 to avoid bouncing
+		 * the GDSC which appears to make it grumpy
+		 */
+		.inactive_period = 250,
+		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+		.init = a5xx_gpu_init,
+		.zapfw = "a508_zap.mdt",
+	}, {
+		.rev = ADRENO_REV(5, 0, 9, ANY_ID),
+		.revn = 509,
+		.name = "A509",
+		.fw = {
+			[ADRENO_FW_PM4] = "a530_pm4.fw",
+			[ADRENO_FW_PFP] = "a530_pfp.fw",
+		},
+		.gmem = (SZ_256K + SZ_16K),
+		/*
+		 * Increase inactive period to 250 to avoid bouncing
+		 * the GDSC which appears to make it grumpy
+		 */
+		.inactive_period = 250,
+		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+		.init = a5xx_gpu_init,
+		/* Adreno 509 uses the same ZAP as 512 */
+		.zapfw = "a512_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 1, 0, ANY_ID),
		.revn = 510,
@@ -148,6 +183,23 @@ static const struct adreno_info gpulist[] = {
		 */
		.inactive_period = 250,
		.init = a5xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 1, 2, ANY_ID),
+		.revn = 512,
+		.name = "A512",
+		.fw = {
+			[ADRENO_FW_PM4] = "a530_pm4.fw",
+			[ADRENO_FW_PFP] = "a530_pfp.fw",
+		},
+		.gmem = (SZ_256K + SZ_16K),
+		/*
+		 * Increase inactive period to 250 to avoid bouncing
+		 * the GDSC which appears to make it grumpy
+		 */
+		.inactive_period = 250,
+		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+		.init = a5xx_gpu_init,
+		.zapfw = "a512_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 3, 0, 2),
		.revn = 530,
@@ -168,7 +220,7 @@ static const struct adreno_info gpulist[] = {
		.init = a5xx_gpu_init,
		.zapfw = "a530_zap.mdt",
	}, {
-		.rev = ADRENO_REV(5, 4, 0, 2),
+		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
		.revn = 540,
		.name = "A540",
		.fw = {
...
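For context on the ADRENO_REV(5, 4, 0, ANY_ID) change above: each field of an entry's .rev matches either exactly or via the ANY_ID wildcard, so the a540 entry now accepts any patch level instead of only patchid == 2. A sketch of the per-field rule (this mirrors the driver's rev-matching behaviour; the example_ name is hypothetical):

	static bool example_rev_field_match(uint8_t entry, uint8_t id)
	{
		return entry == ANY_ID || entry == id;
	}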
@@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 }

+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+	struct io_pgtable_domain_attr pgtbl_cfg;
+
+	pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+	iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+}
+
 struct msm_gem_address_space *
 adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct iommu_domain *iommu;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
@@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
	if (!iommu)
		return NULL;

-	if (adreno_is_a6xx(adreno_gpu)) {
-		struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-		struct io_pgtable_domain_attr pgtbl_cfg;
-
-		/*
-		 * This allows GPU to set the bus attributes required to use system
-		 * cache on behalf of the iommu page table walker.
-		 */
-		if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
-			pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
-			iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
-		}
-	}
-
	mmu = msm_iommu_new(&pdev->dev, iommu);
	if (IS_ERR(mmu)) {
		iommu_domain_free(iommu);
...
@@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
	return gpu->revn == 430;
 }

+static inline int adreno_is_a508(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 508;
+}
+
+static inline int adreno_is_a509(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 509;
+}
+
 static inline int adreno_is_a510(struct adreno_gpu *gpu)
 {
	return gpu->revn == 510;
 }

+static inline int adreno_is_a512(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 512;
+}
+
 static inline int adreno_is_a530(struct adreno_gpu *gpu)
 {
	return gpu->revn == 530;
@@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
	return gpu->revn == 540;
 }

-static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
-{
-	return ((gpu->revn < 700 && gpu->revn > 599));
-}
-
 static inline int adreno_is_a618(struct adreno_gpu *gpu)
 {
	return gpu->revn == 618;
@@ -278,6 +288,8 @@ struct msm_gem_address_space *
 adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
 /*
  * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
  * out of secure mode
...
@@ -4,8 +4,10 @@
  */

 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

+#include <linux/delay.h>
 #include "dpu_encoder_phys.h"
 #include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
 #include "dpu_core_irq.h"
 #include "dpu_formats.h"
 #include "dpu_trace.h"
@@ -35,6 +37,8 @@
 #define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000

+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
 static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
 {
	return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
@@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
	tc_cfg.vsync_count = vsync_hz /
		(mode->vtotal * drm_mode_vrefresh(mode));

-	/* enable external TE after kickoff to avoid premature autorefresh */
-	tc_cfg.hw_vsync_mode = 0;
-
	/*
-	 * By setting sync_cfg_height to near max register value, we essentially
-	 * disable dpu hw generated TE signal, since hw TE will arrive first.
-	 * Only caveat is if due to error, we hit wrap-around.
+	 * Set the sync_cfg_height to twice vtotal so that if we lose a
+	 * TE event coming from the display TE pin we won't stall immediately
	 */
-	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.hw_vsync_mode = 1;
+	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
@@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
			atomic_read(&phys_enc->pending_kickoff_cnt));
 }

+static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_pp_vsync_info info;
+
+	if (!phys_enc)
+		return false;
+
+	phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
+	if (info.wr_ptr_line_count > 0 &&
+	    info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
+		return true;
+
+	return false;
+}
+
+static void dpu_encoder_phys_cmd_prepare_commit(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	int trial = 0;
+
+	if (!phys_enc)
+		return;
+	if (!phys_enc->hw_pp)
+		return;
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return;
+
+	/* If autorefresh is already disabled, we have nothing to do */
+	if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
+		return;
+
+	/*
+	 * If autorefresh is enabled, disable it and make sure it is safe to
+	 * proceed with current frame commit/push. Sequence followed is,
+	 * 1. Disable TE
+	 * 2. Disable autorefresh config
+	 * 3. Poll for frame transfer ongoing to be false
+	 * 4. Enable TE back
+	 */
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+	phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
+
+	do {
+		udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+		if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+				> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+			DPU_ERROR_CMDENC(cmd_enc,
+					"disable autorefresh failed\n");
+			break;
+		}
+
+		trial++;
+	} while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
+
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+
+	DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
+			"disabled autorefresh\n");
+}
+
 static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
 {
@@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
 static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
 {
-	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
-	if (dpu_encoder_phys_cmd_is_master(phys_enc))
-		rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
-
-	/* required for both controllers */
-	if (!rc && cmd_enc->serialize_wait4pp)
-		dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return 0;

-	return rc;
+	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
 }

 static int dpu_encoder_phys_cmd_wait_for_vblank(
@@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start(
 static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
 {
+	ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->mode_set = dpu_encoder_phys_cmd_mode_set;
	ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
...
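A worked example of the new tearcheck programming above, assuming a hypothetical cmd-mode panel with vdisplay = 1080, vtotal = 2000 at 60 Hz, and a 19.2 MHz vsync counter clock:

	vsync_count     = 19200000 / (2000 * 60) = 160 counter ticks per line
	sync_cfg_height = 2000 * 2 = 4000, so one missed TE pulse no longer
	                  stalls the pipe (the old 0xFFF0 effectively disabled
	                  the HW-generated TE)
	vsync_init_val  = 1080 (vdisplay), the line the counter starts from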
@@ -12,14 +12,17 @@
 #define VIG_MASK \
	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
-	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))

 #define VIG_SDM845_MASK \
-	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))

 #define VIG_SC7180_MASK \
-	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))

 #define DMA_SDM845_MASK \
	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
@@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = {
 static const struct dpu_caps sm8250_dpu_caps = {
	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+	.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
	.ubwc_version = DPU_HW_UBWC_VER_40,
	.has_src_split = true,
@@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
 };

+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+		_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+		_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+		_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+		_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+		sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+		sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+	SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+		sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+	SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+		sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+	SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+		sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+	SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+		sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+	SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+	SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
@@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
	.len = 0x90, .version = 0x40000},
 };

-#define DSPP_BLK(_name, _id, _base, _sblk) \
+#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
	{\
	.name = _name, .id = _id, \
	.base = _base, .len = 0x1800, \
-	.features = DSPP_SC7180_MASK, \
+	.features = _mask, \
	.sblk = _sblk \
	}

 static const struct dpu_dspp_cfg sc7180_dspp[] = {
-	DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
+	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+		 &sc7180_dspp_sblk),
 };

 static const struct dpu_dspp_cfg sm8150_dspp[] = {
-	DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
-	DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
-	DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
-	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
+	DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+		 &sm8150_dspp_sblk),
+	DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+		 &sm8150_dspp_sblk),
+	DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+		 &sm8150_dspp_sblk),
+	DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+		 &sm8150_dspp_sblk),
 };

 /*************************************************************
@@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
 /*************************************************************
  * INTF sub blocks config
  *************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
	{\
	.name = _name, .id = _id, \
	.base = _base, .len = 0x280, \
	.features = _features, \
	.type = _type, \
	.controller_id = _ctrl_id, \
-	.prog_fetch_lines_worst_case = 24 \
+	.prog_fetch_lines_worst_case = _progfetch \
	}

 static const struct dpu_intf_cfg sdm845_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
-	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
-	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
 };

 static const struct dpu_intf_cfg sc7180_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
 };

 static const struct dpu_intf_cfg sm8150_intf[] = {
-	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
-	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
-	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
-	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
 };

 /*************************************************************
@@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
		.mdp = sm8250_mdp,
		.ctl_count = ARRAY_SIZE(sm8150_ctl),
		.ctl = sm8150_ctl,
-		/* TODO: sspp qseed version differs from 845 */
-		.sspp_count = ARRAY_SIZE(sdm845_sspp),
-		.sspp = sdm845_sspp,
+		.sspp_count = ARRAY_SIZE(sm8250_sspp),
+		.sspp = sm8250_sspp,
		.mixer_count = ARRAY_SIZE(sm8150_lm),
		.mixer = sm8150_lm,
		.dspp_count = ARRAY_SIZE(sm8150_dspp),
...
@@ -95,6 +95,7 @@ enum {
 * @DPU_SSPP_SRC             Src and fetch part of the pipes,
 * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
 * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+* @DPU_SSPP_SCALER_QSEED3LITE,  QSEED3 Lite algorithm support
 * @DPU_SSPP_SCALER_QSEED4,  QSEED4 algorithm support
 * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC,            Support of Color space conversion
@@ -114,6 +115,7 @@ enum {
	DPU_SSPP_SRC = 0x1,
	DPU_SSPP_SCALER_QSEED2,
	DPU_SSPP_SCALER_QSEED3,
+	DPU_SSPP_SCALER_QSEED3LITE,
	DPU_SSPP_SCALER_QSEED4,
	DPU_SSPP_SCALER_RGB,
	DPU_SSPP_CSC,
...
@@ -23,6 +23,7 @@
 #define PP_WR_PTR_IRQ 0x024
 #define PP_OUT_LINE_COUNT 0x028
 #define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030

 #define PP_FBC_MODE 0x034
 #define PP_FBC_BUDGET_CTL 0x038
@@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
	return 0;
 }

+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+		u32 frame_count, bool enable)
+{
+	DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+		      enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+		u32 *frame_count)
+{
+	u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+
+	if (frame_count != NULL)
+		*frame_count = val & 0xffff;
+	return !!((val & BIT(31)) >> 31);
+}
+
 static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
		u32 timeout_us)
 {
@@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
	c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
	c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
	c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+	c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
+	c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
	c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
	c->ops.get_line_count = dpu_hw_pp_get_line_count;
...
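A minimal usage sketch of the two new pingpong ops wired up above (illustrative only; the real consumer is dpu_encoder_phys_cmd_prepare_commit() shown earlier, and the example_ name is hypothetical):

	static void example_disable_autorefresh(struct dpu_hw_pingpong *pp)
	{
		u32 frames;

		/* get_autorefresh() reports BIT(31) of PP_AUTOREFRESH_CONFIG
		 * and returns the low bits (the frame count) via 'frames'. */
		if (pp->ops.get_autorefresh(pp, &frames))
			pp->ops.setup_autorefresh(pp, 0, false);	/* write 0: disabled */
	}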
@@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg {
 * @setup_tearcheck : program tear check values
 * @enable_tearcheck : enables tear check
 * @get_vsync_info : retrieves timing info of the panel
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @get_autorefresh : retrieve autorefresh config from hardware
 * @setup_dither : function to program the dither hw block
 * @get_line_count: obtain current vertical line counter
 */
@@ -94,6 +96,18 @@ struct dpu_hw_pingpong_ops {
	int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
			struct dpu_hw_pp_vsync_info *info);

+	/**
+	 * configure and enable the autorefresh config
+	 */
+	void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
+			u32 frame_count, bool enable);
+
+	/**
+	 * retrieve autorefresh config from hardware
+	 */
+	bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
+			u32 *frame_count);
+
	/**
	 * poll until write pointer transmission starts
	 * @Return: 0 on success, -ETIMEDOUT on timeout
...
@@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
	c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;

	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+	    test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
	    test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
		c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
		c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
...
@@ -28,6 +28,7 @@ struct dpu_hw_pipe;
 #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
	(1UL << DPU_SSPP_SCALER_QSEED2) | \
	(1UL << DPU_SSPP_SCALER_QSEED3) | \
+	(1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
	(1UL << DPU_SSPP_SCALER_QSEED4))

 /**
...
@@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
 #define QSEED3_SEP_LUT_SIZE \
	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))

+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+	(QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
 void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
		u32 reg_off,
		u32 val,
@@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
 }

+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+		struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+	int j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset;
+	u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+	static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 };
+
+	DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+	if (!scaler3_cfg->sep_lut)
+		return;
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+			for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+				DPU_REG_WRITE(c,
+					lut_addr,
+					(lut[filter])[lut_offset++]);
+				lut_addr += 4;
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
 static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
		struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
 {
@@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
		op_mode |= BIT(8);
	}

-	if (scaler3_cfg->lut_flag)
-		_dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
-				scaler_offset);
+	if (scaler3_cfg->lut_flag) {
+		if (scaler_version < 0x2004)
+			_dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+		else
+			_dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+	}

	if (scaler_version == 0x1002) {
		phase_init =
...
@@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg {
 * @ cir_lut:     pointer to circular filter LUT
 * @ sep_lut:     pointer to separable filter LUT
 * @ de:          detail enhancer configuration
+ * @ dir_weight:  Directional weight
 */
 struct dpu_hw_scaler3_cfg {
	u32 enable;
@@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg {
	 * Detail enhancer settings
	 */
	struct dpu_hw_scaler3_de_cfg de;
+
+	u32 dir_weight;
 };

 /**
...
@@ -30,7 +30,7 @@
 #define VBIF_XIN_HALT_CTRL0 0x0200
 #define VBIF_XIN_HALT_CTRL1 0x0204
 #define VBIF_XINL_QOS_RP_REMAP_000 0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 0x570 : 0x0590)

 static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
		u32 *pnd_errors, u32 *src_errors)
@@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
		u32 xin_id, u32 level, u32 remap_level)
 {
	struct dpu_hw_blk_reg_map *c;
-	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+	u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;

	if (!vbif)
		return;

	c = &vbif->hw;

+	reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
	reg_shift = (xin_id & 0x7) * 4;

	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
-	reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+	reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);

	mask = 0x7 << reg_shift;

@@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
	reg_val_lvl |= (remap_level << reg_shift) & mask;

	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
-	DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+	DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
 }

 static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
...
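A quick check of the addressing math above, with hypothetical values xin_id = 10 and level = 2: reg_high = ((10 & 0x8) >> 3) * 4 + 2 * 8 = 20 and reg_shift = (10 & 0x7) * 4 = 8, so the 3-bit remap field for that client sits at bits [10:8] of the level-remap base plus 0x14 — that is 0x570 + 0x14 = 0x584 on pre-DPU-4.0 parts and 0x590 + 0x14 = 0x5a4 on newer ones.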
@@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
	case DRM_MODE_ENCODER_TMDS:
		info.num_of_h_tiles = 1;
		break;
-	};
+	}

	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
	if (rc)
...
@@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
			pdpu->debugfs_root, &pdpu->debugfs_src);

	if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
			cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
		dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
...
@@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
			[3] = INTF_HDMI,
		},
	},
-	.max_clk = 200000000,
+	.max_clk = 320000000,
 };

 static const struct mdp5_cfg_hw apq8084_config = {
...
@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
						   pp_done);

-	complete(&mdp5_crtc->pp_completion);
+	complete_all(&mdp5_crtc->pp_completion);
 }

 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
...
@@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
	ssize_t ret;
	int const aux_cmd_native_max = 16;
	int const aux_cmd_i2c_max = 128;
-	int const retry_count = 5;
	struct dp_aux_private *aux = container_of(dp_aux,
				struct dp_aux_private, dp_aux);

@@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
	ret = dp_aux_cmd_fifo_tx(aux, msg);
	if (ret < 0) {
-		if (aux->native) {
-			aux->retry_cnt++;
-			if (!(aux->retry_cnt % retry_count))
-				dp_catalog_aux_update_cfg(aux->catalog);
-			dp_catalog_aux_reset(aux->catalog);
-		}
		usleep_range(400, 500); /* at least 400us to next try */
		goto unlock_exit;
	}
...
@@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
	return 0;
 }

+/**
+ * dp_catalog_aux_reset() - reset AUX controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the AUX controller
+ *
+ * NOTE: resetting the AUX controller will also clear any pending HPD related interrupts
+ *
+ */
 void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
 {
	u32 aux_ctrl;
@@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
	return 0;
 }

+/**
+ * dp_catalog_ctrl_reset() - reset DP controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the DP controller
+ *
+ * NOTE: resetting the DP controller will also clear any pending HPD related interrupts
+ *
+ */
 void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
 {
	u32 sw_reset;
...
@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
	tu = kzalloc(sizeof(*tu), GFP_KERNEL);
	if (!tu)
-		return
+		return;

	dp_panel_update_tu_timings(in, tu);

@@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
	default:
		ret = -EINVAL;
		break;
-	};
+	}

	if (!ret)
		DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
@@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
	 * transitioned to PUSH_IDLE. In order to start transmitting
	 * a link training pattern, we have to first do soft reset.
	 */
-	dp_catalog_ctrl_reset(ctrl->catalog);

	ret = dp_ctrl_link_train(ctrl, cr, training_step);
@@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
	return ret;
 }

-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
 {
	struct dp_ctrl_private *ctrl;
	struct dp_io *dp_io;
@@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
	ctrl->dp_ctrl.orientation = flip;

+	if (reset)
+		dp_catalog_ctrl_reset(ctrl->catalog);
+
	dp_catalog_ctrl_phy_reset(ctrl->catalog);
	phy_init(phy);
	dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1496,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
	int training_step = DP_TRAINING_NONE;

	dp_ctrl_push_idle(&ctrl->dp_ctrl);
-	dp_catalog_ctrl_reset(ctrl->catalog);

	ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
@@ -1785,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
	 * Set up transfer unit values and set controller state to send
	 * video.
	 */
+	reinit_completion(&ctrl->video_comp);
+
	dp_ctrl_configure_source_params(ctrl);

	dp_catalog_ctrl_config_msa(ctrl->catalog,
		ctrl->link->link_params.rate,
		ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));

-	reinit_completion(&ctrl->video_comp);
-
	dp_ctrl_setup_tr_unit(ctrl);

	dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
...
@@ -19,7 +19,7 @@ struct dp_ctrl {
	u32 pixel_rate;
 };

-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
 void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
...
@@ -350,7 +350,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	return rc;
 }
 
-static void dp_display_host_init(struct dp_display_private *dp)
+static void dp_display_host_init(struct dp_display_private *dp, int reset)
 {
 	bool flip = false;
@@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
 	dp_display_set_encoder_mode(dp);
 
 	dp_power_init(dp->power, flip);
-	dp_ctrl_host_init(dp->ctrl, flip);
+	dp_ctrl_host_init(dp->ctrl, flip, reset);
 	dp_aux_init(dp->aux);
 	dp->core_initialized = true;
 }
@@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
 		goto end;
 	}
 
-	dp_display_host_init(dp);
+	dp_display_host_init(dp, false);
 
 	/*
 	 * set sink to normal operation mode -- D0
@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
 	dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
 
 	/* signal the disconnect event early to ensure proper teardown */
-	dp_display_handle_plugged_change(g_dp_display, false);
 	reinit_completion(&dp->audio_comp);
+	dp_display_handle_plugged_change(g_dp_display, false);
 
 	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
 				   DP_DP_IRQ_HPD_INT_MASK, true);
@@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
 		return 0;
 	}
 
+	if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
+		/* wait until ST_CONNECTED */
+		dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+		mutex_unlock(&dp->event_mutex);
+		return 0;
+	}
+
 	ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
 	if (ret == -ECONNRESET) { /* cable unplugged */
 		dp->core_initialized = false;
@@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
 	/* wait only if audio was enabled */
 	if (dp_display->audio_enabled) {
+		/* signal the disconnect event */
+		reinit_completion(&dp->audio_comp);
+		dp_display_handle_plugged_change(dp_display, false);
 		if (!wait_for_completion_timeout(&dp->audio_comp,
 				HZ * 5))
 			DRM_ERROR("audio comp timeout\n");
@@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
 
 static void dp_display_config_hpd(struct dp_display_private *dp)
 {
-	dp_display_host_init(dp);
+	dp_display_host_init(dp, true);
 	dp_catalog_ctrl_hpd_config(dp->catalog);
 
 	/* Enable interrupt first time
@@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev)
 	dp->hpd_state = ST_DISCONNECTED;
 
 	/* turn on dp ctrl/phy */
-	dp_display_host_init(dp);
+	dp_display_host_init(dp, true);
 
 	dp_catalog_ctrl_hpd_config(dp->catalog);
@@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
 	state = dp_display->hpd_state;
 
 	if (state == ST_DISPLAY_OFF)
-		dp_display_host_init(dp_display);
+		dp_display_host_init(dp_display, true);
 
 	dp_display_enable(dp_display, 0);
......
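Condensing the dp_display.c call sites above: the new flag decides whether dp_ctrl_host_init() performs dp_catalog_ctrl_reset(). The reading that false keeps in-progress HPD/plug state intact is inferred from the call sites, not stated in the diff:

	/* boot (dp_display_config_hpd), resume (dp_pm_resume), and
	 * enabling from ST_DISPLAY_OFF: a full controller reset is safe
	 */
	dp_display_host_init(dp, true);

	/* usbpd configure callback, i.e. cable just plugged in: skip the
	 * controller reset so the pending HPD sequence is not wiped out
	 */
	dp_display_host_init(dp, false);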
@@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
 
 int dp_panel_init_panel_info(struct dp_panel *dp_panel)
 {
-	int rc = 0;
 	struct drm_display_mode *drm_mode;
 
 	drm_mode = &dp_panel->dp_mode.drm_mode;
@@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
 			min_t(u32, dp_panel->dp_mode.bpp, 30));
 	DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
 
-	return rc;
+	return 0;
 }
 
 struct dp_panel *dp_panel_get(struct dp_panel_in *in)
......
@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
 		.disable = dsi_20nm_phy_disable,
 		.init = msm_dsi_phy_init_common,
 	},
-	.io_start = { 0xfd998300, 0xfd9a0300 },
+	.io_start = { 0xfd998500, 0xfd9a0500 },
 	.num_dsi_phy = 2,
 };
@@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
 	multiplier = 1 << config->frac_bits;
 	dec_multiple = div_u64(pll_freq * multiplier, divider);
 
-	div_u64_rem(dec_multiple, multiplier, &frac);
-
-	dec = div_u64(dec_multiple, multiplier);
+	dec = div_u64_rem(dec_multiple, multiplier, &frac);
 
 	if (pll_freq <= 1900000000UL)
 		regs->pll_prop_gain_rate = 8;
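div_u64_rem() from <linux/math64.h> returns the quotient while storing the remainder through its third argument, so the former div_u64_rem() + div_u64() pair — two full 64-bit divisions of the same operands — collapses into one. A self-contained sketch of the helper's contract:

	#include <linux/math64.h>

	static u64 split_dec_frac(u64 dec_multiple, u32 multiplier, u32 *frac)
	{
		/* one division produces both results:
		 *   return value = dec_multiple / multiplier
		 *   *frac        = dec_multiple % multiplier
		 */
		return div_u64_rem(dec_multiple, multiplier, frac);
	}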
@@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll)
 		  reg->frac_div_start_mid);
 	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
 		  reg->frac_div_start_high);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
+		  reg->pll_lockdet_rate);
 	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
 	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
 	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
@@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
 
 static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
 {
+	struct device *dev = &pll->pdev->dev;
 	int rc;
 	u32 status = 0;
 	u32 const delay_us = 100;
@@ -357,7 +357,7 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
 				       delay_us,
 				       timeout_us);
 	if (rc)
-		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
 			pll->id, status);
 
 	return rc;
@@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct device *dev = &pll_10nm->pdev->dev;
 	int rc;
 
 	dsi_pll_enable_pll_bias(pll_10nm);
@@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
 	rc = dsi_pll_10nm_vco_set_rate(hw,
			pll_10nm->vco_current_rate, 0);
 	if (rc) {
-		pr_err("vco_set_rate failed, rc=%d\n", rc);
+		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
 		return rc;
 	}
@@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
 	/* Check for PLL lock */
 	rc = dsi_pll_10nm_lock_status(pll_10nm);
 	if (rc) {
-		pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
 		goto error;
 	}
@@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct dsi_pll_config *config = &pll_10nm->pll_configuration;
 	void __iomem *base = pll_10nm->mmio;
 	u64 ref_clk = pll_10nm->vco_ref_clk_rate;
 	u64 vco_rate = 0x0;
@@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
 	/*
 	 * TODO:
 	 * 1. Assumes prescaler is disabled
-	 * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
 	 */
-	multiplier = 1 << 18;
+	multiplier = 1 << config->frac_bits;
 	pll_freq = dec * (ref_clk * 2);
 	tmp64 = (ref_clk * 2 * frac);
 	pll_freq += div_u64(tmp64, multiplier);
......
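With config->frac_bits, recalc_rate follows the same fixed-point formula the set-rate path uses: vco = (dec + frac / 2^frac_bits) * 2 * ref_clk. Worked numbers, purely illustrative (frac_bits is 18 on this PLL, so the old hardcoded shift happened to match):

	/* ref_clk = 19200000 Hz, dec = 78, frac = 131072 (= 2^17, i.e. 0.5):
	 *   vco = (78 + 131072 / 262144) * 2 * 19200000
	 *       = 78.5 * 38400000 = 3014400000 Hz
	 */
	pll_freq = dec * (ref_clk * 2);
	pll_freq += div_u64(ref_clk * 2 * frac, 1 << config->frac_bits);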
@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 		struct drm_file *file, struct drm_gem_object *obj,
 		uint64_t *iova)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
 
-	if (!ctx->aspace)
+	if (!priv->gpu)
 		return -EINVAL;
 
 	/*
......
@@ -987,7 +987,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
 		 */
-		if (msm_obj->pages)
-			kvfree(msm_obj->pages);
+		kvfree(msm_obj->pages);
 
 		put_iova_vmas(obj);
......
@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
 		submit->cmd[i].idx = submit_cmd.submit_idx;
 		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
 
+		userptr = u64_to_user_ptr(submit_cmd.relocs);
+
 		sz = array_size(submit_cmd.nr_relocs,
 				sizeof(struct drm_msm_gem_submit_reloc));
 		/* check for overflow: */
......
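The added userptr assignment restores the legacy relocs path, where the reloc table still arrives inside the submit ioctl: UAPI structs carry pointers as u64, and u64_to_user_ptr() converts them back to a __user pointer. array_size() from <linux/overflow.h> saturates to SIZE_MAX on multiplication overflow, which is what the "check for overflow" test relies on. A condensed sketch of the pattern; error handling simplified, not the exact driver flow:

	#include <linux/overflow.h>
	#include <linux/uaccess.h>

	userptr = u64_to_user_ptr(submit_cmd.relocs);

	sz = array_size(submit_cmd.nr_relocs,
			sizeof(struct drm_msm_gem_submit_reloc));
	if (sz == SIZE_MAX)		/* the multiplication overflowed */
		return -ENOMEM;
	if (copy_from_user(relocs, userptr, sz))	/* relocs: kernel-side buffer */
		return -EFAULT;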
@@ -157,6 +157,7 @@ struct msm_kms {
 	 * from the crtc's pending_timer close to end of the frame:
 	 */
 	struct mutex commit_lock[MAX_CRTCS];
+	struct lock_class_key commit_lock_keys[MAX_CRTCS];
 	unsigned pending_crtc_mask;
 	struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
 {
 	unsigned i, ret;
 
-	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
-		mutex_init(&kms->commit_lock[i]);
+	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
+		lockdep_register_key(&kms->commit_lock_keys[i]);
+		__mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
+			     &kms->commit_lock_keys[i]);
+	}
 
 	kms->funcs = funcs;
......
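mutex_init() gives every element of an array the same static lock class, so lockdep treats an atomic commit that holds several commit_locks at once as recursive locking of a single lock. Registering a dynamic lock_class_key per element gives each mutex its own class. A standalone sketch; the MAX_CRTCS value here is illustrative:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	#define MAX_CRTCS 8	/* illustrative */

	static struct mutex locks[MAX_CRTCS];
	static struct lock_class_key keys[MAX_CRTCS];

	static void init_commit_locks(void)
	{
		int i;

		for (i = 0; i < MAX_CRTCS; i++) {
			/* one dynamically registered class per lock */
			lockdep_register_key(&keys[i]);
			__mutex_init(&locks[i], "commit_lock", &keys[i]);
		}
	}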