Commit 36a170b1 authored by Dave Airlie

Merge tag 'drm-msm-next-2019-11-05' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ OCMEM support to enable the couple of generations that had shared OCMEM
  rather than GMEM exclusively for the GPU (late a3xx and I think basically
  all of a4xx).  Bjorn and Brian decided to land this through the drm
  tree to avoid having to coordinate merge requests.
+ a510 support, and various associated display support
+ the usual misc cleanups and fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGv-JWswEJRxe5AmnGQO1SZnpxK05kO1E29K6UUzC9GMMw@mail.gmail.com
parents acc61b89 e20c9284
......@@ -31,6 +31,10 @@ Required properties:
- iommus: phandle to the adreno iommu
- operating-points-v2: phandle to the OPP operating points
Optional properties:
- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
Example:
/ {
......@@ -63,3 +67,50 @@ Example:
operating-points-v2 = <&gmu_opp_table>;
};
};
a3xx example with OCMEM support:
/ {
...
gpu: adreno@fdb00000 {
compatible = "qcom,adreno-330.2",
"qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "kgsl_3d0_irq";
clock-names = "core",
"iface",
"mem_iface";
clocks = <&mmcc OXILI_GFX3D_CLK>,
<&mmcc OXILICX_AHB_CLK>,
<&mmcc OXILICX_AXI_CLK>;
sram = <&gmu_sram>;
power-domains = <&mmcc OXILICX_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 0>;
};
ocmem@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
<0xfec00000 0x180000>;
reg-names = "ctrl",
"mem";
clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
<&mmcc OCMEMCX_OCMEMNOC_CLK>;
clock-names = "core",
"iface";
#address-cells = <1>;
#size-cells = <1>;
gmu_sram: gmu-sram@0 {
reg = <0x0 0x100000>;
ranges = <0 0 0xfec00000 0x100000>;
};
};
};
......@@ -76,6 +76,8 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
* "lut"
* "tbu"
* "tbu_rt"
Example:
......
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sram/qcom,ocmem.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: On Chip Memory (OCMEM) that is present on some Qualcomm Snapdragon SoCs.
maintainers:
- Brian Masney <masneyb@onstation.org>
description: |
The On Chip Memory (OCMEM) is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
properties:
compatible:
const: qcom,msm8974-ocmem
reg:
items:
- description: Control registers
- description: OCMEM address range
reg-names:
items:
- const: ctrl
- const: mem
clocks:
items:
- description: Core clock
- description: Interface clock
clock-names:
items:
- const: core
- const: iface
'#address-cells':
const: 1
'#size-cells':
const: 1
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
- '#address-cells'
- '#size-cells'
patternProperties:
"^.+-sram$":
type: object
description: A region of reserved memory.
properties:
reg:
maxItems: 1
ranges:
maxItems: 1
required:
- reg
- ranges
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
ocmem: ocmem@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
<0xfec00000 0x180000>;
reg-names = "ctrl",
"mem";
clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
<&mmcc OCMEMCX_OCMEMNOC_CLK>;
clock-names = "core",
"iface";
#address-cells = <1>;
#size-cells = <1>;
gmu-sram@0 {
reg = <0x0 0x100000>;
ranges = <0 0 0xfec00000 0x100000>;
};
};
......@@ -442,6 +442,41 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
u32 mode)
{
struct ocmem_tz_lock {
__le32 id;
__le32 offset;
__le32 size;
__le32 mode;
} request;
request.id = cpu_to_le32(id);
request.offset = cpu_to_le32(offset);
request.size = cpu_to_le32(size);
request.mode = cpu_to_le32(mode);
return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
&request, sizeof(request), NULL, 0);
}
int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
{
struct ocmem_tz_unlock {
__le32 id;
__le32 offset;
__le32 size;
} request;
request.id = cpu_to_le32(id);
request.offset = cpu_to_le32(offset);
request.size = cpu_to_le32(size);
return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
&request, sizeof(request), NULL, 0);
}
void __qcom_scm_init(void)
{
}
......@@ -582,7 +617,22 @@ int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare)
{
return -ENODEV;
struct msm_scm_sec_cfg {
__le32 id;
__le32 ctx_bank_num;
} cfg;
int ret, scm_ret = 0;
cfg.id = cpu_to_le32(device_id);
cfg.ctx_bank_num = cpu_to_le32(spare);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
&cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
if (ret || scm_ret)
return ret ? ret : -EINVAL;
return 0;
}
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
......
......@@ -241,6 +241,18 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
return ret;
}
int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
uint32_t size, uint32_t mode)
{
return -ENOTSUPP;
}
int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
uint32_t size)
{
return -ENOTSUPP;
}
void __qcom_scm_init(void)
{
u64 cmd;
......
......@@ -191,6 +191,46 @@ bool qcom_scm_pas_supported(u32 peripheral)
}
EXPORT_SYMBOL(qcom_scm_pas_supported);
/**
* qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
*/
bool qcom_scm_ocmem_lock_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
/**
* qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
* region to the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
* @mode: access mode (WIDE/NARROW)
*/
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
u32 mode)
{
return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);
/**
* qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
* region from the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
*/
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
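/*
 * Not part of the patch: a minimal usage sketch of the SCM OCMEM calls
 * exported above (declared in <linux/qcom_scm.h> by this series).  The
 * client id comes from enum qcom_scm_ocmem_client; the offset, size and
 * access mode below are illustrative placeholders that a real caller (the
 * OCMEM allocator) derives from the allocated region.
 */
static int example_ocmem_secure_assign(enum qcom_scm_ocmem_client id,
				       u32 offset, u32 size, u32 mode)
{
	int ret;

	/* Older firmware has no lock/unlock interface; treat that as success. */
	if (!qcom_scm_ocmem_lock_available())
		return 0;

	ret = qcom_scm_ocmem_lock(id, offset, size, mode);
	if (ret)
		return ret;

	/* ... hand the region to the initiator and use it ... */

	return qcom_scm_ocmem_unlock(id, offset, size);
}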
/**
* qcom_scm_pas_init_image() - Initialize peripheral authentication service
* state machine for a given peripheral, using the
......@@ -327,6 +367,19 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.deassert = qcom_scm_pas_reset_deassert,
};
/**
* qcom_scm_restore_sec_cfg_available() - Check if secure environment
* supports restore security config interface.
*
* Return true if restore-cfg interface is supported, false if not.
*/
bool qcom_scm_restore_sec_cfg_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
QCOM_SCM_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
......
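/*
 * Not part of the patch: a sketch of pairing the new availability check
 * with the restore call.  "device_id" identifies the device whose secure
 * configuration is restored; the 0 passed as the spare/context-bank
 * argument is purely illustrative.
 */
static int example_restore_sec_cfg(struct device *dev, u32 device_id)
{
	int ret;

	if (!qcom_scm_restore_sec_cfg_available())
		return 0;

	ret = qcom_scm_restore_sec_cfg(device_id, 0);
	if (ret)
		dev_err(dev, "restore_sec_cfg failed: %d\n", ret);

	return ret;
}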
......@@ -42,6 +42,15 @@ extern int __qcom_scm_hdcp_req(struct device *dev,
extern void __qcom_scm_init(void);
#define QCOM_SCM_OCMEM_SVC 0xf
#define QCOM_SCM_OCMEM_LOCK_CMD 0x1
#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2
extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
u32 size, u32 mode);
extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
u32 size);
#define QCOM_SCM_SVC_PIL 0x2
#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
......
......@@ -7,6 +7,7 @@ config DRM_MSM
depends on OF && COMMON_CLK
depends on MMU
depends on INTERCONNECT || !INTERCONNECT
depends on QCOM_OCMEM || QCOM_OCMEM=n
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
......
......@@ -6,10 +6,6 @@
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#ifdef CONFIG_MSM_OCMEM
# include <mach/ocmem.h>
#endif
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
......@@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
/* Set the OCMEM base address for A330, etc */
if (a3xx_gpu->ocmem_hdl) {
if (a3xx_gpu->ocmem.hdl) {
gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a3xx_gpu->ocmem_base >> 14));
(unsigned int)(a3xx_gpu->ocmem.base >> 14));
}
/* Turn on performance counters: */
......@@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
#ifdef CONFIG_MSM_OCMEM
if (a3xx_gpu->ocmem_base)
ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif
adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
kfree(a3xx_gpu);
}
......@@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a330(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
/* TODO this is different/missing upstream: */
struct ocmem_buf *ocmem_hdl =
ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
a3xx_gpu->ocmem_hdl = ocmem_hdl;
a3xx_gpu->ocmem_base = ocmem_hdl->addr;
adreno_gpu->gmem = ocmem_hdl->len;
DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
a3xx_gpu->ocmem_base);
#endif
ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
adreno_gpu, &a3xx_gpu->ocmem);
if (ret)
goto fail;
}
if (!gpu->aspace) {
......
......@@ -19,8 +19,7 @@ struct a3xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
void *ocmem_hdl;
struct adreno_ocmem ocmem;
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
......
......@@ -2,9 +2,6 @@
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include "a4xx_gpu.h"
#ifdef CONFIG_MSM_OCMEM
# include <soc/qcom/ocmem.h>
#endif
#define A4XX_INT0_MASK \
(A4XX_INT0_RBBM_AHB_ERROR | \
......@@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
(1 << 30) | 0xFFFF);
gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a4xx_gpu->ocmem_base >> 14));
(unsigned int)(a4xx_gpu->ocmem.base >> 14));
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
......@@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
adreno_gpu_cleanup(adreno_gpu);
#ifdef CONFIG_MSM_OCMEM
if (a4xx_gpu->ocmem_base)
ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
#endif
adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
kfree(a4xx_gpu);
}
......@@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
/* TODO this is different/missing upstream: */
struct ocmem_buf *ocmem_hdl =
ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
a4xx_gpu->ocmem_hdl = ocmem_hdl;
a4xx_gpu->ocmem_base = ocmem_hdl->addr;
adreno_gpu->gmem = ocmem_hdl->len;
DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
a4xx_gpu->ocmem_base);
#endif
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
&a4xx_gpu->ocmem);
if (ret)
goto fail;
}
if (!gpu->aspace) {
......
......@@ -16,8 +16,7 @@ struct a4xx_gpu {
struct adreno_gpu base;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
void *ocmem_hdl;
struct adreno_ocmem ocmem;
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
......
......@@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu)
* 2D mode 3 draw
*/
OUT_RING(ring, 0x0000000B);
} else if (adreno_is_a510(adreno_gpu)) {
/* Workaround for token and syncs */
OUT_RING(ring, 0x00000001);
} else {
/* No workarounds enabled */
OUT_RING(ring, 0x00000000);
......@@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
if (adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
if (adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x200 << 11 | 0x200 << 22));
} else {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
if (adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x400 << 11 | 0x300 << 22));
}
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
......@@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Enable ME/PFP split notification */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/*
* In A5x, CCU can send context_done event of a particular context to
* UCHE which ultimately reaches CP even when there is valid
* transaction of that context inside CCU. This can let CP to program
* config registers, which will make the "valid transaction" inside
* CCU to be interpreted differently. This can cause gpu fault. This
* bug is fixed in latest A510 revision. To enable this bug fix -
* bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1
* (disable). For older A510 version this bit is unused.
*/
if (adreno_is_a510(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
/* Enable HWCG */
a5xx_set_hwcg(gpu, true);
......@@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
if (adreno_is_a530(adreno_gpu))
if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
......@@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
a5xx_preempt_hw_init(gpu);
a5xx_gpmu_ucode_init(gpu);
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
if (ret)
......@@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
}
/*
* Try to load a zap shader into the secure world. If successful
* If the chip that we are using does support loading one, then
* try to load a zap shader into the secure world. If successful
* we can use the CP to switch out of secure mode. If not then we
* have no resource but to try to switch ourselves out manually. If we
* guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
......@@ -1066,6 +1093,7 @@ static void a5xx_dump(struct msm_gpu *gpu)
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Turn on the core power */
......@@ -1073,6 +1101,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
if (adreno_is_a510(adreno_gpu)) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_set_hwcg(gpu, true);
/* Turn on sp_input_clk at HM level */
gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
return 0;
}
/* Turn the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
......@@ -1101,9 +1138,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u32 mask = 0xf;
/* A510 has 3 XIN ports in VBIF */
if (adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
mask) == mask);
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
......@@ -1289,7 +1334,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref)
kfree(a5xx_state);
}
int a5xx_gpu_state_put(struct msm_gpu_state *state)
static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
......@@ -1299,8 +1344,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
int i, j;
u32 pos = 0;
......
......@@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Not all A5xx chips have a GPMU */
if (adreno_is_a510(adreno_gpu))
return 0;
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
......@@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
if (adreno_is_a510(adreno_gpu))
return;
if (a5xx_gpu->gpmu_bo)
return;
......
......@@ -114,6 +114,21 @@ static const struct adreno_info gpulist[] = {
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 1, 0, ANY_ID),
.revn = 510,
.name = "A510",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = SZ_256K,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
......
......@@ -14,6 +14,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
......@@ -893,6 +894,45 @@ static int adreno_get_pwrlevels(struct device *dev,
return 0;
}
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
struct adreno_ocmem *adreno_ocmem)
{
struct ocmem_buf *ocmem_hdl;
struct ocmem *ocmem;
ocmem = of_get_ocmem(dev);
if (IS_ERR(ocmem)) {
if (PTR_ERR(ocmem) == -ENODEV) {
/*
* Return success since either the ocmem property was
* not specified in device tree, or ocmem support is
* not compiled into the kernel.
*/
return 0;
}
return PTR_ERR(ocmem);
}
ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
if (IS_ERR(ocmem_hdl))
return PTR_ERR(ocmem_hdl);
adreno_ocmem->ocmem = ocmem;
adreno_ocmem->base = ocmem_hdl->addr;
adreno_ocmem->hdl = ocmem_hdl;
adreno_gpu->gmem = ocmem_hdl->len;
return 0;
}
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
if (adreno_ocmem && adreno_ocmem->base)
ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
adreno_ocmem->hdl);
}
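/*
 * Not part of the patch hunks here, but a condensed view of the pattern the
 * a3xx/a4xx changes above follow when using these helpers (the function
 * names in this sketch are illustrative):
 */
static int example_gpu_setup_gmem(struct device *dev,
				  struct adreno_gpu *adreno_gpu,
				  struct adreno_ocmem *ocmem)
{
	/*
	 * Carves GMEM out of OCMEM when the DT provides an "sram" phandle;
	 * returns 0 without touching *ocmem when no OCMEM is described.
	 */
	return adreno_gpu_ocmem_init(dev, adreno_gpu, ocmem);
}

static void example_gpu_teardown_gmem(struct adreno_ocmem *ocmem)
{
	/* No-op unless an OCMEM region was actually allocated. */
	adreno_gpu_ocmem_cleanup(ocmem);
}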
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
......
......@@ -126,6 +126,12 @@ struct adreno_gpu {
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
struct adreno_ocmem {
struct ocmem *ocmem;
unsigned long base;
void *hdl;
};
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
......@@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
static inline int adreno_is_a510(struct adreno_gpu *gpu)
{
return gpu->revn == 510;
}
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
......@@ -236,6 +247,10 @@ void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
......
......@@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
enum dpu_intr_type intr_type, u32 instance_idx)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.irq_idx_lookup)
if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
......@@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
unsigned long irq_flags;
int ret = 0, enable_count;
if (!dpu_kms || !dpu_kms->hw_intr ||
if (!dpu_kms->hw_intr ||
!dpu_kms->irq_obj.enable_counts ||
!dpu_kms->irq_obj.irq_counts) {
DPU_ERROR("invalid params\n");
......@@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
if (!dpu_kms || !irq_idxs || !irq_count) {
if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
......@@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
int ret = 0, enable_count;
if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
......@@ -169,7 +168,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
if (!dpu_kms || !irq_idxs || !irq_count) {
if (!irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
......@@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
if (!dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.get_interrupt_status)
return 0;
......@@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
......@@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
{
unsigned long irq_flags;
if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
if (!dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
......@@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.clear_all_irqs)
if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
return;
dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
......@@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.disable_all_irqs)
if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
return;
dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
......@@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device private\n");
return;
}
priv = dpu_kms->dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_clear_all_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
......@@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device private\n");
return;
}
priv = dpu_kms->dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
......
......@@ -32,18 +32,7 @@ enum dpu_perf_mode {
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
if (!crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid device\n");
return NULL;
}
priv = crtc->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid kms\n");
return NULL;
}
return to_dpu_kms(priv->kms);
}
......@@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
if (!kms->catalog) {
DPU_ERROR("invalid parameters\n");
return 0;
}
......@@ -215,7 +204,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
if (!crtc) {
......@@ -224,13 +212,12 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return;
}
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
......@@ -287,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
......@@ -297,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
if (!kms->catalog) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
priv = kms->dev->dev_private;
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
......
......@@ -266,11 +266,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
if (!crtc || !crtc->dev) {
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return INTF_MODE_NONE;
}
/*
* TODO: This function is called from dpu debugfs and as part of atomic
* check. When called from debugfs, the crtc->mutex must be held to
* read crtc->state. However reading crtc->state from atomic check isn't
* allowed (unless you have a good reason, a big comment, and a deep
* understanding of how the atomic/modeset locks work (<- and this is
* probably not possible)). So we'll keep the WARN_ON here for now, but
* really we need to figure out a better way to track our operating mode
*/
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* TODO: Returns the first INTF_MODE, could there be multiple values? */
......@@ -694,7 +703,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
unsigned long flags;
bool release_bandwidth = false;
if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
if (!crtc || !crtc->state) {
DPU_ERROR("invalid crtc\n");
return;
}
......@@ -766,7 +775,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct msm_drm_private *priv;
bool request_bandwidth;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
......@@ -1288,13 +1297,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
struct msm_drm_private *priv = NULL;
struct dpu_kms *kms = NULL;
int i;
priv = dev->dev_private;
kms = to_dpu_kms(priv->kms);
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
if (!dpu_crtc)
return ERR_PTR(-ENOMEM);
......
......@@ -645,11 +645,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
}
hw_mdptop = dpu_kms->hw_mdp;
if (!hw_mdptop) {
DPU_ERROR("invalid mdptop\n");
......@@ -735,8 +730,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
struct msm_drm_private *priv;
bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
!drm_enc->crtc) {
if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
......@@ -1092,17 +1086,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
......@@ -1184,7 +1174,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct drm_display_mode *mode;
int i = 0;
if (!drm_enc) {
......@@ -1193,9 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
} else if (!drm_enc->dev) {
DPU_ERROR("invalid dev\n");
return;
} else if (!drm_enc->dev->dev_private) {
DPU_ERROR("invalid dev_private\n");
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
......@@ -1204,8 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
mode = &drm_enc->crtc->state->adjusted_mode;
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
......@@ -1734,8 +1718,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t)
struct msm_drm_private *priv;
struct msm_drm_thread *event_thread;
if (!drm_enc->dev || !drm_enc->dev->dev_private ||
!drm_enc->crtc) {
if (!drm_enc->dev || !drm_enc->crtc) {
DPU_ERROR("invalid parameters\n");
return;
}
......@@ -1914,8 +1897,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
static const struct file_operations debugfs_status_fops = {
......@@ -1927,14 +1908,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
if (!drm_enc->dev) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
/* create overall sub-directory for the encoder */
......@@ -2042,9 +2020,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
enum dpu_intf_type intf_type;
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc || !dpu_kms) {
DPU_ERROR("invalid arg(s), enc %d kms %d\n",
dpu_enc != 0, dpu_kms != 0);
if (!dpu_enc) {
DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
return -EINVAL;
}
......@@ -2133,14 +2110,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
struct msm_drm_private *priv;
u32 event;
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
if (!drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
return;
}
priv = drm_enc->dev->dev_private;
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
......
......@@ -124,13 +124,11 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc || !phys_enc->hw_ctl)
return;
DPU_ATRACE_BEGIN("ctl_start_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
......@@ -316,13 +314,9 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
......@@ -355,7 +349,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!phys_enc || !phys_enc->hw_pp) {
......@@ -373,11 +366,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
}
dpu_kms = phys_enc->dpu_kms;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device\n");
return;
}
priv = dpu_kms->dev->dev_private;
/*
* TE default: dsi byte clock calculated base on 70 fps;
......@@ -650,13 +638,10 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
int rc;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
......
......@@ -374,7 +374,7 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (!phys_enc || !phys_enc->dpu_kms) {
if (!phys_enc) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
......@@ -566,16 +566,13 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
unsigned long lock_flags;
int ret;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
!phys_enc->parent->dev->dev_private) {
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
priv = phys_enc->parent->dev->dev_private;
if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
......
......@@ -30,10 +30,6 @@
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
static const char * const iommu_ports[] = {
"mdp_0",
};
/*
* To enable overall DRM driver logging
* # echo 0x2 > /sys/module/drm/parameters/debug
......@@ -68,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
{
struct dpu_kms *kms = (struct dpu_kms *)s->private;
struct msm_drm_private *priv;
struct dpu_danger_safe_status status;
int i;
if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
if (!kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
priv = kms->dev->dev_private;
memset(&status, 0, sizeof(struct dpu_danger_safe_status));
pm_runtime_get_sync(&kms->pdev->dev);
......@@ -153,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
return 0;
dev = dpu_kms->dev;
if (!dev)
return 0;
priv = dev->dev_private;
if (!priv)
return 0;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
......@@ -280,7 +268,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
......@@ -292,10 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev || !dev->dev_private)
return;
priv = dev->dev_private;
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
drm_for_each_encoder_mask(encoder, crtc->dev,
......@@ -333,7 +316,6 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder)
if (funcs && funcs->commit)
funcs->commit(encoder);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
drm_for_each_crtc(crtc, dev) {
if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
continue;
......@@ -464,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
} else if (!dpu_kms->dev) {
DPU_ERROR("invalid dev\n");
return;
} else if (!dpu_kms->dev->dev_private) {
DPU_ERROR("invalid dev_private\n");
return;
}
priv = dpu_kms->dev->dev_private;
for (i = 0; i < priv->num_crtcs; i++)
......@@ -505,7 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
dev = dpu_kms->dev;
priv = dev->dev_private;
catalog = dpu_kms->catalog;
......@@ -585,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
int i;
dev = dpu_kms->dev;
if (!dev)
return;
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
......@@ -725,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
mmu = dpu_kms->base.aspace->mmu;
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
mmu->funcs->detach(mmu);
msm_gem_address_space_put(dpu_kms->base.aspace);
dpu_kms->base.aspace = NULL;
......@@ -752,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return PTR_ERR(aspace);
}
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DPU_ERROR("failed to attach iommu %d\n", ret);
msm_gem_address_space_put(aspace);
......@@ -803,16 +770,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
return rc;
}
atomic_set(&dpu_kms->bandwidth_ref, 0);
......@@ -974,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
struct dpu_kms *dpu_kms;
int irq;
if (!dev || !dev->dev_private) {
if (!dev) {
DPU_ERROR("drm device node invalid\n");
return ERR_PTR(-EINVAL);
}
......@@ -1064,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
......@@ -1086,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
......
......@@ -139,10 +139,6 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
/* get struct msm_kms * from drm_device * */
#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
* Debugfs functions - extra helper functions for debugfs support
*
......
......@@ -154,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
u32 ot_lim;
int ret, i;
if (!dpu_kms) {
DPU_ERROR("invalid arguments\n");
return;
}
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
......@@ -214,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
if (!params || !dpu_kms->hw_mdp) {
DPU_ERROR("invalid arguments\n");
return;
}
......
......@@ -157,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
}
}
static const char * const iommu_ports[] = {
"mdp_port0_cb0", "mdp_port1_cb0",
};
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
......@@ -172,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
......@@ -524,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret)
goto fail;
} else {
......
......@@ -14,7 +14,7 @@ struct mdp5_cfg_handler {
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74v1_config = {
static const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
......@@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.max_clk = 200000000,
};
const struct mdp5_cfg_hw msm8x74v2_config = {
static const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
......@@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
static const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
......@@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = {
.max_clk = 320000000,
};
const struct mdp5_cfg_hw msm8x16_config = {
static const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
......@@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
const struct mdp5_cfg_hw msm8x94_config = {
static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
......@@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
.max_clk = 400000000,
};
const struct mdp5_cfg_hw msm8x96_config = {
static const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
......@@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
const struct mdp5_cfg_hw msm8917_config = {
const struct mdp5_cfg_hw msm8x76_config = {
.name = "msm8x76",
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
MDP_CAP_DSC |
MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.smp = {
.mmb_count = 10,
.mmb_size = 10240,
.clients = {
[SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
[SSPP_DMA0] = 4,
[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
},
},
.pipe_vig = {
.count = 2,
.base = { 0x04000, 0x06000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x440DC },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY, },
{ .id = 1, .pp = -1, .dspp = -1,
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 3,
.base = { 0x70000, 0x70800, 0x72000 },
},
.dsc = {
.count = 2,
.base = { 0x80000, 0x80400 },
},
.intf = {
.base = { 0x6a000, 0x6a800, 0x6b000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
},
},
.max_clk = 360000000,
};
static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
.count = 1,
......@@ -630,7 +727,7 @@ const struct mdp5_cfg_hw msm8917_config = {
.max_clk = 320000000,
};
const struct mdp5_cfg_hw msm8998_config = {
static const struct mdp5_cfg_hw msm8998_config = {
.name = "msm8998",
.mdp = {
.count = 1,
......@@ -745,6 +842,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
};
......
......@@ -214,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
......@@ -232,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc)
u32 val;
#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
......
......@@ -19,10 +19,6 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
static const char *iommu_ports[] = {
"mdp_0",
};
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
......@@ -233,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
}
......@@ -314,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
if (mdp5_kms->tbu_rt_clk)
clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
if (mdp5_kms->tbu_clk)
clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
......@@ -334,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->core_clk);
if (mdp5_kms->lut_clk)
clk_prepare_enable(mdp5_kms->lut_clk);
if (mdp5_kms->tbu_clk)
clk_prepare_enable(mdp5_kms->tbu_clk);
if (mdp5_kms->tbu_rt_clk)
clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
......@@ -466,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/*
* Construct encoders and modeset initialize connector devices
* for each external display interface.
......@@ -737,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
......@@ -974,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
......
......@@ -53,6 +53,8 @@ struct mdp5_kms {
struct clk *ahb_clk;
struct clk *core_clk;
struct clk *lut_clk;
struct clk *tbu_clk;
struct clk *tbu_rt_clk;
struct clk *vsync_clk;
/*
......
......@@ -121,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
uint32_t blkcfg = 0;
nplanes = info->num_planes;
......@@ -135,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
* them together, writes to SMP using a single client.
*/
if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
fmt = DRM_FORMAT_NV24;
nplanes = 2;
/* if decimation is enabled, HW decimates less on the
......
......@@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
.num_dsi = 1,
};
static const char * const dsi_8976_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 3,
.regs = {
{"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
},
.bus_clk_names = dsi_8976_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
.io_start = { 0x1a94000, 0x1a96000 },
.num_dsi = 2,
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
......@@ -147,7 +167,7 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
......@@ -158,7 +178,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_v2,
};
const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
......@@ -169,7 +189,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
.calc_clk_rate = dsi_calc_clk_rate_6g,
};
const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
......@@ -197,6 +217,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
&msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
......
......@@ -17,6 +17,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
......
......@@ -1293,14 +1293,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u8 *buf, int rx_byte, int pkt_size)
{
u32 *lp, *temp, data;
u32 *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
u8 reg[16];
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
lp = (u32 *)buf;
temp = (u32 *)reg;
cnt = (rx_byte + 3) >> 2;
if (cnt > 4)
......
......@@ -145,7 +145,7 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8, lpx;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
......@@ -175,7 +175,6 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
tmin = max_t(s32, temp, 0);
......@@ -262,7 +261,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8, lpx;
s32 ui, ui_x8;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
......@@ -284,7 +283,6 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
tmin = max_t(s32, temp, 0);
......@@ -485,6 +483,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
.data = &dsi_phy_28nm_hpm_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
.data = &dsi_phy_28nm_hpm_famb_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
.data = &dsi_phy_28nm_lp_cfgs },
#endif
......
......@@ -40,6 +40,7 @@ struct msm_dsi_phy_cfg {
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
......
......@@ -39,15 +39,10 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
if (!enable) {
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
return;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
......@@ -56,6 +51,39 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}
static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
if (phy->cfg->type == MSM_DSI_PHY_28NM_LP)
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
else
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
if (!enable) {
dsi_phy_write(phy->reg_base +
REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
return;
}
if (phy->regulator_ldo_mode)
dsi_28nm_phy_regulator_enable_ldo(phy);
else
dsi_28nm_phy_regulator_enable_dcdc(phy);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
......@@ -77,8 +105,6 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
dsi_28nm_phy_regulator_ctrl(phy, true);
dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
dsi_28nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
......@@ -142,6 +168,24 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.type = MSM_DSI_PHY_28NM_HPM,
.src_pll_truthtable = { {true, true}, {false, true} },
.reg_cfg = {
.num = 1,
.regs = {
{"vddio", 100000, 100},
},
},
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
.io_start = { 0x1a94400, 0x1a96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.type = MSM_DSI_PHY_28NM_LP,
.src_pll_truthtable = { {true, true}, {true, true} },
......
......@@ -29,8 +29,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
if (ret != -EPROBE_DEFER) {
DRM_DEV_ERROR(dev,
"failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
}
return ret;
}
......
......@@ -16,6 +16,7 @@
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
/*
* Power Management:
......@@ -838,7 +839,7 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
return ERR_CAST(aspace);
}
ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
ret = aspace->mmu->funcs->attach(aspace->mmu);
if (ret) {
msm_gem_address_space_put(aspace);
return ERR_PTR(ret);
......@@ -995,8 +996,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
msm_gem_address_space_put(gpu->aspace);
}
}
......@@ -21,14 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
static int msm_gpummu_attach(struct msm_mmu *mmu)
{
return 0;
}
static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
......
......@@ -23,16 +23,14 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
static int msm_iommu_attach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
......
......@@ -10,8 +10,8 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*attach)(struct msm_mmu *mmu);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
......
......@@ -298,7 +298,7 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
uint64_t iova, uint32_t size)
uint64_t iova, uint32_t size, bool full)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
......@@ -318,6 +318,9 @@ static void snapshot_buf(struct msm_rd_state *rd,
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
if (!full)
return;
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
......@@ -381,18 +384,21 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++)
if (should_dump(submit, i))
snapshot_buf(rd, submit, i, 0, 0);
snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4);
submit->cmd[i].iova, szd * 4, true);
}
}
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
......
......@@ -74,6 +74,16 @@ config QCOM_MDT_LOADER
tristate
select QCOM_SCM
config QCOM_OCMEM
tristate "Qualcomm On Chip Memory (OCMEM) driver"
depends on ARCH_QCOM
select QCOM_SCM
help
The On Chip Memory (OCMEM) allocator allows various clients to
allocate memory from OCMEM based on performance, latency and power
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
......
......@@ -6,6 +6,7 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* The On Chip Memory (OCMEM) allocator allows various clients to allocate
* memory from OCMEM based on performance, latency and power requirements.
* This is typically used by the GPU, camera/video, and audio components on
* some Snapdragon SoCs.
*
* Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
* Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/ocmem.h>
enum region_mode {
WIDE_MODE = 0x0,
THIN_MODE,
MODE_DEFAULT = WIDE_MODE,
};
enum ocmem_macro_state {
PASSTHROUGH = 0,
PERI_ON = 1,
CORE_ON = 2,
CLK_OFF = 4,
};
struct ocmem_region {
bool interleaved;
enum region_mode mode;
unsigned int num_macros;
enum ocmem_macro_state macro_state[4];
unsigned long macro_size;
unsigned long region_size;
};
struct ocmem_config {
uint8_t num_regions;
unsigned long macro_size;
};
struct ocmem {
struct device *dev;
const struct ocmem_config *config;
struct resource *memory;
void __iomem *mmio;
unsigned int num_ports;
unsigned int num_macros;
bool interleaved;
struct ocmem_region *regions;
unsigned long active_allocations;
};
#define OCMEM_MIN_ALIGN SZ_64K
#define OCMEM_MIN_ALLOC SZ_64K
#define OCMEM_REG_HW_VERSION 0x00000000
#define OCMEM_REG_HW_PROFILE 0x00000004
#define OCMEM_REG_REGION_MODE_CTL 0x00001000
#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001
#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002
#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004
#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008
#define OCMEM_REG_GFX_MPU_START 0x00001004
#define OCMEM_REG_GFX_MPU_END 0x00001008
#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_PREP(0x0000000f, (val))
#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_PREP(0x00003f00, (val))
#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000
#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000
#define OCMEM_REG_GEN_STATUS 0x0000000c
#define OCMEM_REG_PSGSC_STATUS 0x00000038
#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0))
#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val))
#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val))
#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val))
#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val))
#define OCMEM_CLK_CORE_IDX 0
static struct clk_bulk_data ocmem_clks[] = {
{
.id = "core",
},
{
.id = "iface",
},
};
static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
{
writel(data, ocmem->mmio + reg);
}
static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
{
return readl(ocmem->mmio + reg);
}
static void update_ocmem(struct ocmem *ocmem)
{
uint32_t region_mode_ctrl = 0x0;
int i;
if (!qcom_scm_ocmem_lock_available()) {
for (i = 0; i < ocmem->config->num_regions; i++) {
struct ocmem_region *region = &ocmem->regions[i];
if (region->mode == THIN_MODE)
region_mode_ctrl |= BIT(i);
}
dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
region_mode_ctrl);
ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
}
for (i = 0; i < ocmem->config->num_regions; i++) {
struct ocmem_region *region = &ocmem->regions[i];
u32 data;
data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);
ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
}
}
static unsigned long phys_to_offset(struct ocmem *ocmem,
unsigned long addr)
{
if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
return 0;
return addr - ocmem->memory->start;
}
static unsigned long device_address(struct ocmem *ocmem,
enum ocmem_client client,
unsigned long addr)
{
WARN_ON(client != OCMEM_GRAPHICS);
/* TODO: gpu uses phys_to_offset, but others do not.. */
return phys_to_offset(ocmem, addr);
}
static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
enum ocmem_macro_state mstate, enum region_mode rmode)
{
unsigned long offset = 0;
int i, j;
for (i = 0; i < ocmem->config->num_regions; i++) {
struct ocmem_region *region = &ocmem->regions[i];
if (buf->offset <= offset && offset < buf->offset + buf->len)
region->mode = rmode;
for (j = 0; j < region->num_macros; j++) {
if (buf->offset <= offset &&
offset < buf->offset + buf->len)
region->macro_state[j] = mstate;
offset += region->macro_size;
}
}
update_ocmem(ocmem);
}
struct ocmem *of_get_ocmem(struct device *dev)
{
struct platform_device *pdev;
struct device_node *devnode;
devnode = of_parse_phandle(dev->of_node, "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
return ERR_PTR(-EPROBE_DEFER);
}
return platform_get_drvdata(pdev);
}
EXPORT_SYMBOL(of_get_ocmem);
struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
unsigned long size)
{
struct ocmem_buf *buf;
int ret;
/* TODO: add support for other clients... */
if (WARN_ON(client != OCMEM_GRAPHICS))
return ERR_PTR(-ENODEV);
if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
return ERR_PTR(-EINVAL);
if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
return ERR_PTR(-EBUSY);
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
}
buf->offset = 0;
buf->addr = device_address(ocmem, client, buf->offset);
buf->len = size;
update_range(ocmem, buf, CORE_ON, WIDE_MODE);
if (qcom_scm_ocmem_lock_available()) {
ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
buf->offset, buf->len, WIDE_MODE);
if (ret) {
dev_err(ocmem->dev, "could not lock: %d\n", ret);
ret = -EINVAL;
goto err_kfree;
}
} else {
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
buf->offset + buf->len);
}
dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
size / 1024, buf->addr, client);
return buf;
err_kfree:
kfree(buf);
err_unlock:
clear_bit_unlock(BIT(client), &ocmem->active_allocations);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ocmem_allocate);
void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
struct ocmem_buf *buf)
{
/* TODO: add support for other clients... */
if (WARN_ON(client != OCMEM_GRAPHICS))
return;
update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);
if (qcom_scm_ocmem_lock_available()) {
int ret;
ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
buf->offset, buf->len);
if (ret)
dev_err(ocmem->dev, "could not unlock: %d\n", ret);
} else {
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
}
kfree(buf);
clear_bit_unlock(BIT(client), &ocmem->active_allocations);
}
EXPORT_SYMBOL(ocmem_free);
static int ocmem_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
unsigned long reg, region_size;
int i, j, ret, num_banks;
struct resource *res;
struct ocmem *ocmem;
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
if (!ocmem)
return -ENOMEM;
ocmem->dev = dev;
ocmem->config = device_get_match_data(dev);
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Unable to get clocks\n");
return ret;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
ocmem->mmio = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ocmem->mmio)) {
dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
return PTR_ERR(ocmem->mmio);
}
ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mem");
if (!ocmem->memory) {
dev_err(dev, "Could not get mem region\n");
return -ENXIO;
}
/* The core clock is synchronous with graphics */
WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) < 0);
ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks);
if (ret) {
dev_info(ocmem->dev, "Failed to enable clocks\n");
return ret;
}
if (qcom_scm_restore_sec_cfg_available()) {
dev_dbg(dev, "configuring scm\n");
ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
if (ret) {
dev_err(dev, "Could not enable secure configuration\n");
goto err_clk_disable;
}
}
reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);
num_banks = ocmem->num_ports / 2;
region_size = ocmem->config->macro_size * num_banks;
dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
ocmem->num_ports, ocmem->config->num_regions,
ocmem->num_macros, ocmem->interleaved ? "" : "not ");
ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
sizeof(struct ocmem_region), GFP_KERNEL);
if (!ocmem->regions) {
ret = -ENOMEM;
goto err_clk_disable;
}
for (i = 0; i < ocmem->config->num_regions; i++) {
struct ocmem_region *region = &ocmem->regions[i];
if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
ret = -EINVAL;
goto err_clk_disable;
}
region->mode = MODE_DEFAULT;
region->num_macros = num_banks;
if (i == (ocmem->config->num_regions - 1) &&
reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
region->macro_size = ocmem->config->macro_size / 2;
region->region_size = region_size / 2;
} else {
region->macro_size = ocmem->config->macro_size;
region->region_size = region_size;
}
for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
region->macro_state[j] = CLK_OFF;
}
platform_set_drvdata(pdev, ocmem);
return 0;
err_clk_disable:
clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
return ret;
}
static int ocmem_dev_remove(struct platform_device *pdev)
{
clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
return 0;
}
static const struct ocmem_config ocmem_8974_config = {
.num_regions = 3,
.macro_size = SZ_128K,
};
static const struct of_device_id ocmem_of_match[] = {
{ .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
{ }
};
MODULE_DEVICE_TABLE(of, ocmem_of_match);
static struct platform_driver ocmem_driver = {
.probe = ocmem_dev_probe,
.remove = ocmem_dev_remove,
.driver = {
.name = "ocmem",
.of_match_table = ocmem_of_match,
},
};
module_platform_driver(ocmem_driver);
MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
MODULE_LICENSE("GPL v2");
......@@ -24,6 +24,26 @@ struct qcom_scm_vmperm {
int perm;
};
enum qcom_scm_ocmem_client {
QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
QCOM_SCM_OCMEM_GRAPHICS_ID,
QCOM_SCM_OCMEM_VIDEO_ID,
QCOM_SCM_OCMEM_LP_AUDIO_ID,
QCOM_SCM_OCMEM_SENSORS_ID,
QCOM_SCM_OCMEM_OTHER_OS_ID,
QCOM_SCM_OCMEM_DEBUG_ID,
};
enum qcom_scm_sec_dev_id {
QCOM_SCM_MDSS_DEV_ID = 1,
QCOM_SCM_OCMEM_DEV_ID = 5,
QCOM_SCM_PCIE0_DEV_ID = 11,
QCOM_SCM_PCIE1_DEV_ID = 12,
QCOM_SCM_GFX_DEV_ID = 18,
QCOM_SCM_UFS_DEV_ID = 19,
QCOM_SCM_ICE_DEV_ID = 20,
};
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
#define QCOM_SCM_VMID_WLAN 0x18
......@@ -41,6 +61,11 @@ extern bool qcom_scm_is_available(void);
extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
extern bool qcom_scm_ocmem_lock_available(void);
extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
u32 size, u32 mode);
extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
u32 size);
extern bool qcom_scm_pas_supported(u32 peripheral);
extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
size_t size);
......@@ -55,6 +80,7 @@ extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern bool qcom_scm_restore_sec_cfg_available(void);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* The On Chip Memory (OCMEM) allocator allows various clients to allocate
* memory from OCMEM based on performance, latency and power requirements.
* This is typically used by the GPU, camera/video, and audio components on
* some Snapdragon SoCs.
*
* Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
* Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/device.h>
#include <linux/err.h>
#ifndef __OCMEM_H__
#define __OCMEM_H__
enum ocmem_client {
/* GMEM clients */
OCMEM_GRAPHICS = 0x0,
/*
* TODO add more once ocmem_allocate() is clever enough to
* deal with multiple clients.
*/
OCMEM_CLIENT_MAX,
};
struct ocmem;
struct ocmem_buf {
unsigned long offset;
unsigned long addr;
unsigned long len;
};
#if IS_ENABLED(CONFIG_QCOM_OCMEM)
struct ocmem *of_get_ocmem(struct device *dev);
struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
unsigned long size);
void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
struct ocmem_buf *buf);
#else /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
static inline struct ocmem *of_get_ocmem(struct device *dev)
{
return ERR_PTR(-ENODEV);
}
static inline struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem,
enum ocmem_client client,
unsigned long size)
{
return ERR_PTR(-ENODEV);
}
static inline void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
struct ocmem_buf *buf)
{
}
#endif /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
#endif /* __OCMEM_H__ */
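For orientation, a minimal sketch of how a client driver might consume the interface declared above; the function name, the device pointer, and the SZ_1M size are illustrative assumptions for this sketch and are not part of this merge:

/* Illustrative consumer of the OCMEM client API; not part of this merge. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <soc/qcom/ocmem.h>

static int example_ocmem_client_init(struct device *dev)
{
	struct ocmem *ocmem;
	struct ocmem_buf *buf;

	/* Follows the "sram" phandle in this device's DT node. */
	ocmem = of_get_ocmem(dev);
	if (IS_ERR(ocmem))
		return PTR_ERR(ocmem);	/* may be -EPROBE_DEFER */

	/* Size must be a 64K-aligned multiple (OCMEM_MIN_ALIGN in ocmem.c). */
	buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Program buf->addr and buf->len into the hardware here. */

	ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
	return 0;
}

When CONFIG_QCOM_OCMEM is disabled, the inline stubs above return -ENODEV, so a caller written this way degrades gracefully on SoCs without OCMEM.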