Commit 59e7a8cc authored by Dave Airlie

Merge tag 'drm-msm-next-2020-03-22' of https://gitlab.freedesktop.org/drm/msm into drm-next

A bit smaller this time around.. there are still a couple uabi
additions for vulkan waiting in the wings, but I punted on them this
cycle due to running low on time.  (They should be easy enough to
rebase, and if it is a problem for anyone I can push a next+uabi
branch so that tu work can proceed.)

The bigger change is refactoring dpu resource manager and moving dpu
to use atomic global state.  Other than that, it is mostly cleanups
and fixes.

From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuf1R4Xz-t9Z7_cwx9jD=b4wUvvwfqA5cHR8fCSXSd5XQ@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
parents 5fc0df93 a5fb8b91
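
The DPU part of this pull moves resource-manager bookkeeping into a DRM
atomic private object ("global state"). As a rough sketch of how the helpers
added in the diffs below fit together (condensed and simplified, not verbatim
code from this series; example_dpu_flow() is an illustrative name, error
handling and locking elided):

/* Sketch only: condensed from the dpu_encoder/dpu_rm hunks below. */
static void example_dpu_flow(struct dpu_kms *dpu_kms,
			     struct drm_encoder *drm_enc,
			     struct drm_crtc_state *crtc_state,
			     struct msm_display_topology topology)
{
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	int num_ctl, ret;

	/* atomic check: record block assignments in the shared state */
	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
			     crtc_state, topology);

	/* mode_set: query what was assigned instead of re-reserving */
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl,
			ARRAY_SIZE(hw_ctl));

	/* disable: hand the blocks back by clearing the state entries */
	dpu_rm_release(global_state, drm_enc);

	/* (return values are of course checked in the real code) */
}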
Qualcomm adreno/snapdragon GMU (Graphics management unit)
The GMU is a programmable power controller for the GPU. The CPU controls the
GMU, which in turn handles power controls for the GPU.
Required properties:
- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
Note that you need to list the less specific "qcom,adreno-gmu"
for generic matches and the more specific identifier to identify
the specific device.
- reg: Physical base address and length of the GMU registers.
- reg-names: Matching names for the register regions
* "gmu"
* "gmu_pdc"
* "gmu_pdc_seq"
- interrupts: The interrupt signals from the GMU.
- interrupt-names: Matching names for the interrupts
* "hfi"
* "gmu"
- clocks: phandles to the device clocks
- clock-names: Matching names for the clocks
* "gmu"
* "cxo"
* "axi"
* "mnoc"
- power-domains: should be:
<&clock_gpucc GPU_CX_GDSC>
<&clock_gpucc GPU_GX_GDSC>
- power-domain-names: Matching names for the power domains
- iommus: phandle to the adreno iommu
- operating-points-v2: phandle to the OPP operating points
Optional properties:
- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
Example:
/ {
	...

	gmu: gmu@506a000 {
		compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";

		reg = <0x506a000 0x30000>,
		      <0xb280000 0x10000>,
		      <0xb480000 0x10000>;
		reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";

		interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "hfi", "gmu";

		clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
			 <&gpucc GPU_CC_CXO_CLK>,
			 <&gcc GCC_DDRSS_GPU_AXI_CLK>,
			 <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
		clock-names = "gmu", "cxo", "axi", "memnoc";

		power-domains = <&gpucc GPU_CX_GDSC>,
				<&gpucc GPU_GX_GDSC>;
		power-domain-names = "cx", "gx";

		iommus = <&adreno_smmu 5>;

		operating-points-v2 = <&gmu_opp_table>;
	};
};
a3xx example with OCMEM support:
/ {
	...

	gpu: adreno@fdb00000 {
		compatible = "qcom,adreno-330.2",
			     "qcom,adreno";
		reg = <0xfdb00000 0x10000>;
		reg-names = "kgsl_3d0_reg_memory";
		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "kgsl_3d0_irq";
		clock-names = "core",
			      "iface",
			      "mem_iface";
		clocks = <&mmcc OXILI_GFX3D_CLK>,
			 <&mmcc OXILICX_AHB_CLK>,
			 <&mmcc OXILICX_AXI_CLK>;
		sram = <&gmu_sram>;
		power-domains = <&mmcc OXILICX_GDSC>;
		operating-points-v2 = <&gpu_opp_table>;
		iommus = <&gpu_iommu 0>;
	};

	ocmem@fdd00000 {
		compatible = "qcom,msm8974-ocmem";

		reg = <0xfdd00000 0x2000>,
		      <0xfec00000 0x180000>;
		reg-names = "ctrl",
			    "mem";

		clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
			 <&mmcc OCMEMCX_OCMEMNOC_CLK>;
		clock-names = "core",
			      "iface";

		#address-cells = <1>;
		#size-cells = <1>;

		gmu_sram: gmu-sram@0 {
			reg = <0x0 0x100000>;
			ranges = <0 0 0xfec00000 0x100000>;
		};
	};
};
# SPDX-License-Identifier: GPL-2.0-only
# Copyright 2019-2020, The Linux Foundation, All Rights Reserved
%YAML 1.2
---

$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"

title: Devicetree bindings for the GMU attached to certain Adreno GPUs

maintainers:
  - Rob Clark <robdclark@gmail.com>

description: |
  These bindings describe the Graphics Management Unit (GMU) that is attached
  to members of the Adreno A6xx GPU family. The GMU provides on-device power
  management and support to improve power efficiency and reduce the load on
  the CPU.
properties:
  compatible:
    items:
      - enum:
          - qcom,adreno-gmu-630.2
      - const: qcom,adreno-gmu

  reg:
    items:
      - description: Core GMU registers
      - description: GMU PDC registers
      - description: GMU PDC sequence registers

  reg-names:
    items:
      - const: gmu
      - const: gmu_pdc
      - const: gmu_pdc_seq

  clocks:
    items:
      - description: GMU clock
      - description: GPU CX clock
      - description: GPU AXI clock
      - description: GPU MEMNOC clock

  clock-names:
    items:
      - const: gmu
      - const: cxo
      - const: axi
      - const: memnoc

  interrupts:
    items:
      - description: GMU HFI interrupt
      - description: GMU interrupt

  interrupt-names:
    items:
      - const: hfi
      - const: gmu

  power-domains:
    items:
      - description: CX power domain
      - description: GX power domain

  power-domain-names:
    items:
      - const: cx
      - const: gx

  iommus:
    maxItems: 1

  operating-points-v2: true

required:
  - compatible
  - reg
  - reg-names
  - clocks
  - clock-names
  - interrupts
  - interrupt-names
  - power-domains
  - power-domain-names
  - iommus
  - operating-points-v2
examples:
  - |
    #include <dt-bindings/clock/qcom,gpucc-sdm845.h>
    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    gmu: gmu@506a000 {
            compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";

            reg = <0x506a000 0x30000>,
                  <0xb280000 0x10000>,
                  <0xb480000 0x10000>;
            reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";

            clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
                     <&gpucc GPU_CC_CXO_CLK>,
                     <&gcc GCC_DDRSS_GPU_AXI_CLK>,
                     <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
            clock-names = "gmu", "cxo", "axi", "memnoc";

            interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
            interrupt-names = "hfi", "gmu";

            power-domains = <&gpucc GPU_CX_GDSC>,
                            <&gpucc GPU_GX_GDSC>;
            power-domain-names = "cx", "gx";

            iommus = <&adreno_smmu 5>;

            operating-points-v2 = <&gmu_opp_table>;
    };
...@@ -35,25 +35,54 @@ Required properties: ...@@ -35,25 +35,54 @@ Required properties:
bring the GPU out of secure mode. bring the GPU out of secure mode.
- firmware-name: optional property of the 'zap-shader' node, listing the - firmware-name: optional property of the 'zap-shader' node, listing the
relative path of the device specific zap firmware. relative path of the device specific zap firmware.
- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and
a4xx Snapdragon SoCs. See
Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
Example 3xx/4xx/a5xx: Example 3xx/4xx:
/ { / {
... ...
gpu: qcom,kgsl-3d0@4300000 { gpu: adreno@fdb00000 {
compatible = "qcom,adreno-320.2", "qcom,adreno"; compatible = "qcom,adreno-330.2",
reg = <0x04300000 0x20000>; "qcom,adreno";
reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory"; reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 0>; interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clock-names = interrupt-names = "kgsl_3d0_irq";
"core", clock-names = "core",
"iface", "iface",
"mem_iface"; "mem_iface";
clocks = clocks = <&mmcc OXILI_GFX3D_CLK>,
<&mmcc GFX3D_CLK>, <&mmcc OXILICX_AHB_CLK>,
<&mmcc GFX3D_AHB_CLK>, <&mmcc OXILICX_AXI_CLK>;
<&mmcc MMSS_IMEM_AHB_CLK>; sram = <&gpu_sram>;
power-domains = <&mmcc OXILICX_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 0>;
};
gpu_sram: ocmem@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
<0xfec00000 0x180000>;
reg-names = "ctrl",
"mem";
clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
<&mmcc OCMEMCX_OCMEMNOC_CLK>;
clock-names = "core",
"iface";
#address-cells = <1>;
#size-cells = <1>;
gpu_sram: gpu-sram@0 {
reg = <0x0 0x100000>;
ranges = <0 0 0xfec00000 0x100000>;
};
}; };
}; };
......
...@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = { ...@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev) static void check_speed_bin(struct device *dev)
{ {
struct nvmem_cell *cell; struct nvmem_cell *cell;
u32 bin, val; u32 val;
/*
* If the OPP table specifies a opp-supported-hw property then we have
* to set something with dev_pm_opp_set_supported_hw() or the table
* doesn't get populated so pick an arbitrary value that should
* ensure the default frequencies are selected but not conflict with any
* actual bins
*/
val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin"); cell = nvmem_cell_get(dev, "speed_bin");
/* If a nvmem cell isn't defined, nothing to do */ if (!IS_ERR(cell)) {
if (IS_ERR(cell)) void *buf = nvmem_cell_read(cell, NULL);
return;
bin = *((u32 *) nvmem_cell_read(cell, NULL)); if (!IS_ERR(buf)) {
nvmem_cell_put(cell); u8 bin = *((u8 *) buf);
val = (1 << bin); val = (1 << bin);
kfree(buf);
}
nvmem_cell_put(cell);
}
dev_pm_opp_set_supported_hw(dev, &val, 1); dev_pm_opp_set_supported_hw(dev, &val, 1);
} }
......
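
For context on the check_speed_bin() change above: the value handed to
dev_pm_opp_set_supported_hw() is matched bit-wise against each OPP's
opp-supported-hw cells, and an OPP stays enabled only when the masks overlap.
A small stand-alone illustration of that matching rule (my paraphrase of the
OPP core behaviour, not code from this series):

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: an OPP entry carrying opp-supported-hw is usable when
 * its mask shares a bit with the value the driver registered. With no
 * speed_bin fuse the driver above registers the arbitrary 0x80 so that the
 * default frequencies stay selectable without colliding with real bins.
 */
static bool opp_usable(uint32_t opp_supported_hw, uint32_t driver_mask)
{
	return (opp_supported_hw & driver_mask) != 0;
}

/* e.g. a bin-1 part registers (1 << 1) == 0x2 and matches only OPPs whose
 * opp-supported-hw mask includes bit 1. */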
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h> #include <linux/interconnect.h>
#include <linux/pm_domain.h> #include <linux/pm_domain.h>
#include <linux/pm_opp.h> #include <linux/pm_opp.h>
...@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) ...@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{ {
int count, i;
u64 iova;
if (IS_ERR_OR_NULL(bo)) if (IS_ERR_OR_NULL(bo))
return; return;
count = bo->size >> PAGE_SHIFT; dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
iova = bo->iova;
for (i = 0; i < count; i++, iova += PAGE_SIZE) {
iommu_unmap(gmu->domain, iova, PAGE_SIZE);
__free_pages(bo->pages[i], 0);
}
kfree(bo->pages);
kfree(bo); kfree(bo);
} }
...@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, ...@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
size_t size) size_t size)
{ {
struct a6xx_gmu_bo *bo; struct a6xx_gmu_bo *bo;
int ret, count, i;
bo = kzalloc(sizeof(*bo), GFP_KERNEL); bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo) if (!bo)
...@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, ...@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
bo->size = PAGE_ALIGN(size); bo->size = PAGE_ALIGN(size);
count = bo->size >> PAGE_SHIFT; bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); if (!bo->virt) {
if (!bo->pages) {
kfree(bo); kfree(bo);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
for (i = 0; i < count; i++) {
bo->pages[i] = alloc_page(GFP_KERNEL);
if (!bo->pages[i])
goto err;
}
bo->iova = gmu->uncached_iova_base;
for (i = 0; i < count; i++) {
ret = iommu_map(gmu->domain,
bo->iova + (PAGE_SIZE * i),
page_to_phys(bo->pages[i]), PAGE_SIZE,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1 ; i >= 0; i--)
iommu_unmap(gmu->domain,
bo->iova + (PAGE_SIZE * i),
PAGE_SIZE);
goto err;
}
}
bo->virt = vmap(bo->pages, count, VM_IOREMAP,
pgprot_writecombine(PAGE_KERNEL));
if (!bo->virt)
goto err;
/* Align future IOVA addresses on 1MB boundaries */
gmu->uncached_iova_base += ALIGN(size, SZ_1M);
return bo; return bo;
err:
for (i = 0; i < count; i++) {
if (bo->pages[i])
__free_pages(bo->pages[i], 0);
}
kfree(bo->pages);
kfree(bo);
return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
int ret;
/*
* The GMU address space is hardcoded to treat the range
* 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
* between the GMU and the CPU will live in this space
*/
gmu->uncached_iova_base = 0x60000000;
gmu->domain = iommu_domain_alloc(&platform_bus_type);
if (!gmu->domain)
return -ENODEV;
ret = iommu_attach_device(gmu->domain, gmu->dev);
if (ret) {
iommu_domain_free(gmu->domain);
gmu->domain = NULL;
}
return ret;
} }
/* Return the 'arc-level' for the given frequency */ /* Return the 'arc-level' for the given frequency */
...@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) ...@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_memory_free(gmu, gmu->hfi); a6xx_gmu_memory_free(gmu, gmu->hfi);
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
free_irq(gmu->gmu_irq, gmu); free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu); free_irq(gmu->hfi_irq, gmu);
...@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) ...@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev; gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true); /* Pass force_dma false to require the DT to set the dma region */
ret = of_dma_configure(gmu->dev, node, false);
if (ret)
return ret;
/* Set the mask after the of_dma_configure() */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
if (ret)
return ret;
/* Fow now, don't do anything fancy until we get our feet under us */ /* Fow now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE; gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
...@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) ...@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret) if (ret)
goto err_put_device; goto err_put_device;
/* Set up the IOMMU context bank */
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
goto err_put_device;
/* Allocate memory for for the HFI queues */ /* Allocate memory for for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi)) if (IS_ERR(gmu->hfi))
...@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) ...@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
err_memory: err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi); a6xx_gmu_memory_free(gmu, gmu->hfi);
if (gmu->domain) {
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
}
ret = -ENODEV; ret = -ENODEV;
err_put_device: err_put_device:
......
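
Condensing the a6xx_gmu.c hunks above into one place: the hand-rolled
alloc_page()/iommu_map()/vmap() path is replaced by the DMA API, with the DT
(via dma-ranges) describing the GMU's memory window. A minimal sketch of the
resulting setup and allocation flow (not verbatim from the diff; example_*
names are illustrative):

static int example_gmu_dma_init(struct a6xx_gmu *gmu, struct device_node *node)
{
	int ret;

	/* force_dma == false: the DT must provide the DMA configuration
	 * (dma-ranges) rather than the driver building its own IOMMU domain. */
	ret = of_dma_configure(gmu->dev, node, false);
	if (ret)
		return ret;

	/* A 31-bit mask keeps allocations below the 0x80000000 end of the
	 * GMU's uncached window. */
	return dma_set_mask_and_coherent(gmu->dev, DMA_BIT_MASK(31));
}

static struct a6xx_gmu_bo *example_gmu_alloc(struct a6xx_gmu *gmu, size_t size)
{
	struct a6xx_gmu_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);
	/* One call replaces the per-page alloc/iommu_map/vmap loop; it is
	 * paired with dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova). */
	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
	if (!bo->virt) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	return bo;
}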
...@@ -12,8 +12,7 @@ ...@@ -12,8 +12,7 @@
struct a6xx_gmu_bo { struct a6xx_gmu_bo {
void *virt; void *virt;
size_t size; size_t size;
u64 iova; dma_addr_t iova;
struct page **pages;
}; };
/* /*
...@@ -49,9 +48,6 @@ struct a6xx_gmu { ...@@ -49,9 +48,6 @@ struct a6xx_gmu {
int hfi_irq; int hfi_irq;
int gmu_irq; int gmu_irq;
struct iommu_domain *domain;
u64 uncached_iova_base;
struct device *gxpd; struct device *gxpd;
int idle_level; int idle_level;
......
...@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers { ...@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
}; };
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
}; };
......
...@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) ...@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return NULL; return NULL;
for (i = 0; i < l; i++) for (i = 0; i < l; i++)
buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s", buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out)); ascii85_encode(src[i], out));
return buf; return buf;
......
...@@ -164,7 +164,6 @@ enum dpu_enc_rc_states { ...@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
* clks and resources after IDLE_TIMEOUT time. * clks and resources after IDLE_TIMEOUT time.
* @vsync_event_work: worker to handle vsync event for autorefresh * @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display * @topology: topology of the display
* @mode_set_complete: flag to indicate modeset completion
* @idle_timeout: idle timeout duration in milliseconds * @idle_timeout: idle timeout duration in milliseconds
*/ */
struct dpu_encoder_virt { struct dpu_encoder_virt {
...@@ -202,7 +201,6 @@ struct dpu_encoder_virt { ...@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
struct delayed_work delayed_off_work; struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work; struct kthread_work vsync_event_work;
struct msm_display_topology topology; struct msm_display_topology topology;
bool mode_set_complete;
u32 idle_timeout; u32 idle_timeout;
}; };
...@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config( ...@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
struct msm_display_info *disp_info; struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) { if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return; return;
} }
...@@ -562,12 +560,13 @@ static int dpu_encoder_virt_atomic_check( ...@@ -562,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
const struct drm_display_mode *mode; const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode; struct drm_display_mode *adj_mode;
struct msm_display_topology topology; struct msm_display_topology topology;
struct dpu_global_state *global_state;
int i = 0; int i = 0;
int ret = 0; int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) { if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
drm_enc != 0, crtc_state != 0, conn_state != 0); drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL; return -EINVAL;
} }
...@@ -578,6 +577,7 @@ static int dpu_encoder_virt_atomic_check( ...@@ -578,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms); dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode; mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode; adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc)); trace_dpu_enc_atomic_check(DRMID(drm_enc));
/* /*
...@@ -609,17 +609,15 @@ static int dpu_encoder_virt_atomic_check( ...@@ -609,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating AtomicTest phase */ /* Reserve dynamic resources now. */
if (!ret) { if (!ret) {
/* /*
* Avoid reserving resources when mode set is pending. Topology * Avoid reserving resources when mode set is pending. Topology
* info may not be available to complete reservation. * info may not be available to complete reservation.
*/ */
if (drm_atomic_crtc_needs_modeset(crtc_state) if (drm_atomic_crtc_needs_modeset(crtc_state)) {
&& dpu_enc->mode_set_complete) { ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, drm_enc, crtc_state, topology);
topology, true);
dpu_enc->mode_set_complete = false;
} }
} }
...@@ -956,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, ...@@ -956,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter; struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc; struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate; struct dpu_crtc_state *cstate;
struct dpu_rm_hw_iter hw_iter; struct dpu_global_state *global_state;
struct msm_display_topology topology; struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL }; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
int num_lm = 0, num_ctl = 0; struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
int i, j, ret; int num_lm, num_ctl, num_pp;
int i, j;
if (!drm_enc) { if (!drm_enc) {
DPU_ERROR("invalid encoder\n"); DPU_ERROR("invalid encoder\n");
...@@ -975,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, ...@@ -975,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms); dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list; connector_list = &dpu_kms->dev->mode_config.connector_list;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
DPU_ERROR("Failed to get global state");
return;
}
trace_dpu_enc_mode_set(DRMID(drm_enc)); trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head) list_for_each_entry(conn_iter, connector_list, head)
...@@ -995,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, ...@@ -995,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */ /* Query resource that have been reserved in atomic check step. */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state, num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
topology, false); drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
if (ret) { ARRAY_SIZE(hw_pp));
DPU_ERROR_ENC(dpu_enc, num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
"failed to reserve hw resources, %d\n", ret); drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
return; num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
} drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
}
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
num_ctl++;
}
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) : NULL;
break;
hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
num_lm++;
}
cstate = to_dpu_crtc_state(drm_crtc->state); cstate = to_dpu_crtc_state(drm_crtc->state);
for (i = 0; i < num_lm; i++) { for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
cstate->mixers[i].hw_lm = hw_lm[i]; cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx]; cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
} }
cstate->num_mixers = num_lm; cstate->num_mixers = num_lm;
for (i = 0; i < dpu_enc->num_phys_encs; i++) { for (i = 0; i < dpu_enc->num_phys_encs; i++) {
int num_blk;
struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) { if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc, DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i); "no pp block assigned at idx: %d\n", i);
goto error; return;
} }
if (!hw_ctl[i]) { if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc, DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i); "no ctl block assigned at idx: %d\n", i);
goto error; return;
} }
phys->hw_pp = dpu_enc->hw_pp[i]; phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i]; phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
DPU_HW_BLK_INTF); global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { hw_blk, ARRAY_SIZE(hw_blk));
for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf; struct dpu_hw_intf *hw_intf;
if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) hw_intf = to_dpu_hw_intf(hw_blk[i]);
break;
hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
if (hw_intf->idx == phys->intf_idx) if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf; phys->hw_intf = hw_intf;
} }
...@@ -1073,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, ...@@ -1073,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!phys->hw_intf) { if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc, DPU_ERROR_ENC(dpu_enc,
"no intf block assigned at idx: %d\n", i); "no intf block assigned at idx: %d\n", i);
goto error; return;
} }
phys->connector = conn->state->connector; phys->connector = conn->state->connector;
if (phys->ops.mode_set) if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode); phys->ops.mode_set(phys, mode, adj_mode);
} }
dpu_enc->mode_set_complete = true;
error:
dpu_rm_release(&dpu_kms->rm, drm_enc);
} }
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
...@@ -1181,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) ...@@ -1181,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL; struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv; struct msm_drm_private *priv;
struct dpu_kms *dpu_kms; struct dpu_kms *dpu_kms;
struct dpu_global_state *global_state;
int i = 0; int i = 0;
if (!drm_enc) { if (!drm_enc) {
...@@ -1199,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) ...@@ -1199,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private; priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms); dpu_kms = to_dpu_kms(priv->kms);
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc)); trace_dpu_enc_disable(DRMID(drm_enc));
...@@ -1228,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) ...@@ -1228,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc); dpu_rm_release(global_state, drm_enc);
mutex_unlock(&dpu_enc->enc_lock); mutex_unlock(&dpu_enc->enc_lock);
} }
...@@ -1964,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs( ...@@ -1964,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) { if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc)); PTR_ERR(enc));
return enc == 0 ? -EINVAL : PTR_ERR(enc); return enc == NULL ? -EINVAL : PTR_ERR(enc);
} }
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
...@@ -1977,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs( ...@@ -1977,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) { if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc)); PTR_ERR(enc));
return enc == 0 ? -EINVAL : PTR_ERR(enc); return enc == NULL ? -EINVAL : PTR_ERR(enc);
} }
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
...@@ -2008,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, ...@@ -2008,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params phys_params; struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) { if (!dpu_enc) {
DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL; return -EINVAL;
} }
......
...@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config( ...@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
to_dpu_encoder_phys_cmd(phys_enc); to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return; return;
} }
...@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper( ...@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
u32 flush_mask = 0; u32 flush_mask = 0;
if (!phys_enc->hw_pp) { if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return; return;
} }
......
...@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( ...@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_hw_intf_cfg intf_cfg = { 0 }; struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0); DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return; return;
} }
...@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) ...@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
if (!phys_enc->hw_intf) { if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return; return;
} }
......
...@@ -89,6 +89,16 @@ struct dpu_hw_intf { ...@@ -89,6 +89,16 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops; struct dpu_hw_intf_ops ops;
}; };
/**
* to_dpu_hw_intf - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_intf, base);
}
/** /**
* dpu_hw_intf_init(): Initializes the intf driver for the passed * dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx. * interface idx.
......
...@@ -96,6 +96,16 @@ struct dpu_hw_pingpong { ...@@ -96,6 +96,16 @@ struct dpu_hw_pingpong {
struct dpu_hw_pingpong_ops ops; struct dpu_hw_pingpong_ops ops;
}; };
/**
* to_dpu_hw_pingpong - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pingpong, base);
}
/** /**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx. * pingpong idx.
......
...@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) ...@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{ {
struct dpu_debugfs_regset32 *regset = s->private; struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms; struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base; void __iomem *base;
uint32_t i, addr; uint32_t i, addr;
if (!dpu_kms->mmio) if (!dpu_kms->mmio)
return 0; return 0;
dev = dpu_kms->dev;
priv = dev->dev_private;
base = dpu_kms->mmio + regset->offset; base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */ /* insert padding spaces, if needed */
...@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) ...@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
} }
#endif #endif
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
return to_dpu_global_state(dpu_kms->global_state.state);
}
/*
* This acquires the modeset lock set aside for global state, creates
* a new duplicated private object state.
*/
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_private_state *priv_state;
int ret;
ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s,
&dpu_kms->global_state);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_dpu_global_state(priv_state);
}
static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
struct dpu_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct dpu_global_state *dpu_state = to_dpu_global_state(state);
kfree(dpu_state);
}
static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
.atomic_duplicate_state = dpu_kms_global_duplicate_state,
.atomic_destroy_state = dpu_kms_global_destroy_state,
};
static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
struct dpu_global_state *state;
drm_modeset_lock_init(&dpu_kms->global_state_lock);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
&state->base,
&dpu_kms_global_state_funcs);
return 0;
}
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{ {
return dpu_crtc_vblank(crtc, true); return dpu_crtc_vblank(crtc, true);
...@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) ...@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
static void dpu_kms_prepare_commit(struct msm_kms *kms, static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct dpu_kms *dpu_kms;
struct drm_device *dev;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state; struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder; struct drm_encoder *encoder;
...@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, ...@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
if (!kms) if (!kms)
return; return;
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
/* Call prepare_commit for all affected encoders */ /* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
...@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, ...@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{ {
struct drm_device *dev;
int i; int i;
dev = dpu_kms->dev;
if (dpu_kms->hw_intr) if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr); dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL; dpu_kms->hw_intr = NULL;
...@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
{ {
struct dpu_kms *dpu_kms; struct dpu_kms *dpu_kms;
struct drm_device *dev; struct drm_device *dev;
struct msm_drm_private *priv;
int i, rc = -EINVAL; int i, rc = -EINVAL;
if (!kms) { if (!kms) {
...@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms); dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev; dev = dpu_kms->dev;
priv = dev->dev_private;
rc = dpu_kms_global_obj_init(dpu_kms);
if (rc)
return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0); atomic_set(&dpu_kms->bandwidth_ref, 0);
...@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) ...@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
int rc = -1; int rc = -1;
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp; struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc) if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc); DPU_ERROR("clock disable failed rc:%d\n", rc);
......
...@@ -111,6 +111,13 @@ struct dpu_kms { ...@@ -111,6 +111,13 @@ struct dpu_kms {
struct dpu_core_perf perf; struct dpu_core_perf perf;
/*
* Global private object state, Do not access directly, use
* dpu_kms_global_get_state()
*/
struct drm_modeset_lock global_state_lock;
struct drm_private_obj global_state;
struct dpu_rm rm; struct dpu_rm rm;
bool rm_init; bool rm_init;
...@@ -139,6 +146,25 @@ struct vsync_info { ...@@ -139,6 +146,25 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base) #define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
/* Global private object state for tracking resources that are shared across
* multiple kms objects (planes/crtcs/etc).
*/
struct dpu_global_state {
struct drm_private_state base;
uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
};
struct dpu_global_state
*dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
struct dpu_global_state
*__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
/** /**
* Debugfs functions - extra helper functions for debugfs support * Debugfs functions - extra helper functions for debugfs support
* *
......
...@@ -12,8 +12,12 @@ ...@@ -12,8 +12,12 @@
#include "dpu_encoder.h" #include "dpu_encoder.h"
#include "dpu_trace.h" #include "dpu_trace.h"
#define RESERVED_BY_OTHER(h, r) \
((h)->enc_id && (h)->enc_id != r) static inline bool reserved_by_other(uint32_t *res_map, int idx,
uint32_t enc_id)
{
return res_map[idx] && res_map[idx] != enc_id;
}
/** /**
* struct dpu_rm_requirements - Reservation requirements parameter bundle * struct dpu_rm_requirements - Reservation requirements parameter bundle
...@@ -25,171 +29,43 @@ struct dpu_rm_requirements { ...@@ -25,171 +29,43 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res; struct dpu_encoder_hw_resources hw_res;
}; };
int dpu_rm_destroy(struct dpu_rm *rm)
/**
* struct dpu_rm_hw_blk - hardware block tracking list member
* @list: List head for list of all hardware blocks tracking items
* @id: Hardware ID number, within it's own space, ie. LM_X
* @enc_id: Encoder id to which this blk is binded
* @hw: Pointer to the hardware register access object for this block
*/
struct dpu_rm_hw_blk {
struct list_head list;
uint32_t id;
uint32_t enc_id;
struct dpu_hw_blk *hw;
};
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
enum dpu_hw_blk_type type)
{ {
memset(iter, 0, sizeof(*iter)); int i;
iter->enc_id = enc_id;
iter->type = type;
}
static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
{ struct dpu_hw_pingpong *hw;
struct list_head *blk_list;
if (!rm || !i || i->type >= DPU_HW_BLK_MAX) { if (rm->pingpong_blks[i]) {
DPU_ERROR("invalid rm\n"); hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
return false; dpu_hw_pingpong_destroy(hw);
} }
i->hw = NULL;
blk_list = &rm->hw_blks[i->type];
if (i->blk && (&i->blk->list == blk_list)) {
DPU_DEBUG("attempt resume iteration past last\n");
return false;
} }
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
struct dpu_hw_mixer *hw;
i->blk = list_prepare_entry(i->blk, blk_list, list); if (rm->mixer_blks[i]) {
hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
list_for_each_entry_continue(i->blk, blk_list, list) { dpu_hw_lm_destroy(hw);
if (i->enc_id == i->blk->enc_id) {
i->hw = i->blk->hw;
DPU_DEBUG("found type %d id %d for enc %d\n",
i->type, i->blk->id, i->enc_id);
return true;
} }
} }
for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
struct dpu_hw_ctl *hw;
DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id); if (rm->ctl_blks[i]) {
hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
return false;
}
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
bool ret;
mutex_lock(&rm->rm_lock);
ret = _dpu_rm_get_hw_locked(rm, i);
mutex_unlock(&rm->rm_lock);
return ret;
}
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
switch (type) {
case DPU_HW_BLK_LM:
dpu_hw_lm_destroy(hw);
break;
case DPU_HW_BLK_CTL:
dpu_hw_ctl_destroy(hw); dpu_hw_ctl_destroy(hw);
break;
case DPU_HW_BLK_PINGPONG:
dpu_hw_pingpong_destroy(hw);
break;
case DPU_HW_BLK_INTF:
dpu_hw_intf_destroy(hw);
break;
case DPU_HW_BLK_SSPP:
/* SSPPs are not managed by the resource manager */
case DPU_HW_BLK_TOP:
/* Top is a singleton, not managed in hw_blks list */
case DPU_HW_BLK_MAX:
default:
DPU_ERROR("unsupported block type %d\n", type);
break;
} }
}
int dpu_rm_destroy(struct dpu_rm *rm)
{
struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
enum dpu_hw_blk_type type;
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
list) {
list_del(&hw_cur->list);
_dpu_rm_hw_destroy(type, hw_cur->hw);
kfree(hw_cur);
}
}
mutex_destroy(&rm->rm_lock);
return 0;
}
static int _dpu_rm_hw_blk_create(
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
void __iomem *mmio,
enum dpu_hw_blk_type type,
uint32_t id,
const void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
void *hw;
switch (type) {
case DPU_HW_BLK_LM:
hw = dpu_hw_lm_init(id, mmio, cat);
break;
case DPU_HW_BLK_CTL:
hw = dpu_hw_ctl_init(id, mmio, cat);
break;
case DPU_HW_BLK_PINGPONG:
hw = dpu_hw_pingpong_init(id, mmio, cat);
break;
case DPU_HW_BLK_INTF:
hw = dpu_hw_intf_init(id, mmio, cat);
break;
case DPU_HW_BLK_SSPP:
/* SSPPs are not managed by the resource manager */
case DPU_HW_BLK_TOP:
/* Top is a singleton, not managed in hw_blks list */
case DPU_HW_BLK_MAX:
default:
DPU_ERROR("unsupported block type %d\n", type);
return -EINVAL;
} }
for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
struct dpu_hw_intf *hw;
if (IS_ERR_OR_NULL(hw)) { if (rm->intf_blks[i]) {
DPU_ERROR("failed hw object creation: type %d, err %ld\n", hw = to_dpu_hw_intf(rm->intf_blks[i]);
type, PTR_ERR(hw)); dpu_hw_intf_destroy(hw);
return -EFAULT;
} }
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk) {
_dpu_rm_hw_destroy(type, hw);
return -ENOMEM;
} }
blk->id = id;
blk->hw = hw;
blk->enc_id = 0;
list_add_tail(&blk->list, &rm->hw_blks[type]);
return 0; return 0;
} }
...@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm,
void __iomem *mmio) void __iomem *mmio)
{ {
int rc, i; int rc, i;
enum dpu_hw_blk_type type;
if (!rm || !cat || !mmio) { if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n"); DPU_ERROR("invalid kms\n");
...@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Clear, setup lists */ /* Clear, setup lists */
memset(rm, 0, sizeof(*rm)); memset(rm, 0, sizeof(*rm));
mutex_init(&rm->rm_lock);
for (type = 0; type < DPU_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
/* Interrogate HW catalog and create tracking items for hw blocks */ /* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) { for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i]; const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) { if (lm->pingpong == PINGPONG_MAX) {
...@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM, if (lm->id < LM_0 || lm->id >= LM_MAX) {
cat->mixer[i].id, &cat->mixer[i]); DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
if (rc) { continue;
DPU_ERROR("failed: lm hw not available\n"); }
hw = dpu_hw_lm_init(lm->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail; goto fail;
} }
rm->mixer_blks[lm->id - LM_0] = &hw->base;
if (!rm->lm_max_width) { if (!rm->lm_max_width) {
rm->lm_max_width = lm->sblk->maxwidth; rm->lm_max_width = lm->sblk->maxwidth;
...@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm,
} }
for (i = 0; i < cat->pingpong_count; i++) { for (i = 0; i < cat->pingpong_count; i++) {
rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG, struct dpu_hw_pingpong *hw;
cat->pingpong[i].id, &cat->pingpong[i]); const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
if (rc) {
DPU_ERROR("failed: pp hw not available\n"); if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
continue;
}
hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
rc);
goto fail; goto fail;
} }
rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
} }
for (i = 0; i < cat->intf_count; i++) { for (i = 0; i < cat->intf_count; i++) {
if (cat->intf[i].type == INTF_NONE) { struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
if (intf->type == INTF_NONE) {
DPU_DEBUG("skip intf %d with type none\n", i); DPU_DEBUG("skip intf %d with type none\n", i);
continue; continue;
} }
if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF, DPU_ERROR("skip intf %d with invalid id\n", intf->id);
cat->intf[i].id, &cat->intf[i]); continue;
if (rc) { }
DPU_ERROR("failed: intf hw not available\n"); hw = dpu_hw_intf_init(intf->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail; goto fail;
} }
rm->intf_blks[intf->id - INTF_0] = &hw->base;
} }
for (i = 0; i < cat->ctl_count; i++) { for (i = 0; i < cat->ctl_count; i++) {
rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL, struct dpu_hw_ctl *hw;
cat->ctl[i].id, &cat->ctl[i]); const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
if (rc) {
DPU_ERROR("failed: ctl hw not available\n"); if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
continue;
}
hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail; goto fail;
} }
rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
} }
return 0; return 0;
...@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm,
fail: fail:
dpu_rm_destroy(rm); dpu_rm_destroy(rm);
return rc; return rc ? rc : -EFAULT;
} }
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
...@@ -287,86 +187,82 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) ...@@ -287,86 +187,82 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
return top->num_intf > 1; return top->num_intf > 1;
} }
/**
* _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
* @peer_idx: index of other mixer in rm->mixer_blks[]
* @Return: true if rm->mixer_blks[peer_idx] is a peer of
* rm->mixer_blks[primary_idx]
*/
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
int peer_idx)
{
const struct dpu_lm_cfg *prim_lm_cfg;
const struct dpu_lm_cfg *peer_cfg;
prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
peer_cfg->id);
return false;
}
return true;
}
/** /**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like * proposed use case requirements, incl. hardwired dependent blocks like
* pingpong * pingpong
* @rm: dpu resource manager handle * @rm: dpu resource manager handle
* @enc_id: encoder id requesting for allocation * @enc_id: encoder id requesting for allocation
* @reqs: proposed use case requirements * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
* @lm: proposed layer mixer, function checks if lm, and all other hardwired * if lm, and all other hardwired blocks connected to the lm (pp) is
* blocks connected to the lm (pp) is available and appropriate * available and appropriate
* @pp: output parameter, pingpong block attached to the layer mixer. * @pp_idx: output parameter, index of pingpong block attached to the layer
* NULL if pp was not available, or not matching requirements. * mixer in rm->pongpong_blks[].
* @primary_lm: if non-null, this function check if lm is compatible primary_lm
* as well as satisfying all other requirements
* @Return: true if lm matches all requirements, false otherwise * @Return: true if lm matches all requirements, false otherwise
*/ */
static bool _dpu_rm_check_lm_and_get_connected_blks( static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_rm *rm, struct dpu_global_state *global_state,
uint32_t enc_id, uint32_t enc_id, int lm_idx, int *pp_idx)
struct dpu_rm_requirements *reqs,
struct dpu_rm_hw_blk *lm,
struct dpu_rm_hw_blk **pp,
struct dpu_rm_hw_blk *primary_lm)
{ {
const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap; const struct dpu_lm_cfg *lm_cfg;
struct dpu_rm_hw_iter iter; int idx;
*pp = NULL;
DPU_DEBUG("check lm %d pp %d\n",
lm_cfg->id, lm_cfg->pingpong);
/* Check if this layer mixer is a peer of the proposed primary LM */
if (primary_lm) {
const struct dpu_lm_cfg *prim_lm_cfg =
to_dpu_hw_mixer(primary_lm->hw)->cap;
if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
prim_lm_cfg->id);
return false;
}
}
/* Already reserved? */ /* Already reserved? */
if (RESERVED_BY_OTHER(lm, enc_id)) { if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false; return false;
} }
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG); lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
while (_dpu_rm_get_hw_locked(rm, &iter)) { idx = lm_cfg->pingpong - PINGPONG_0;
if (iter.blk->id == lm_cfg->pingpong) { if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
*pp = iter.blk;
break;
}
}
if (!*pp) {
DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
return false; return false;
} }
if (RESERVED_BY_OTHER(*pp, enc_id)) { if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
(*pp)->id); lm_cfg->pingpong);
return false; return false;
} }
*pp_idx = idx;
return true; return true;
} }
static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_rm_requirements *reqs) struct dpu_rm_requirements *reqs)
{ {
struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; int lm_idx[MAX_BLOCKS];
struct dpu_rm_hw_blk *pp[MAX_BLOCKS]; int pp_idx[MAX_BLOCKS];
struct dpu_rm_hw_iter iter_i, iter_j; int i, j, lm_count = 0;
int lm_count = 0;
int i, rc = 0;
if (!reqs->topology.num_lm) { if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
...@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, ...@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
} }
/* Find a primary mixer */ /* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
while (lm_count != reqs->topology.num_lm && lm_count < reqs->topology.num_lm; i++) {
_dpu_rm_get_hw_locked(rm, &iter_i)) { if (!rm->mixer_blks[i])
memset(&lm, 0, sizeof(lm)); continue;
memset(&pp, 0, sizeof(pp));
lm_count = 0; lm_count = 0;
lm[lm_count] = iter_i.blk; lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks( if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
rm, enc_id, reqs, lm[lm_count], enc_id, i, &pp_idx[lm_count])) {
&pp[lm_count], NULL))
continue; continue;
}
++lm_count; ++lm_count;
/* Valid primary mixer found, find matching peers */ /* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
lm_count < reqs->topology.num_lm; j++) {
if (!rm->mixer_blks[j])
continue;
while (lm_count != reqs->topology.num_lm && if (!_dpu_rm_check_lm_peer(rm, i, j)) {
_dpu_rm_get_hw_locked(rm, &iter_j)) { DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
if (iter_i.blk == iter_j.blk) LM_0 + i);
continue; continue;
}
if (!_dpu_rm_check_lm_and_get_connected_blks( if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
rm, enc_id, reqs, iter_j.blk, global_state, enc_id, j,
&pp[lm_count], iter_i.blk)) &pp_idx[lm_count])) {
continue; continue;
}
lm[lm_count] = iter_j.blk; lm_idx[lm_count] = j;
++lm_count; ++lm_count;
} }
} }
...@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, ...@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
return -ENAVAIL; return -ENAVAIL;
} }
for (i = 0; i < ARRAY_SIZE(lm); i++) { for (i = 0; i < lm_count; i++) {
if (!lm[i]) global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
break; global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
lm[i]->enc_id = enc_id;
pp[i]->enc_id = enc_id;
trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
} }
return rc; return 0;
} }
 static int _dpu_rm_reserve_ctls(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
 		const struct msm_display_topology *top)
 {
-	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
-	struct dpu_rm_hw_iter iter;
-	int i = 0, num_ctls = 0;
-	bool needs_split_display = false;
-
-	memset(&ctls, 0, sizeof(ctls));
+	int ctl_idx[MAX_BLOCKS];
+	int i = 0, j, num_ctls;
+	bool needs_split_display;

 	/* each hw_intf needs its own hw_ctrl to program its control path */
 	num_ctls = top->num_intf;

 	needs_split_display = _dpu_rm_needs_split_display(top);

-	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
-	while (_dpu_rm_get_hw_locked(rm, &iter)) {
-		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
-		unsigned long features = ctl->caps->features;
+	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+		const struct dpu_hw_ctl *ctl;
+		unsigned long features;
 		bool has_split_display;

-		if (RESERVED_BY_OTHER(iter.blk, enc_id))
+		if (!rm->ctl_blks[j])
+			continue;
+
+		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
 			continue;

+		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+		features = ctl->caps->features;
+
 		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

-		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);

 		if (needs_split_display != has_split_display)
 			continue;

-		ctls[i] = iter.blk;
-		DPU_DEBUG("ctl %d match\n", iter.blk->id);
+		ctl_idx[i] = j;
+		DPU_DEBUG("ctl %d match\n", j + CTL_0);

 		if (++i == num_ctls)
 			break;
 	}

 	if (i != num_ctls)
 		return -ENAVAIL;

-	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
-		ctls[i]->enc_id = enc_id;
-		trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
+	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+		trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
 	}

 	return 0;
@@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls(
 static int _dpu_rm_reserve_intf(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
-		uint32_t id,
-		enum dpu_hw_blk_type type)
+		uint32_t id)
 {
-	struct dpu_rm_hw_iter iter;
-	int ret = 0;
-
-	/* Find the block entry in the rm, and note the reservation */
-	dpu_rm_init_hw_iter(&iter, 0, type);
-	while (_dpu_rm_get_hw_locked(rm, &iter)) {
-		if (iter.blk->id != id)
-			continue;
+	int idx = id - INTF_0;

-		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
-			DPU_ERROR("type %d id %d already reserved\n", type, id);
-			return -ENAVAIL;
-		}
+	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
+		DPU_ERROR("invalid intf id: %d", id);
+		return -EINVAL;
+	}

-		iter.blk->enc_id = enc_id;
-		trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
-		break;
-	}
+	if (!rm->intf_blks[idx]) {
+		DPU_ERROR("couldn't find intf id %d\n", id);
+		return -EINVAL;
+	}

-	/* Shouldn't happen since intfs are fixed at probe */
-	if (!iter.hw) {
-		DPU_ERROR("couldn't find type %d id %d\n", type, id);
-		return -EINVAL;
+	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
+		DPU_ERROR("intf id %d already reserved\n", id);
+		return -ENAVAIL;
 	}

-	return ret;
+	global_state->intf_to_enc_id[idx] = enc_id;
+
+	return 0;
 }
 static int _dpu_rm_reserve_intf_related_hw(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
 		struct dpu_encoder_hw_resources *hw_res)
 {
@@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw(
 		if (hw_res->intfs[i] == INTF_MODE_NONE)
 			continue;
 		id = i + INTF_0;
-		ret = _dpu_rm_reserve_intf(rm, enc_id, id,
-				DPU_HW_BLK_INTF);
+		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
 		if (ret)
 			return ret;
 	}
@@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw(
 static int _dpu_rm_make_reservation(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
 		struct dpu_rm_requirements *reqs)
 {
 	int ret;

-	ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
 	if (ret) {
 		DPU_ERROR("unable to find appropriate mixers\n");
 		return ret;
 	}

-	ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+				&reqs->topology);
 	if (ret) {
 		DPU_ERROR("unable to find appropriate CTL\n");
 		return ret;
 	}

-	ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+	ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
+					      &reqs->hw_res);
 	if (ret)
 		return ret;
@@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation(
 }
 static int _dpu_rm_populate_requirements(
-		struct dpu_rm *rm,
 		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
 		struct dpu_rm_requirements *reqs,
 		struct msm_display_topology req_topology)
 {
@@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements(
 	return 0;
 }
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+				  uint32_t enc_id)
 {
-	struct dpu_rm_hw_blk *blk;
-	enum dpu_hw_blk_type type;
+	int i;

-	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
-		list_for_each_entry(blk, &rm->hw_blks[type], list) {
-			if (blk->enc_id == enc_id) {
-				blk->enc_id = 0;
-				DPU_DEBUG("rel enc %d %d %d\n", enc_id,
-						type, blk->id);
-			}
-		}
+	for (i = 0; i < cnt; i++) {
+		if (res_mapping[i] == enc_id)
+			res_mapping[i] = 0;
 	}
 }
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_global_state *global_state,
+		    struct drm_encoder *enc)
 {
-	mutex_lock(&rm->rm_lock);
-
-	_dpu_rm_release_reservation(rm, enc->base.id);
-
-	mutex_unlock(&rm->rm_lock);
+	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
+		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
 }
 int dpu_rm_reserve(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *enc,
 		struct drm_crtc_state *crtc_state,
-		struct msm_display_topology topology,
-		bool test_only)
+		struct msm_display_topology topology)
 {
 	struct dpu_rm_requirements reqs;
 	int ret;
@@ -616,31 +508,75 @@ int dpu_rm_reserve(
 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
 		return 0;

-	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
-		      enc->base.id, crtc_state->crtc->base.id, test_only);
+	if (IS_ERR(global_state)) {
+		DPU_ERROR("failed to global state\n");
+		return PTR_ERR(global_state);
+	}

-	mutex_lock(&rm->rm_lock);
+	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+		      enc->base.id, crtc_state->crtc->base.id);

-	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
-					    topology);
+	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
 	if (ret) {
 		DPU_ERROR("failed to populate hw requirements\n");
-		goto end;
+		return ret;
 	}

-	ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
-	if (ret) {
+	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+	if (ret)
 		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-		_dpu_rm_release_reservation(rm, enc->base.id);
-	} else if (test_only) {
-		/* test_only: test the reservation and then undo */
-		DPU_DEBUG("test_only: discard test [enc: %d]\n",
-				enc->base.id);
-		_dpu_rm_release_reservation(rm, enc->base.id);
-	}
-
-end:
-	mutex_unlock(&rm->rm_lock);

 	return ret;
 }
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_INTF:
		hw_blks = rm->intf_blks;
		hw_to_enc_id = global_state->intf_to_enc_id;
		max_blks = ARRAY_SIZE(rm->intf_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
					blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
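On the consumer side, dpu_rm_get_assigned_resources() is just a scan of the chosen per-type mapping array, copying every block whose entry matches the encoder id into the caller's array. Below is a minimal standalone model of that lookup; struct blk, pingpong_blks[] and get_assigned() are hypothetical simplifications, not the dpu_rm types.

/* Standalone model of the enc_id -> hw block lookup (hypothetical data
 * layout; not the in-kernel dpu_rm code). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct blk { int id; };

static struct blk pingpong_blks[4] = { {0}, {1}, {2}, {3} };
static uint32_t pingpong_to_enc_id[4];        /* 0 == unassigned */

/* Copy every block assigned to enc_id into blks[], up to blks_size entries. */
static int get_assigned(uint32_t enc_id, struct blk **blks, int blks_size)
{
        int i, num_blks = 0;

        for (i = 0; i < (int)ARRAY_SIZE(pingpong_blks); i++) {
                if (pingpong_to_enc_id[i] != enc_id)
                        continue;
                if (num_blks == blks_size)
                        break;        /* caller's array is full */
                blks[num_blks++] = &pingpong_blks[i];
        }
        return num_blks;
}

int main(void)
{
        struct blk *mine[4];
        int n, i;

        pingpong_to_enc_id[1] = 42;        /* pretend the reserve step assigned PP 1 and 3 */
        pingpong_to_enc_id[3] = 42;

        n = get_assigned(42, mine, ARRAY_SIZE(mine));
        for (i = 0; i < n; i++)
                printf("encoder 42 owns pingpong %d\n", mine[i]->id);
        return 0;
}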
@@ -11,37 +11,24 @@
 #include "msm_kms.h"
 #include "dpu_hw_top.h"

+struct dpu_global_state;
+
 /**
  * struct dpu_rm - DPU dynamic hardware resource manager
- * @hw_blks: array of lists of hardware resources present in the system, one
- *	list per type of hardware block
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @intf_blks: array of intf hardware resources
  * @lm_max_width: cached layer mixer maximum width
  * @rm_lock: resource manager mutex
  */
 struct dpu_rm {
-	struct list_head hw_blks[DPU_HW_BLK_MAX];
-	uint32_t lm_max_width;
-	struct mutex rm_lock;
-};
+	struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+	struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+	struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+	struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];

-/**
- * struct dpu_rm_hw_blk - resource manager internal structure
- *	forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
-	void *hw;
-	struct dpu_rm_hw_blk *blk;
-	uint32_t enc_id;
-	enum dpu_hw_blk_type type;
+	uint32_t lm_max_width;
 };

 /**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
  * @drm_enc: DRM Encoder handle
  * @crtc_state: Proposed Atomic DRM CRTC State handle
  * @topology: Pointer to topology info for the display
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
  * @Return: 0 on Success otherwise -ERROR
  */
 int dpu_rm_reserve(struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *drm_enc,
 		struct drm_crtc_state *crtc_state,
-		struct msm_display_topology topology,
-		bool test_only);
+		struct msm_display_topology topology);

 /**
  * dpu_rm_reserve - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
  * @enc: DRM Encoder handle
  * @Return: 0 on Success otherwise -ERROR
  */
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_global_state *global_state,
+		struct drm_encoder *enc);

 /**
- * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
- *	using dpu_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-void dpu_rm_init_hw_iter(
-		struct dpu_rm_hw_iter *iter,
-		uint32_t enc_id,
-		enum dpu_hw_blk_type type);
-
-/**
- * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
- *	Meant to do a single pass through the hardware list to iteratively
- *	retrieve hardware blocks of a given type for a given encoder.
- *	Initialize an iterator object.
- *	Set hw block type of interest. Set encoder id of interest, 0 for any.
- *	Function returns first hw of type for that encoder.
- *	Subsequent calls will return the next reserved hw of that type in-order.
- *	Iterator HW pointer will be null on failure to find hw.
- * @rm: DPU Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
+ * Get hw resources of the given type that are assigned to this encoder.
  */
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+	struct dpu_global_state *global_state, uint32_t enc_id,
+	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);

 #endif /* __DPU_RM_H__ */
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
 	int rc;

 	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
-		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
 		return -EINVAL;
 	}
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
 	u32 val;

 	if (!vbif || !vbif->cap) {
-		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
 		return -EINVAL;
 	}
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 	if (!vbif || !mdp) {
 		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
-				vbif != 0, mdp != 0);
+				vbif != NULL, mdp != NULL);
 		return;
 	}
......
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 		goto fail;
 	}

-	ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);
-	if (ret)
-		goto fail;
-
 	priv->bridges[priv->num_bridges++] = edp->bridge;
 	priv->connectors[priv->num_connectors++] = edp->connector;
......
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 		goto fail;
 	}

-	ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
-	if (ret)
-		goto fail;
-
 	priv->bridges[priv->num_bridges++] = hdmi->bridge;
 	priv->connectors[priv->num_connectors++] = hdmi->connector;
......
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	if (!dev->dma_parms) {
 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 					      GFP_KERNEL);
-		if (!dev->dma_parms)
-			return -ENOMEM;
+		if (!dev->dma_parms) {
+			ret = -ENOMEM;
+			goto err_msm_uninit;
+		}
 	}
 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
......
@@ -157,7 +157,17 @@ struct msm_gem_submit {
 			uint32_t handle;
 		};
 		uint64_t iova;
-	} bos[0];
+	} bos[];
 };

+/* helper to determine of a buffer in submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+	extern bool rd_full;
+	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
 #endif /* __MSM_GEM_H__ */
@@ -355,18 +355,36 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 	state->cmd = kstrdup(cmd, GFP_KERNEL);

 	if (submit) {
-		int i;
+		int i, nr = 0;

-		state->bos = kcalloc(submit->nr_cmds,
+		/* count # of buffers to dump: */
+		for (i = 0; i < submit->nr_bos; i++)
+			if (should_dump(submit, i))
+				nr++;
+		/* always dump cmd bo's, but don't double count them: */
+		for (i = 0; i < submit->nr_cmds; i++)
+			if (!should_dump(submit, submit->cmd[i].idx))
+				nr++;
+
+		state->bos = kcalloc(nr,
 			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

+		for (i = 0; i < submit->nr_bos; i++) {
+			if (should_dump(submit, i)) {
+				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+					submit->bos[i].iova, submit->bos[i].flags);
+			}
+		}
+
 		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
 			int idx = submit->cmd[i].idx;

+			if (!should_dump(submit, submit->cmd[i].idx)) {
 			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
 				submit->bos[idx].iova, submit->bos[idx].flags);
+			}
 		}
+	}

 	/* Set the active crash state to be dumped on failure */
 	gpu->crashstate = state;
......
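The crash-state change above rests on one counting rule: capture every buffer that should_dump() selects, plus every cmd buffer, without counting a cmd buffer twice when it was already selected. A small standalone model of that rule, using made-up flags and arrays rather than the real msm_gem_submit layout, is sketched here.

/* Standalone model of the "dump selected BOs plus cmd BOs, no double
 * counting" rule (hypothetical data layout; not the msm_gpu code). */
#include <stdio.h>
#include <stdbool.h>

#define BO_DUMP 0x1        /* stand-in for MSM_SUBMIT_BO_DUMP */

static bool rd_full;       /* module-param-like override: dump everything */

static bool should_dump(const unsigned *bo_flags, int idx)
{
        return rd_full || (bo_flags[idx] & BO_DUMP);
}

int main(void)
{
        unsigned bo_flags[] = { BO_DUMP, 0, 0, BO_DUMP };        /* 4 BOs in the submit */
        int cmd_idx[] = { 1, 3 };        /* cmd buffers reference BOs 1 and 3 */
        int nr_bos = 4, nr_cmds = 2;
        int i, nr = 0;

        /* count buffers explicitly marked for dumping */
        for (i = 0; i < nr_bos; i++)
                if (should_dump(bo_flags, i))
                        nr++;

        /* always dump cmd buffers, but don't count one twice */
        for (i = 0; i < nr_cmds; i++)
                if (!should_dump(bo_flags, cmd_idx[i]))
                        nr++;

        printf("%d buffers would be captured\n", nr);        /* 3: BOs 0, 3 and 1 */
        return 0;
}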
@@ -43,7 +43,7 @@
 #include "msm_gpu.h"
 #include "msm_gem.h"

-static bool rd_full = false;
+bool rd_full = false;
 MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
 module_param_named(rd_full, rd_full, bool, 0600);
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	msm_gem_put_vaddr(&obj->base);
 }

-static bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
-	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...)
......