Commit 17bf3df9 authored by Linus Torvalds

Merge tag 'drm-fixes-2023-07-28' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular scheduled fixes, msm and amdgpu leading the way, with some
  i915 and a single misc fbdev, all seems fine.

  fbdev:
   - remove unused function

  amdgpu:
   - gfxhub partition fix
   - Fix error handling in psp_sw_init()
   - SMU13 fix
   - DCN 3.1 fix
   - DCN 3.2 fix
   - Fix for display PHY programming sequence
   - DP MST error handling fix
   - GFX 9.4.3 fix

  amdkfd:
   - GFX11 trap handling fix

  i915:
   - Use shmem for dpt objects
   - Fix an error handling path in igt_write_huge()

  msm:
   - display:
      - Fix to correct the UBWC programming for decoder version 4.3 seen
        on SM8550
      - Add the missing flush and fetch bits for DMA4 and DMA5 SSPPs.
      - Fix to drop the unused dpu_core_perf_data_bus_id enum from the
        code
      - Drop the unused dsi_phy_14nm_17mA_regulators from the QCM2290
        DSI cfg.
   - gpu:
      - Fix warn splat for newer devices without revn
      - Remove name/revn for a690; for consistency we shouldn't populate
        these for newer devices, but it slipped through review
      - Fix a6xx gpu snapshot BINDLESS_DATA size (was listed in bytes
        instead of dwords, causing AHB faults on a6xx gen4/a660-family)
      - Disallow submit with fence id 0"

* tag 'drm-fixes-2023-07-28' of git://anongit.freedesktop.org/drm/drm: (22 commits)
  drm/msm: Disallow submit with fence id 0
  drm/amdgpu: Restore HQD persistent state register
  drm/amd/display: Unlock on error path in dm_handle_mst_sideband_msg_ready_event()
  drm/amd/display: Exit idle optimizations before attempt to access PHY
  drm/amd/display: Don't apply FIFO resync W/A if rdivider = 0
  drm/amd/display: Guard DCN31 PHYD32CLK logic against chip family
  drm/amd/smu: use AverageGfxclkFrequency* to replace previous GFX Curr Clock
  drm/amd: Fix an error handling mistake in psp_sw_init()
  drm/amdgpu: Fix infinite loop in gfxhub_v1_2_xcc_gart_enable (v2)
  drm/amdkfd: fix trap handling work around for debugging
  drm/fb-helper: Remove unused inline function drm_fb_helper_defio_init()
  drm/i915: Fix an error handling path in igt_write_huge()
  drm/i915/dpt: Use shmem for dpt objects
  drm/msm: Fix hw_fence error path cleanup
  drm/msm: Fix IS_ERR_OR_NULL() vs NULL check in a5xx_submit_in_rb()
  drm/msm/adreno: Fix snapshot BINDLESS_DATA size
  drm/msm/a690: Remove revn and name
  drm/msm/adreno: Fix warn splat for devices without revn
  drm/msm/dsi: Drop unused regulators from QCM2290 14nm DSI PHY config
  drm/msm/dpu: drop enum dpu_core_perf_data_bus_id
  ...
parents f24767ca 9a767faa
@@ -498,11 +498,11 @@ static int psp_sw_init(void *handle)
 	return 0;

 failed2:
-	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
-			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-failed1:
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	return ret;
 }
...
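The swap above restores the usual kernel goto-unwind ordering: cleanup labels run in reverse allocation order, so each label frees exactly what had been allocated before the failure. A minimal self-contained sketch of the idiom, with illustrative names standing in for the amdgpu buffers:

    #include <stdlib.h>

    static void *buf_a, *buf_b;      /* stand-ins for the two PSP buffers */

    static int setup(void) { return 0; }   /* a later step that may fail */

    static int init_two_buffers(void)
    {
        int ret = -1;

        buf_a = malloc(16);
        if (!buf_a)
            return -1;

        buf_b = malloc(16);
        if (!buf_b)
            goto failed1;            /* only buf_a exists: free it */

        ret = setup();
        if (ret)
            goto failed2;            /* both exist: free buf_b, then buf_a */

        return 0;                    /* caller frees the buffers later */

    failed2:
        free(buf_b);
    failed1:
        free(buf_a);
        return ret;
    }

With the labels ordered this way, an early failure can never free a buffer that was never allocated.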
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
+#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -1736,7 +1737,7 @@ static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
-	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
...
@@ -402,18 +402,15 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
 static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
 				       uint32_t xcc_mask)
 {
-	uint32_t tmp_mask;
 	int i;

-	tmp_mask = xcc_mask;
 	/*
 	 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
 	 * VF copy registers so vbios post doesn't program them, for
 	 * SRIOV driver need to program them
 	 */
 	if (amdgpu_sriov_vf(adev)) {
-		for_each_inst(i, tmp_mask) {
-			i = ffs(tmp_mask) - 1;
+		for_each_inst(i, xcc_mask) {
 			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
					 adev->gmc.vram_start >> 24);
 			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
...
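The removed `i = ffs(tmp_mask) - 1` recomputed the instance index from a mask the loop body never modified, so the walk was pinned to the first set bit and never advanced; `for_each_inst()` already yields each set-bit index by itself. A small stand-alone sketch of the set-bit walk the macro performs (plain C, not the kernel macro):

    #include <strings.h>    /* ffs() */
    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 0x15;        /* instances 0, 2, 4 */

        while (mask) {
            int i = ffs(mask) - 1;       /* lowest set bit index */
            printf("program instance %d\n", i);
            mask &= ~(1u << i);          /* clear it so the walk advances */
        }
        return 0;
    }

Recomputing `i` from an unmodified copy of the mask inside the body, as the old code did, would print instance 0 forever.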
@@ -302,8 +302,7 @@ static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
 	if (!q)
 		return 0;

-	if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-	    KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
+	if (!kfd_dbg_has_cwsr_workaround(q->device))
 		return 0;

 	if (enable && q->properties.is_user_cu_masked)
@@ -349,7 +348,7 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)
 {
 	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
 	uint32_t flags = pdd->process->dbg_flags;
-	bool sq_trap_en = !!spi_dbg_cntl;
+	bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev);

 	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
 		return 0;
...
@@ -100,6 +100,12 @@ static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
 		KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
 }

+static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
+{
+	return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+	       KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
+}
+
 static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
 {
 	if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
...
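The new helper gives the GFX 11.0.0–11.0.3 range test one home. In the add_queue_mes() hunk below, the old open-coded condition (`< 11.0.0 || > 11.0.3`) is exactly the negation of this range by De Morgan, which is why it collapses to `!kfd_dbg_has_cwsr_workaround()`. A tiny check of that identity, modeling versions as plain integers for illustration:

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
        for (int ver = 0; ver < 30; ver++) {
            bool old_trap_en = ver < 10 || ver > 13;   /* old inline test */
            bool has_wa = ver >= 10 && ver <= 13;      /* the helper's range */
            assert(old_trap_en == !has_wa);
        }
        return 0;
    }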
@@ -226,8 +226,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
 	queue_input.paging = false;
 	queue_input.tba_addr = qpd->tba_addr;
 	queue_input.tma_addr = qpd->tma_addr;
-	queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-			      KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3);
+	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
 	queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;

 	queue_type = convert_to_mes_queue_type(q->properties.type);
@@ -1806,8 +1805,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	 */
 	q->properties.is_evicted = !!qpd->evicted;
 	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
-				  KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) &&
-				  KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3);
+				  kfd_dbg_has_cwsr_workaround(q->device);

 	if (qd)
 		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
...
@@ -706,7 +706,7 @@ void dm_handle_mst_sideband_msg_ready_event(
 		if (retry == 3) {
 			DRM_ERROR("Failed to ack MST event.\n");
-			return;
+			break;
 		}

 		drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
...
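The one-word change matters because the function holds a lock that is only released after the retry loop; returning from inside the loop skipped that unlock. A self-contained sketch of the shape of the fix, using a pthread mutex and generic names in place of the DRM ones (the exact lock in the driver is an assumption here):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t msg_ready_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool ack_event(void) { return false; }   /* pretend the ack fails */

    static void handle_event(void)
    {
        pthread_mutex_lock(&msg_ready_lock);

        for (int retry = 0; retry < 4; retry++) {
            if (!ack_event()) {
                if (retry == 3) {
                    fprintf(stderr, "Failed to ack MST event\n");
                    break;    /* was: return (which leaked the lock) */
                }
                continue;
            }
            break;
        }

        pthread_mutex_unlock(&msg_ready_lock);    /* always reached now */
    }

    int main(void) { handle_event(); return 0; }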
@@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 			hws->funcs.edp_backlight_control(edp_link_with_sink, false);
 		}
 		/*resume from S3, no vbios posting, no need to power down again*/
+		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
 		power_down_all_hw_blocks(dc);
 		disable_vga_and_power_gate_all_controllers(dc);

 		if (edp_link_with_sink && !keep_edp_vdd_on)
 			dc->hwss.edp_power_control(edp_link_with_sink, false);
+		clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
 	}
 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
 }
...
@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
 		struct dcn_dccg *dccg_dcn,
 		enum phyd32clk_clock_source src)
 {
-	if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+	if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+	    dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
 		if (src == PHYD32CLKC)
 			src = PHYD32CLKF;
 		if (src == PHYD32CLKD)
...
@@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync(
 	uint32_t dispclk_rdivider_value = 0;

 	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
-	REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+	/* Not valid for the WDIVIDER to be set to 0 */
+	if (dispclk_rdivider_value != 0)
+		REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
 }

 static void dccg32_get_pixel_rate_div(
...
@@ -1734,7 +1734,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
 	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

-	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
 	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
 	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
 	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
...
@@ -166,6 +166,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
 		i915_vma_get(vma);
 	}

+	dpt->obj->mm.dirty = true;
+
 	atomic_dec(&i915->gpu_error.pending_fb_pin);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -261,7 +263,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
 	dpt_obj = i915_gem_object_create_stolen(i915, size);
 	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
 		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
-		dpt_obj = i915_gem_object_create_internal(i915, size);
+		dpt_obj = i915_gem_object_create_shmem(i915, size);
 	}
 	if (IS_ERR(dpt_obj))
 		return ERR_CAST(dpt_obj);
...
@@ -1246,8 +1246,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
 	 * times in succession a possibility by enlarging the permutation array.
 	 */
 	order = i915_random_order(count * count, &prng);
-	if (!order)
-		return -ENOMEM;
+	if (!order) {
+		err = -ENOMEM;
+		goto out;
+	}

 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
 	max = div_u64(max - size, max_page_size);
...
@@ -89,7 +89,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 			 * since we've already mapped it once in
 			 * submit_reloc()
 			 */
-			if (WARN_ON(!ptr))
+			if (WARN_ON(IS_ERR_OR_NULL(ptr)))
 				return;

 			for (i = 0; i < dwords; i++) {
...
@@ -206,7 +206,7 @@ static const struct a6xx_shader_block {
 	SHADER(A6XX_SP_LB_3_DATA, 0x800),
 	SHADER(A6XX_SP_LB_4_DATA, 0x800),
 	SHADER(A6XX_SP_LB_5_DATA, 0x200),
-	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
 	SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
 	SHADER(A6XX_SP_UAV_DATA, 0x80),
 	SHADER(A6XX_SP_INST_TAG, 0x80),
...
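The size column in this table counts dwords, not bytes: 0x2000 was the byte size of the bindless block, so the snapshot code read four times too much and tripped the AHB faults mentioned in the pull message. The conversion behind the corrected value:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        const uint32_t bindless_bytes = 0x2000;
        /* 32-bit dwords: 0x2000 bytes / 4 bytes per dword = 0x800 entries */
        const uint32_t bindless_dwords = bindless_bytes / sizeof(uint32_t);

        assert(bindless_dwords == 0x800);
        return 0;
    }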
@@ -369,8 +369,6 @@ static const struct adreno_info gpulist[] = {
 		.hwcg = a640_hwcg,
 	}, {
 		.rev = ADRENO_REV(6, 9, 0, ANY_ID),
-		.revn = 690,
-		.name = "A690",
 		.fw = {
 			[ADRENO_FW_SQE] = "a660_sqe.fw",
 			[ADRENO_FW_GMU] = "a690_gmu.bin",
...
@@ -149,7 +149,8 @@ bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 	return gpu->revn == revn;
 }
@@ -161,14 +162,16 @@ static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
 static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 	return (gpu->revn < 300);
 }

 static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
 {
-	WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not is set at same time as info */
+	WARN_ON_ONCE(!gpu->info);
 	return (gpu->revn < 210);
 }
@@ -307,7 +310,8 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu)
 static inline int adreno_is_a690(const struct adreno_gpu *gpu)
 {
-	return adreno_is_revn(gpu, 690);
+	/* The order of args is important here to handle ANY_ID correctly */
+	return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
 };

 /* check for a615, a616, a618, a619 or any derivatives */
...
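The argument-order comment exists because the wildcard comparison is one-sided: only fields of the first argument may be ANY_ID (the matcher below is a simplified stand-in for how adreno_cmp_rev's per-field match appears to behave, not the driver code). Passing gpu->rev first would require the concrete revision to literally equal ANY_ID:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ANY_ID 0xff

    /* Wildcards are honored only in the pattern (first) argument. */
    static bool field_match(uint8_t pattern, uint8_t id)
    {
        return pattern == ANY_ID || pattern == id;
    }

    int main(void)
    {
        /* a patchid pattern of ANY_ID matches a concrete patchid of 1 ... */
        assert(field_match(ANY_ID, 1));
        /* ... but a concrete pattern of 1 does not match ANY_ID */
        assert(!field_match(1, ANY_ID));
        return 0;
    }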
@@ -14,19 +14,6 @@
 #define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000

-/**
- * enum dpu_core_perf_data_bus_id - data bus identifier
- * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
- */
-enum dpu_core_perf_data_bus_id {
-	DPU_CORE_PERF_DATA_BUS_ID_MNOC,
-	DPU_CORE_PERF_DATA_BUS_ID_LLCC,
-	DPU_CORE_PERF_DATA_BUS_ID_EBI,
-	DPU_CORE_PERF_DATA_BUS_ID_MAX,
-};
-
 /**
  * struct dpu_core_perf_params - definition of performance parameters
  * @max_per_pipe_ib: maximum instantaneous bandwidth request
...
@@ -51,7 +51,7 @@
 static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
 	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
-	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
+	1, 2, 3, 4, 5};

 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
 		enum dpu_lm lm)
@@ -198,6 +198,12 @@ static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
 	case SSPP_DMA3:
 		ctx->pending_flush_mask |= BIT(25);
 		break;
+	case SSPP_DMA4:
+		ctx->pending_flush_mask |= BIT(13);
+		break;
+	case SSPP_DMA5:
+		ctx->pending_flush_mask |= BIT(14);
+		break;
 	case SSPP_CURSOR0:
 		ctx->pending_flush_mask |= BIT(22);
 		break;
...
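Each SSPP owns one bit in the CTL block's pending-flush register, and the hunk wires the new DMA4/DMA5 planes to bits 13 and 14 (values taken from the diff above). A trivial check of the resulting mask:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        uint32_t pending_flush_mask = 0;

        pending_flush_mask |= BIT(13);   /* SSPP_DMA4 */
        pending_flush_mask |= BIT(14);   /* SSPP_DMA5 */
        assert(pending_flush_mask == 0x6000);
        return 0;
    }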
@@ -1087,8 +1087,6 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {

 const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
 	.has_phy_lane = true,
-	.regulator_data = dsi_phy_14nm_17mA_regulators,
-	.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
 	.ops = {
 		.enable = dsi_14nm_phy_enable,
 		.disable = dsi_14nm_phy_disable,
...
@@ -191,6 +191,12 @@ msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
 	f->fctx = fctx;

+	/*
+	 * Until this point, the fence was just some pre-allocated memory,
+	 * no-one should have taken a reference to it yet.
+	 */
+	WARN_ON(kref_read(&fence->refcount));
+
 	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
 		       fctx->context, ++fctx->last_fence);
 }
...
@@ -86,7 +86,19 @@ void __msm_gem_submit_destroy(struct kref *kref)
 	}

 	dma_fence_put(submit->user_fence);
-	dma_fence_put(submit->hw_fence);
+
+	/*
+	 * If the submit is freed before msm_job_run(), then hw_fence is
+	 * just some pre-allocated memory, not a reference counted fence.
+	 * Once the job runs and the hw_fence is initialized, it will
+	 * have a refcount of at least one, since the submit holds a ref
+	 * to the hw_fence.
+	 */
+	if (kref_read(&submit->hw_fence->refcount) == 0) {
+		kfree(submit->hw_fence);
+	} else {
+		dma_fence_put(submit->hw_fence);
+	}

 	put_pid(submit->pid);
 	msm_submitqueue_put(submit->queue);
@@ -889,7 +901,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	 * after the job is armed
 	 */
 	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
-	    idr_find(&queue->fence_idr, args->fence)) {
+	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
 		spin_unlock(&queue->idr_lock);
 		idr_preload_end();
 		ret = -EINVAL;
...
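A submit fence id of 0 is treated as never valid (fence seqnos in the first hunk above come from `++fctx->last_fence`, so they start at 1), and the added `!args->fence` rejects a user-supplied id of 0 before the idr lookup even runs. A minimal sketch of the guard, with stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for idr_find(): pretend no ids are currently in use. */
    static bool fence_id_in_use(unsigned int id) { (void)id; return false; }

    /* Mirrors the updated condition: id 0 is rejected outright, and the
     * short-circuit keeps the table lookup from running for it. */
    static bool reject_requested_fence_id(unsigned int id)
    {
        return !id || fence_id_in_use(id);
    }

    int main(void)
    {
        printf("id 0 rejected: %d\n", reject_requested_fence_id(0));  /* 1 */
        printf("id 7 rejected: %d\n", reject_requested_fence_id(7));  /* 0 */
        return 0;
    }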
@@ -189,6 +189,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
 #define UBWC_2_0 0x20000000
 #define UBWC_3_0 0x30000000
 #define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000

 static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
 {
@@ -227,7 +228,10 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
 		writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
 		writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
 	} else {
-		writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
+		if (data->ubwc_dec_version == UBWC_4_3)
+			writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
+		else
+			writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
 		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
 	}
 }
@@ -271,6 +275,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
 		msm_mdss_setup_ubwc_dec_30(msm_mdss);
 		break;
 	case UBWC_4_0:
+	case UBWC_4_3:
 		msm_mdss_setup_ubwc_dec_40(msm_mdss);
 		break;
 	default:
@@ -569,6 +574,16 @@ static const struct msm_mdss_data sm8250_data = {
 	.macrotile_mode = 1,
 };

+static const struct msm_mdss_data sm8550_data = {
+	.ubwc_version = UBWC_4_0,
+	.ubwc_dec_version = UBWC_4_3,
+	.ubwc_swizzle = 6,
+	.ubwc_static = 1,
+	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
+	.highest_bank_bit = 3,
+	.macrotile_mode = 1,
+};
+
 static const struct of_device_id mdss_dt_match[] = {
 	{ .compatible = "qcom,mdss" },
 	{ .compatible = "qcom,msm8998-mdss" },
@@ -585,7 +600,7 @@ static const struct of_device_id mdss_dt_match[] = {
 	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
 	{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
 	{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
-	{ .compatible = "qcom,sm8550-mdss", .data = &sm8250_data },
+	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
 	{}
 };
 MODULE_DEVICE_TABLE(of, mdss_dt_match);
...
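The UBWC_x_y constants read like a packed major.minor version, with the major in bits 31:28 and the minor at bit 16. That reading is inferred from the four defines themselves rather than any documented register layout, but it explains why decoder version 4.3 is 0x40030000. A quick check:

    #include <assert.h>

    /* Inferred packing of the UBWC_x_y defines above (an assumption). */
    #define UBWC_VER(major, minor) (((major) << 28) | ((minor) << 16))

    int main(void)
    {
        assert(UBWC_VER(2, 0) == 0x20000000);   /* UBWC_2_0 */
        assert(UBWC_VER(4, 0) == 0x40000000);   /* UBWC_4_0 */
        assert(UBWC_VER(4, 3) == 0x40030000);   /* UBWC_4_3 */
        return 0;
    }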
@@ -368,11 +368,6 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info,
 {
 }

-static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
-	return -ENODEV;
-}
-
 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
 					     bool suspend)
 {
...