Commit 1b24b3cd authored by Dave Airlie


Merge tag 'drm-misc-fixes-2024-04-11' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

ast:
- Fix soft lockup

client:
- Protect connector modes with mode_config mutex

host1x:
- Do not setup DMA for virtual addresses

ivpu:
- Fix deadlock in context_xa
- PCI fixes
- Fixes to error handling

nouveau:
- gsp: Fix OOB access
- Fix casting

panfrost:
- Fix error path in MMU code

qxl:
- Revert "drm/qxl: simplify qxl_fence_wait"

vmwgfx:
- Enable DMA for SEV mappings
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20240411073403.GA9895@localhost.localdomain
parents b4589db5 4c08f019
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
#include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
        return 0;
}

-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
-        int ret;
-
-        ret = ivpu_rpm_get_if_active(vdev);
-        if (ret < 0)
-                return ret;
-
-        *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
-        if (ret)
-                ivpu_rpm_put(vdev);
-
-        return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
                args->value = vdev->platform;
                break;
        case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
-                ret = ivpu_get_core_clock_rate(vdev, &args->value);
+                args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
                break;
        case DRM_IVPU_PARAM_NUM_CONTEXTS:
                args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
        int ret;

-        ivpu_prepare_for_reset(vdev);
+        /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+        pci_save_state(to_pci_dev(vdev->drm.dev));

        ret = ivpu_hw_power_down(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

+        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
        return ret;
}
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
        vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
        atomic64_set(&vdev->unique_id_counter, 0);
-        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
        xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        /* Power up early so the rest of init code can access VPU registers */
        ret = ivpu_hw_power_up(vdev);
        if (ret)
-                goto err_power_down;
+                goto err_shutdown;

        ret = ivpu_mmu_global_context_init(vdev);
        if (ret)
-                goto err_power_down;
+                goto err_shutdown;

        ret = ivpu_mmu_init(vdev);
        if (ret)
@@ -601,10 +588,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
        ivpu_mmu_global_context_fini(vdev);
-err_power_down:
-        ivpu_hw_power_down(vdev);
-        if (IVPU_WA(d3hot_after_power_off))
-                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+        ivpu_shutdown(vdev);
err_xa_destroy:
        xa_destroy(&vdev->db_xa);
        xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
        ivpu_pm_disable(vdev);
+        ivpu_prepare_for_reset(vdev);
        ivpu_shutdown(vdev);
-        if (IVPU_WA(d3hot_after_power_off))
-                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

        ivpu_jobs_abort_all(vdev);
        ivpu_job_done_consumer_fini(vdev);
...
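The context_xa change above adds XA_FLAGS_LOCK_IRQ because the array is also touched from interrupt context; without the flag, the XArray's internal spinlock is not IRQ-safe and lockdep reports a potential deadlock. A minimal sketch of the pattern, with hypothetical names (ctx_xa, ctx_irq_handler), not the driver's actual code:

#include <linux/interrupt.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(ctx_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int ctx_add(void *ctx, u32 *id)
{
        /* Process context: xa_alloc_irq() takes xa_lock with interrupts
         * disabled, so the handler below cannot deadlock against us. */
        return xa_alloc_irq(&ctx_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
}

static irqreturn_t ctx_irq_handler(int irq, void *data)
{
        u32 id = *(u32 *)data;

        /* Interrupt context: legal only because the lock class is IRQ-safe. */
        xa_erase_irq(&ctx_xa, id);
        return IRQ_HANDLED;
}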
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
#ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
struct ivpu_wa_table {
        bool punit_disabled;
        bool clear_runtime_mem;
-        bool d3hot_after_power_off;
        bool interrupt_clear_with_0;
        bool disable_clock_relinquish;
        bool disable_d0i3_msg;
...
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
        u32 (*profiling_freq_get)(struct ivpu_device *vdev);
        void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
        u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+        u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
        u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
        u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
        u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
        return vdev->hw->ops->reg_pll_freq_get(vdev);
};

+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+        return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
        return vdev->hw->ops->reg_telemetry_offset_get(vdev);
...
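The new ratio_to_freq hook follows the driver's existing pattern: each hardware generation fills in one ivpu_hw_ops table and a static inline wrapper dispatches through it, so callers never branch on the generation. A standalone sketch of the dispatch pattern; the names and multipliers here are made up for illustration:

#include <stdio.h>

struct hw_ops {
        unsigned int (*ratio_to_freq)(unsigned int ratio);
};

/* Hypothetical per-generation implementations. */
static unsigned int gen_a_ratio_to_freq(unsigned int r) { return r * 25u; }
static unsigned int gen_b_ratio_to_freq(unsigned int r) { return r * 40u; }

static const struct hw_ops gen_a_ops = { .ratio_to_freq = gen_a_ratio_to_freq };
static const struct hw_ops gen_b_ops = { .ratio_to_freq = gen_b_ratio_to_freq };

int main(void)
{
        const struct hw_ops *ops = &gen_a_ops;  /* chosen once at probe time */

        /* Callers go through ops, never through generation checks. */
        printf("%u\n", ops->ratio_to_freq(16));
        return 0;
}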
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
#include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
        vdev->wa.punit_disabled = false;
        vdev->wa.clear_runtime_mem = false;
-        vdev->wa.d3hot_after_power_off = true;

        REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
        if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)

        IVPU_PRINT_WA(punit_disabled);
        IVPU_PRINT_WA(clear_runtime_mem);
-        IVPU_PRINT_WA(d3hot_after_power_off);
        IVPU_PRINT_WA(interrupt_clear_with_0);
}
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
        /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}

-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
        u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
        u32 cpu_clock;

-        if ((config & 0xff) == PLL_RATIO_4_3)
+        if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
                cpu_clock = pll_clock * 2 / 4;
        else
                cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;

-        return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+        return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}

static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
        .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
        .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
        .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+        .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
        .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
...
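A worked example of the 37xx conversion above, assuming PLL_REF_CLK_FREQ is 50 MHz (an assumption for illustration only; the driver defines the real constant in its headers):

#include <stdio.h>

#define PLL_REF_CLK_FREQ (50u * 1000000u)   /* assumed 50 MHz reference */

/* Mirrors the shape of ivpu_hw_37xx_ratio_to_freq() above. */
static unsigned int ratio_to_freq(unsigned int ratio, int is_ratio_4_3)
{
        unsigned int pll_clock = PLL_REF_CLK_FREQ * ratio;

        /* the 4/3 workpoint divides by 2, all others by 2.5 */
        return is_ratio_4_3 ? pll_clock * 2 / 4 : pll_clock * 2 / 5;
}

int main(void)
{
        /* ratio 0x20 (32): pll = 1.6 GHz, cpu = 1.6 GHz * 2 / 5 = 640 MHz */
        printf("%u Hz\n", ratio_to_freq(0x20, 0));
        return 0;
}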
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
        return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}

+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+        return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
        .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
        .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
        .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+        .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
        .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
...
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
#include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
        spin_lock_init(&ipc->cons_lock);
        INIT_LIST_HEAD(&ipc->cons_list);
        INIT_LIST_HEAD(&ipc->cb_msg_list);
-        drmm_mutex_init(&vdev->drm, &ipc->lock);
+        ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+        if (ret) {
+                ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+                goto err_free_rx;
+        }
        ivpu_ipc_reset(vdev);
        return 0;
...
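Unlike plain mutex_init(), drmm_mutex_init() registers a DRM-managed release action and can therefore fail, which is why the hunk above now checks its return value. A hedged sketch of the pattern; my_ipc and my_component_init are hypothetical:

#include <drm/drm_managed.h>
#include <linux/mutex.h>

struct my_ipc {
        struct mutex lock;
};

static int my_component_init(struct drm_device *drm, struct my_ipc *ipc)
{
        int ret;

        /* May fail: it has to allocate the managed release action. */
        ret = drmm_mutex_init(drm, &ipc->lock);
        if (ret)
                return ret;

        /* No mutex_destroy() in the fini path: the drmm action runs it
         * automatically when the drm_device is released. */
        return 0;
}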
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
        case IVPU_MMU_EVT_F_VMS_FETCH:
                return "Fetch of VMS caused external abort";
        default:
-                return "Unknown CMDQ command";
+                return "Unknown event";
        }
}
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
        switch (err) {
        case IVPU_MMU_CERROR_NONE:
-                return "No CMDQ Error";
+                return "No error";
        case IVPU_MMU_CERROR_ILL:
                return "Illegal command";
        case IVPU_MMU_CERROR_ABT:
-                return "External abort on CMDQ read";
+                return "External abort on command queue read";
        case IVPU_MMU_CERROR_ATC_INV_SYNC:
                return "Sync failed to complete ATS invalidation";
        default:
-                return "Unknown CMDQ Error";
+                return "Unknown error";
        }
}
...
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
 */
#include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
        int ret;

-        /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
-        pci_save_state(to_pci_dev(vdev->drm.dev));
+        ivpu_prepare_for_reset(vdev);

        ret = ivpu_shutdown(vdev);
        if (ret)
-                ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
-        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+                ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);

        return ret;
}
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
        int ret;

-        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
        pci_restore_state(to_pci_dev(vdev->drm.dev));
+        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);

-retry:
        ret = ivpu_hw_power_up(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ static int ivpu_resume(struct ivpu_device *vdev)
        ivpu_mmu_disable(vdev);
err_power_down:
        ivpu_hw_power_down(vdev);
+        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

        if (!ivpu_fw_is_cold_boot(vdev)) {
                ivpu_pm_prepare_cold_boot(vdev);
...
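The net effect of the ivpu_pm.c and ivpu_drv.c hunks is that config-space handling moves into the shutdown/resume pair: state is saved while the device is still powered, and restored on every power-up attempt (the retry label now sits above pci_restore_state()). A minimal sketch of the pairing, not the driver's exact code:

#include <linux/pci.h>

static void my_power_off(struct pci_dev *pdev)
{
        pci_save_state(pdev);               /* save while still in D0 */
        /* ... device-specific power down ... */
        pci_set_power_state(pdev, PCI_D3hot);
}

static int my_power_on(struct pci_dev *pdev)
{
        /* Config space stays accessible in D3hot, so the saved state
         * can be restored before the transition back to D0. */
        pci_restore_state(pdev);
        return pci_set_power_state(pdev, PCI_D0);
}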
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
        struct ast_device *ast = to_ast_device(dev);
        u8 video_on_off = on;
+        u32 i = 0;

        // Video On/Off
        ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
                        ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
                        // wait 1 ms
                        mdelay(1);
+                        if (++i > 200)
+                                break;
                }
        }
}
...
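The ast fix bounds a previously unbounded mdelay(1) poll at roughly 200 ms, which is what caused the soft lockup named in the summary. In a context that may sleep, the same bound can be expressed with the kernel's poll helper; a hedged sketch, where my_read_status() and MIRROR_BIT are hypothetical stand-ins for the driver's register accessor and flag:

#include <linux/iopoll.h>

#define MIRROR_BIT 0x10                         /* hypothetical flag */

struct my_dev;
u8 my_read_status(struct my_dev *d);            /* hypothetical register read */

static int wait_video_mirror(struct my_dev *d, u8 want)
{
        u8 val;

        /* Sleep ~1 ms between reads; return -ETIMEDOUT after 200 ms. */
        return read_poll_timeout(my_read_status, val,
                                 (val & MIRROR_BIT) == want,
                                 1000, 200 * 1000, false, d);
}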
@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
        unsigned int total_modes_count = 0;
        struct drm_client_offset *offsets;
        unsigned int connector_count = 0;
+        /* points to modes protected by mode_config.mutex */
        struct drm_display_mode **modes;
        struct drm_crtc **crtcs;
        int i, ret = 0;
@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
                drm_client_pick_crtcs(client, connectors, connector_count,
                                      crtcs, modes, 0, width, height);
        }
-        mutex_unlock(&dev->mode_config.mutex);

        drm_client_modeset_release(client);
@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
                        modeset->y = offset->y;
                }
        }
+        mutex_unlock(&dev->mode_config.mutex);
        mutex_unlock(&client->modeset_mutex);
out:
...
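The client fix widens the mode_config.mutex critical section: modes[] holds pointers into each connector's mode list, so the lock must stay held until those pointers have been consumed, not just while they are collected. A hedged sketch of the rule; pick_best_mode() and apply_mode() are hypothetical helpers:

static void set_mode_locked(struct drm_device *dev,
                            struct drm_connector *connector,
                            struct drm_mode_set *modeset)
{
        struct drm_display_mode *mode;

        mutex_lock(&dev->mode_config.mutex);

        mode = pick_best_mode(connector);   /* points into connector->modes */
        apply_mode(modeset, mode);          /* consume while still locked */

        mutex_unlock(&dev->mode_config.mutex);
        /* after unlock, a reprobe may free entries in connector->modes */
}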
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
        return ERR_PTR(-EINVAL);
}

+static void of_fini(void *p)
+{
+        kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
        .name = "OpenFirmware",
        .init = of_init,
-        .fini = (void(*)(void *))kfree,
+        .fini = of_fini,
        .read = of_read,
        .size = of_size,
        .rw = false,
...
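Context for the nouveau .fini change: kfree() takes a const void *, so calling it through a pointer cast to void (*)(void *) is a call through an incompatible function type — undefined behavior in C, and a runtime trap on kernel control-flow-integrity (kCFI) builds, which verify that an indirect call target matches the expected prototype. The wrapper gives the slot a function whose type matches exactly:

#include <linux/slab.h>

/* Wrong: type-punned indirect call, rejected under kCFI.
 *         .fini = (void (*)(void *))kfree,
 *
 * Right: a thin wrapper with the exact expected signature. */
static void of_fini(void *p)
{
        kfree(p);       /* direct call; the const qualifier converts fine here */
}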
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
        rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;

        str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
-        strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+        strings = (char *)rpc + str_offset;
        for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
                int name_len = strlen(r535_registry_entries[i].name) + 1;
...
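Both expressions in the r535 hunk name the same address; the difference is that &rpc->entries[NV_GSP_REG_NUM_ENTRIES] indexes one element past the declared bound of the array member, which array-bounds instrumentation treats as out-of-bounds, while deriving the pointer from the struct base via offsetof() does not. A hedged sketch with a hypothetical struct:

#include <stddef.h>

#define N_ENTRIES 16

struct registry {
        unsigned int num_entries;
        struct { unsigned int name_off, value; } entries[N_ENTRIES];
        /* variable-length string pool follows in the same allocation */
};

static char *strings_base(struct registry *r)
{
        /* same address as (char *)&r->entries[N_ENTRIES], but computed
         * from the struct base, so no array-bounds check fires */
        return (char *)r + offsetof(struct registry, entries[N_ENTRIES]);
}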
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        mapping_set_unevictable(mapping);

        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+                /* Can happen if the last fault only partially filled this
+                 * section of the pages array before failing. In that case
+                 * we skip already filled pages.
+                 */
+                if (pages[i])
+                        continue;
+
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
                        ret = PTR_ERR(pages[i]);
                        pages[i] = NULL;
-                        goto err_pages;
+                        goto err_unlock;
                }
        }
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
        if (ret)
-                goto err_pages;
+                goto err_unlock;

        ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
@@ -537,8 +544,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
err_map:
        sg_free_table(sgt);
-err_pages:
-        drm_gem_shmem_put_pages(&bo->base);
err_unlock:
        dma_resv_unlock(obj->resv);
err_bo:
...
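Two things changed in the panfrost path above: a retry after a partial failure now skips pages a previous fault already populated, and the failure path no longer drops references it did not take (the old err_pages label put the whole pages array even when earlier slots were still in use). In generic form the idiom looks like this; get_page_at() is a hypothetical allocator:

#include <linux/err.h>

struct page *get_page_at(unsigned long i);      /* hypothetical */

static int fill_pages(struct page **pages, unsigned long start, unsigned long n)
{
        unsigned long i;

        for (i = start; i < start + n; i++) {
                if (pages[i])           /* left over from a partial earlier pass */
                        continue;

                pages[i] = get_page_at(i);
                if (IS_ERR(pages[i])) {
                        int ret = PTR_ERR(pages[i]);

                        pages[i] = NULL;  /* keep the invariant: filled or NULL */
                        return ret;       /* refs for filled slots stay with the array */
                }
        }
        return 0;
}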
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
{
        struct qxl_device *qdev;
+        struct qxl_release *release;
+        int count = 0, sc = 0;
+        bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;

        qdev = container_of(fence->lock, struct qxl_device, release_lock);
+        release = container_of(fence, struct qxl_release, base);
+        have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

-        if (!wait_event_timeout(qdev->release_event,
-                                (dma_fence_is_signaled(fence) ||
-                                 (qxl_io_notify_oom(qdev), 0)),
-                                timeout))
-                return 0;
+retry:
+        sc++;
+
+        if (dma_fence_is_signaled(fence))
+                goto signaled;
+
+        qxl_io_notify_oom(qdev);
+
+        for (count = 0; count < 11; count++) {
+                if (!qxl_queue_garbage_collect(qdev, true))
+                        break;
+
+                if (dma_fence_is_signaled(fence))
+                        goto signaled;
+        }
+
+        if (dma_fence_is_signaled(fence))
+                goto signaled;
+
+        if (have_drawable_releases || sc < 4) {
+                if (sc > 2)
+                        /* back off */
+                        usleep_range(500, 1000);
+
+                if (time_after(jiffies, end))
+                        return 0;
+
+                if (have_drawable_releases && sc > 300) {
+                        DMA_FENCE_WARN(fence,
+                                       "failed to wait on release %llu after spincount %d\n",
+                                       fence->context & ~0xf0000000, sc);
+                        goto signaled;
+                }
+                goto retry;
+        }
+        /*
+         * yeah, original sync_obj_wait gave up after 3 spins when
+         * have_drawable_releases is not set.
+         */
+
+signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
...
@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
                [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};

-        /* TTM currently doesn't fully support SEV encryption. */
-        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
-                return -EINVAL;
-
-        if (vmw_force_coherent)
+        /*
+         * When running with SEV we always want dma mappings, because
+         * otherwise ttm tt pool pages will bounce through swiotlb running
+         * out of available space.
+         */
+        if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                dev_priv->map_mode = vmw_dma_alloc_coherent;
        else if (vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;
...
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
        return 0;
}

-static int host1x_dma_configure(struct device *dev)
-{
-        return of_dma_configure(dev, dev->of_node, true);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
        .suspend = pm_generic_suspend,
        .resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
        .name = "host1x",
        .match = host1x_device_match,
        .uevent = host1x_device_uevent,
-        .dma_configure = host1x_dma_configure,
        .pm = &host1x_device_pm_ops,
};
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
        device->dev.bus = &host1x_bus_type;
        device->dev.parent = host1x->dev;

-        of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
        device->dev.dma_parms = &device->dma_parms;
        dma_set_max_seg_size(&device->dev, UINT_MAX);
...
@@ -682,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
        return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}

+#define DMA_FENCE_WARN(f, fmt, args...) \
+        do {                                                            \
+                struct dma_fence *__ff = (f);                           \
+                pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+                        ##args);                                        \
+        } while (0)
+
#endif /* __LINUX_DMA_FENCE_H */
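The new DMA_FENCE_WARN helper prefixes a pr_warn() with the fence's context and seqno, which is how the qxl retry loop above identifies a stuck release. A hedged usage sketch (the caller and spin counter are hypothetical):

#include <linux/dma-fence.h>

static void warn_if_stuck(struct dma_fence *fence, int spins)
{
        if (spins > 300)
                DMA_FENCE_WARN(fence, "still unsignaled after %d spins\n", spins);
        /* log line: "f <context>#<seqno>: still unsignaled after ... spins" */
}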