Commit 34bb7b81 authored by Thomas Hellström

drm/xe: Use xe_pm_runtime_get in xe_bo_move() if reclaim-safe.

xe_bo_move() might be called in the TTM swapout path from validation
by another TTM device. If so, we are not likely to have a RPM
reference. So iff xe_pm_runtime_get() is safe to call from reclaim,
use it instead of xe_pm_runtime_get_noresume().

Strictly this is currently needed only if handle_system_ccs is true,
but use xe_pm_runtime_get() if possible anyway to increase test
coverage.

At the same time warn if handle_system_ccs is true and we can't
call xe_pm_runtime_get() from reclaim context. This will likely trip
if someone tries to enable SRIOV on LNL, without fixing Xe SRIOV
runtime resume / suspend.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240903094232.166342-1-thomas.hellstrom@linux.intel.com
parent 8da19441
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	xe_assert(xe, migrate);
 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
+	if (xe_rpm_reclaim_safe(xe)) {
+		/*
+		 * We might be called through swapout in the validation path of
+		 * another TTM device, so unconditionally acquire rpm here.
+		 */
+		xe_pm_runtime_get(xe);
+	} else {
+		drm_WARN_ON(&xe->drm, handle_system_ccs);
 		xe_pm_runtime_get_noresume(xe);
+	}
 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
 		/*
@@ -79,7 +79,14 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
 };
 #endif
-static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
 {
 	return !xe->d3cold.capable && !xe->info.has_sriov;
 }
......
@@ -31,6 +31,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
 int xe_pm_module_init(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment