Commit d59cf7bb authored by Jouni Högander

drm/i915/display: Use dma_fence interfaces instead of i915_sw_fence

We are preparing for the Xe driver, which has no i915_sw_fence
implementation. Let's drop the i915_sw_fence usage from the display code
and use dma_fence interfaces directly.

For this purpose, stack the dma fences from the related objects into the new
plane state. drm_gem_plane_helper_prepare_fb() can be used for the fences in
the new fb. A separate local implementation is used to stack the fences from
the old fb into the new plane state. These stacked fences are then waited on
during the atomic commit. There is no need for separate GPU reset handling in
intel_atomic_commit_fence_wait() as the fences are signaled when a GPU hang
is detected and the GPU is being reset.
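
For reference, once the hunks below are reassembled, the fence stacking boils
down to the helper sketched here (the code is from this patch; the comments
are editorial annotations and not part of the change):

  static int add_dma_resv_fences(struct dma_resv *resv,
                                 struct drm_plane_state *new_plane_state)
  {
          struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
          struct dma_fence *new;
          int ret;

          /* Collapse the old fb's reservation fences into a single fence;
           * dma_resv_usage_rw(false) selects DMA_RESV_USAGE_WRITE, i.e.
           * only the writers need to be waited for.
           */
          ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
          if (ret)
                  goto error;

          if (new && fence) {
                  /* Both an explicit fence and reservation fences exist:
                   * chain them so that waiting on the result waits on both.
                   */
                  struct dma_fence_chain *chain = dma_fence_chain_alloc();

                  if (!chain) {
                          ret = -ENOMEM;
                          goto error;
                  }

                  dma_fence_chain_init(chain, fence, new, 1);
                  fence = &chain->base;
          } else if (new) {
                  fence = new;
          }

          /* Park the combined fence in the new plane state; it is waited
           * on later in intel_atomic_commit_fence_wait().
           */
          dma_fence_put(new_plane_state->fence);
          new_plane_state->fence = fence;

          return 0;

  error:
          dma_fence_put(fence);
          return ret;
  }

On the wait side, intel_atomic_commit_fence_wait() then simply does a
dma_fence_wait_timeout() on each new plane state's fence.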

v4:
  - Drop to_new_plane_state suffix from add_dma_resv_fences
  - Use dma_resv_usage_rw(false) (DMA_RESV_USAGE_WRITE)
v3:
  - Rename add_fences and its parameters
  - Remove signaled check
  - Remove waiting old_plane_state fences
v2:
  - Add fences from old fb into new_plane_state->uapi.fence rather than
    into old_plane_state->uapi.fence

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231031084557.1181630-1-jouni.hogander@intel.com
parent 08a57300
@@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
 	drm_atomic_state_default_release(&state->base);
 	kfree(state->global_objs);
 
-	i915_sw_fence_fini(&state->commit_ready);
-
 	kfree(state);
 }
...
@@ -31,7 +31,10 @@
  * prepare/check/commit/cleanup steps.
  */
 
+#include <linux/dma-fence-chain.h>
+
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_blend.h>
 #include <drm/drm_fourcc.h>
@@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	return 0;
 }
 
+static int add_dma_resv_fences(struct dma_resv *resv,
+			       struct drm_plane_state *new_plane_state)
+{
+	struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
+	struct dma_fence *new;
+	int ret;
+
+	ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
+	if (ret)
+		goto error;
+
+	if (new && fence) {
+		struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+		if (!chain) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		dma_fence_chain_init(chain, fence, new, 1);
+		fence = &chain->base;
+	} else if (new) {
+		fence = new;
+	}
+
+	dma_fence_put(new_plane_state->fence);
+	new_plane_state->fence = fence;
+
+	return 0;
+
+error:
+	dma_fence_put(fence);
+	return ret;
+}
+
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @_plane: drm plane to prepare for
@@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 	struct intel_atomic_state *state =
 		to_intel_atomic_state(new_plane_state->uapi.state);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct intel_plane_state *old_plane_state =
+	struct intel_plane_state *old_plane_state =
 		intel_atomic_get_old_plane_state(state, plane);
 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
@@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		 * can safely continue.
 		 */
 		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
-			ret = i915_sw_fence_await_reservation(&state->commit_ready,
-							      old_obj->base.resv,
-							      false, 0,
-							      GFP_KERNEL);
+			ret = add_dma_resv_fences(old_obj->base.resv,
+						  &new_plane_state->uapi);
 			if (ret < 0)
 				return ret;
 		}
 	}
 
-	if (new_plane_state->uapi.fence) { /* explicit fencing */
-		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
-					     &attr);
-		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
-						    new_plane_state->uapi.fence,
-						    i915_fence_timeout(dev_priv),
-						    GFP_KERNEL);
-		if (ret < 0)
-			return ret;
-	}
-
 	if (!obj)
 		return 0;
 
 	ret = intel_plane_pin_fb(new_plane_state);
 	if (ret)
 		return ret;
 
-	i915_gem_object_wait_priority(obj, 0, &attr);
-
-	if (!new_plane_state->uapi.fence) { /* implicit fencing */
-		struct dma_resv_iter cursor;
-		struct dma_fence *fence;
-
-		ret = i915_sw_fence_await_reservation(&state->commit_ready,
-						      obj->base.resv, false,
-						      i915_fence_timeout(dev_priv),
-						      GFP_KERNEL);
-		if (ret < 0)
-			goto unpin_fb;
+	ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
+	if (ret < 0)
+		goto unpin_fb;
 
-		dma_resv_iter_begin(&cursor, obj->base.resv,
-				    DMA_RESV_USAGE_WRITE);
-		dma_resv_for_each_fence_unlocked(&cursor, fence) {
-			intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
-							     fence);
-		}
-		dma_resv_iter_end(&cursor);
-	} else {
+	if (new_plane_state->uapi.fence) {
+		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+					     &attr);
+
 		intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						     new_plane_state->uapi.fence);
 	}
...
@@ -48,6 +48,7 @@
 #include "g4x_dp.h"
 #include "g4x_hdmi.h"
 #include "hsw_ips.h"
+#include "i915_config.h"
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -7057,29 +7058,22 @@ void intel_atomic_helper_free_state_worker(struct work_struct *work)
 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
 {
-	struct wait_queue_entry wait_fence, wait_reset;
-	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
-
-	init_wait_entry(&wait_fence, 0);
-	init_wait_entry(&wait_reset, 0);
-	for (;;) {
-		prepare_to_wait(&intel_state->commit_ready.wait,
-				&wait_fence, TASK_UNINTERRUPTIBLE);
-		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
-					      I915_RESET_MODESET),
-				&wait_reset, TASK_UNINTERRUPTIBLE);
-
-		if (i915_sw_fence_done(&intel_state->commit_ready) ||
-		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
-			break;
+	struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
+	struct drm_plane *plane;
+	struct drm_plane_state *new_plane_state;
+	int ret, i;
 
-		schedule();
+	for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
+		if (new_plane_state->fence) {
+			ret = dma_fence_wait_timeout(new_plane_state->fence, false,
+						     i915_fence_timeout(i915));
+			if (ret <= 0)
+				break;
+
+			dma_fence_put(new_plane_state->fence);
+			new_plane_state->fence = NULL;
+		}
 	}
-	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
-	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
-				  I915_RESET_MODESET),
-		    &wait_reset);
 }
 
 static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -7371,32 +7365,6 @@ static void intel_atomic_commit_work(struct work_struct *work)
 	intel_atomic_commit_tail(state);
 }
 
-static int
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
-			  enum i915_sw_fence_notify notify)
-{
-	struct intel_atomic_state *state =
-		container_of(fence, struct intel_atomic_state, commit_ready);
-
-	switch (notify) {
-	case FENCE_COMPLETE:
-		/* we do blocking waits in the worker, nothing to do here */
-		break;
-	case FENCE_FREE:
-		{
-			struct drm_i915_private *i915 = to_i915(state->base.dev);
-			struct intel_atomic_helper *helper =
-				&i915->display.atomic_helper;
-
-			if (llist_add(&state->freed, &helper->free_list))
-				queue_work(i915->unordered_wq, &helper->free_work);
-			break;
-		}
-	}
-
-	return NOTIFY_DONE;
-}
-
 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
 {
 	struct intel_plane_state *old_plane_state, *new_plane_state;
@@ -7419,10 +7387,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
-	drm_atomic_state_get(&state->base);
-	i915_sw_fence_init(&state->commit_ready,
-			   intel_atomic_commit_ready);
-
 	/*
 	 * The intel_legacy_cursor_update() fast path takes care
 	 * of avoiding the vblank waits for simple cursor
@@ -7455,7 +7419,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
 	if (ret) {
 		drm_dbg_atomic(&dev_priv->drm,
 			       "Preparing state failed with %i\n", ret);
-		i915_sw_fence_commit(&state->commit_ready);
 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 		return ret;
 	}
@@ -7471,8 +7434,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
 		struct intel_crtc *crtc;
 		int i;
 
-		i915_sw_fence_commit(&state->commit_ready);
-
 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
 			intel_color_cleanup_commit(new_crtc_state);
@@ -7486,7 +7447,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
 	drm_atomic_state_get(&state->base);
 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
 
-	i915_sw_fence_commit(&state->commit_ready);
 	if (nonblock && state->modeset) {
 		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
 	} else if (nonblock) {
...
@@ -676,8 +676,6 @@ struct intel_atomic_state {
 	bool rps_interactive;
 
-	struct i915_sw_fence commit_ready;
-
 	struct llist_node freed;
 };
...