Commit fc108a8b authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Add TLB invalidation fence

The fence will be signaled upon TLB invalidation completion.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 62ad0621
......@@ -669,6 +669,7 @@ static int gt_reset(struct xe_gt *gt)
xe_uc_stop_prepare(&gt->uc);
xe_gt_pagefault_reset(gt);
xe_gt_tlb_invalidation_reset(gt);
err = xe_uc_stop(&gt->uc);
if (err)
......
......@@ -99,7 +99,7 @@ static int invalidate_tlb(struct seq_file *m, void *data)
int seqno;
int ret = 0;
seqno = xe_gt_tlb_invalidation(gt);
seqno = xe_gt_tlb_invalidation(gt, NULL);
XE_WARN_ON(seqno < 0);
if (seqno > 0)
ret = xe_gt_tlb_invalidation_wait(gt, seqno);
......
......@@ -245,7 +245,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
* defer TLB invalidate + fault response to a callback of fence
* too
*/
ret = xe_gt_tlb_invalidation(gt);
ret = xe_gt_tlb_invalidation(gt, NULL);
if (ret >= 0)
ret = 0;
}
......
......@@ -17,11 +17,27 @@ guc_to_gt(struct xe_guc *guc)
/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Set up the empty list of pending invalidation fences and seed the
 * invalidation seqno at 1 (0 is reserved as an invalid seqno).
 *
 * Return: 0 on success (cannot currently fail).
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	gt->tlb_invalidation.seqno = 1;

	return 0;
}
static int send_tlb_invalidation(struct xe_guc *guc)
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
struct xe_gt_tlb_invalidation_fence *fence, *next;
mutex_lock(&gt->uc.guc.ct.lock);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
list_del(&fence->link);
dma_fence_signal(&fence->base);
dma_fence_put(&fence->base);
}
mutex_unlock(&gt->uc.guc.ct.lock);
}
static int send_tlb_invalidation(struct xe_guc *guc,
struct xe_gt_tlb_invalidation_fence *fence)
{
struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
......@@ -41,6 +57,15 @@ static int send_tlb_invalidation(struct xe_guc *guc)
*/
mutex_lock(&guc->ct.lock);
seqno = gt->tlb_invalidation.seqno;
if (fence) {
/*
* FIXME: How to deal TLB invalidation timeout, right now we
* just have an endless fence which isn't ideal.
*/
fence->seqno = seqno;
list_add_tail(&fence->link,
&gt->tlb_invalidation.pending_fences);
}
action[1] = seqno;
gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
TLB_INVALIDATION_SEQNO_MAX;
......@@ -55,9 +80,10 @@ static int send_tlb_invalidation(struct xe_guc *guc)
return ret;
}
int xe_gt_tlb_invalidation(struct xe_gt *gt)
int xe_gt_tlb_invalidation(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence)
{
return send_tlb_invalidation(&gt->uc.guc);
return send_tlb_invalidation(&gt->uc.guc, fence);
}
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
......@@ -97,8 +123,11 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
struct xe_gt_tlb_invalidation_fence *fence;
int expected_seqno;
lockdep_assert_held(&guc->ct.lock);
if (unlikely(len != 1))
return -EPROTO;
......@@ -111,5 +140,13 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
smp_wmb();
wake_up_all(&guc->ct.wq);
fence = list_first_entry_or_null(&gt->tlb_invalidation.pending_fences,
typeof(*fence), link);
if (fence && tlb_invalidation_seqno_past(gt, fence->seqno)) {
list_del(&fence->link);
dma_fence_signal(&fence->base);
dma_fence_put(&fence->base);
}
return 0;
}
......@@ -8,11 +8,15 @@
#include <linux/types.h>
#include "xe_gt_tlb_invalidation_types.h"
struct xe_gt;
struct xe_guc;
int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
int xe_gt_tlb_invalidation(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence);
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
......
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
#define _XE_GT_TLB_INVALIDATION_TYPES_H_

#include <linux/dma-fence.h>

/**
 * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
 *
 * Optionally passed to xe_gt_tlb_invalidation; signaled upon TLB
 * invalidation completion.
 */
struct xe_gt_tlb_invalidation_fence {
	/** @base: dma fence base */
	struct dma_fence base;
	/** @link: link into the GT's list of pending TLB invalidation fences */
	struct list_head link;
	/** @seqno: seqno of the TLB invalidation on which to signal the fence */
	int seqno;
};

#endif
......@@ -169,6 +169,11 @@ struct xe_gt {
* @seqno_recv: last received TLB invalidation seqno, protected by CT lock
*/
int seqno_recv;
/**
 * @pending_fences: list of pending fences waiting on TLB
 * invalidations, protected by CT lock
 */
struct list_head pending_fences;
} tlb_invalidation;
/** @usm: unified shared memory state */
......
......@@ -3345,7 +3345,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
if (xe_pt_zap_ptes(gt, vma)) {
gt_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
seqno[id] = xe_gt_tlb_invalidation(gt);
seqno[id] = xe_gt_tlb_invalidation(gt, NULL);
if (seqno[id] < 0)
return seqno[id];
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment