Commit d7b7332c authored by Jani Nikula

drm/i915/request: drop kernel-doc

The documentation is closer to not being kernel-doc. Just drop the
kernel-doc /** indicators.

drivers/gpu/drm/i915/i915_request.h:176: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
 * Request queue structure.
drivers/gpu/drm/i915/i915_request.h:477: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
 * Returns true if seq1 is later than seq2.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/62a7a0f6e5b33e59ef4339304d01e7ef7ee04430.1683041799.git.jani.nikula@intel.com
parent 88629fee
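For context, scripts/kernel-doc only treats a comment opened with the double-asterisk marker as documentation when it follows the expected layout, naming the function or member it describes; free-form notes like the ones touched below belong in plain comments. A minimal sketch of the distinction, using a made-up i915_example_func rather than anything from this file:

/**
 * i915_example_func - short one-line summary of the function
 * @arg: description of the parameter
 *
 * Optional longer description, in the layout scripts/kernel-doc expects
 * once a comment is opened as kernel-doc.
 */
int i915_example_func(int arg);

/*
 * A free-form note like this is not kernel-doc, so it is opened as a
 * plain comment instead, which is exactly what this patch does below.
 */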
@@ -172,7 +172,7 @@ enum {
 	I915_FENCE_FLAG_COMPOSITE,
 };
-/**
+/*
  * Request queue structure.
  *
  * The request queue allows us to note sequence numbers that have been emitted
@@ -198,7 +198,7 @@ struct i915_request {
 	struct drm_i915_private *i915;
-	/**
+	/*
 	 * Context and ring buffer related to this request
 	 * Contexts are refcounted, so when this request is associated with a
 	 * context, we must increment the context's refcount, to guarantee that
@@ -251,9 +251,9 @@ struct i915_request {
 	};
 	struct llist_head execute_cb;
 	struct i915_sw_fence semaphore;
-	/**
-	 * @submit_work: complete submit fence from an IRQ if needed for
-	 * locking hierarchy reasons.
+	/*
+	 * complete submit fence from an IRQ if needed for locking hierarchy
+	 * reasons.
 	 */
 	struct irq_work submit_work;
@@ -277,35 +277,35 @@ struct i915_request {
 	 */
 	const u32 *hwsp_seqno;
-	/** Position in the ring of the start of the request */
+	/* Position in the ring of the start of the request */
 	u32 head;
-	/** Position in the ring of the start of the user packets */
+	/* Position in the ring of the start of the user packets */
 	u32 infix;
-	/**
+	/*
 	 * Position in the ring of the start of the postfix.
 	 * This is required to calculate the maximum available ring space
 	 * without overwriting the postfix.
 	 */
 	u32 postfix;
-	/** Position in the ring of the end of the whole request */
+	/* Position in the ring of the end of the whole request */
 	u32 tail;
-	/** Position in the ring of the end of any workarounds after the tail */
+	/* Position in the ring of the end of any workarounds after the tail */
 	u32 wa_tail;
-	/** Preallocate space in the ring for the emitting the request */
+	/* Preallocate space in the ring for the emitting the request */
 	u32 reserved_space;
-	/** Batch buffer pointer for selftest internal use. */
+	/* Batch buffer pointer for selftest internal use. */
 	I915_SELFTEST_DECLARE(struct i915_vma *batch);
 	struct i915_vma_resource *batch_res;
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-	/**
+	/*
 	 * Additional buffers requested by userspace to be captured upon
 	 * a GPU hang. The vma/obj on this list are protected by their
 	 * active reference - all objects on this list must also be
@@ -314,29 +314,29 @@ struct i915_request {
 	struct i915_capture_list *capture_list;
 #endif
-	/** Time at which this request was emitted, in jiffies. */
+	/* Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
-	/** timeline->request entry for this request */
+	/* timeline->request entry for this request */
 	struct list_head link;
-	/** Watchdog support fields. */
+	/* Watchdog support fields. */
 	struct i915_request_watchdog {
 		struct llist_node link;
 		struct hrtimer timer;
 	} watchdog;
-	/**
-	 * @guc_fence_link: Requests may need to be stalled when using GuC
-	 * submission waiting for certain GuC operations to complete. If that is
-	 * the case, stalled requests are added to a per context list of stalled
-	 * requests. The below list_head is the link in that list. Protected by
+	/*
+	 * Requests may need to be stalled when using GuC submission waiting for
+	 * certain GuC operations to complete. If that is the case, stalled
+	 * requests are added to a per context list of stalled requests. The
+	 * below list_head is the link in that list. Protected by
 	 * ce->guc_state.lock.
 	 */
 	struct list_head guc_fence_link;
-	/**
-	 * @guc_prio: Priority level while the request is in flight. Differs
+	/*
+	 * Priority level while the request is in flight. Differs
 	 * from i915 scheduler priority. See comment above
 	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
 	 * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
@@ -348,8 +348,8 @@ struct i915_request {
 #define GUC_PRIO_FINI 0xfe
 	u8 guc_prio;
-	/**
-	 * @hucq: wait queue entry used to wait on the HuC load to complete
+	/*
+	 * wait queue entry used to wait on the HuC load to complete
 	 */
 	wait_queue_entry_t hucq;
@@ -473,7 +473,7 @@ i915_request_has_initial_breadcrumb(const struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
 }
-/**
+/*
  * Returns true if seq1 is later than seq2.
  */
 static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
......
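The body of i915_seqno_passed falls outside the hunk shown above; for reference, seqno helpers of this kind are conventionally implemented with wraparound-safe signed arithmetic, roughly as sketched here rather than quoted verbatim from the file:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	/* Signed subtraction keeps the ordering correct across u32 wraparound. */
	return (s32)(seq1 - seq2) >= 0;
}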