Commit df12d1c3 authored by Michal Wajdeczko, committed by Daniel Vetter

drm/i915/guc: Update sizes of CTB buffers

Future GuC will require CTB buffer sizes to be a multiple of 4K.
Make these changes now as this shouldn't impact us too much.
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210603230408.54856-2-matthew.brost@intel.com
parent b43f0fc8
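
Not part of the patch: as a rough, stand-alone illustration of the sizing rule described in the commit message, the sketch below recomputes the blob layout implied by the new CTB_DESC_SIZE / CTB_H2G_BUFFER_SIZE / CTB_G2H_BUFFER_SIZE macros added in the diff that follows. The SZ_2K / SZ_4K values are hard-coded here only so it builds outside the kernel, and CTB_DESC_SIZE is taken as 2K, which matches the 0x0800 offset in the DOC table added below.

#include <stdio.h>

#define SZ_2K 0x800u
#define SZ_4K 0x1000u
#define CTB_DESC_SIZE SZ_2K   /* assumption: ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) works out to 2K */
#define CTB_H2G_BUFFER_SIZE SZ_4K
#define CTB_G2H_BUFFER_SIZE SZ_4K

int main(void)
{
        unsigned int send_desc = 0;                                /* 0x0    */
        unsigned int recv_desc = CTB_DESC_SIZE;                    /* 0x800  */
        unsigned int send_cmds = 2 * CTB_DESC_SIZE;                /* 0x1000 */
        unsigned int recv_cmds = send_cmds + CTB_H2G_BUFFER_SIZE;  /* 0x2000 */
        unsigned int blob_size = recv_cmds + CTB_G2H_BUFFER_SIZE;  /* 12K, up from the single 4K page used before */

        printf("send desc %#x, recv desc %#x\n", send_desc, recv_desc);
        printf("send cmds %#x, recv cmds %#x, blob %u bytes\n", send_cmds, recv_cmds, blob_size);
        return 0;
}

The printed offsets (0x0, 0x800, 0x1000, 0x2000) mirror the DOC table in the patch; the total allocation grows from one 4K page to 12K.
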
@@ -38,6 +38,32 @@ static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__)
+/**
+ * DOC: CTB Blob
+ *
+ * We allocate single blob to hold both CTB descriptors and buffers:
+ *
+ *      +--------+------------------------------+------+
+ *      | offset | contents                     | size |
+ *      +========+==============================+======+
+ *      | 0x0000 | H2G `CTB Descriptor`_ (send) |      |
+ *      +--------+------------------------------+  4K  |
+ *      | 0x0800 | G2H `CTB Descriptor`_ (recv) |      |
+ *      +--------+------------------------------+------+
+ *      | 0x1000 | H2G `CT Buffer`_ (send)      | n*4K |
+ *      |        |                              |      |
+ *      +--------+------------------------------+------+
+ *      | 0x1000 | G2H `CT Buffer`_ (recv)      | m*4K |
+ *      | + n*4K |                              |      |
+ *      +--------+------------------------------+------+
+ *
+ * Size of each `CT Buffer`_ must be multiple of 4K.
+ * As we don't expect too many messages, for now use minimum sizes.
+ */
+#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_SIZE (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE (SZ_4K)
struct ct_request {
struct list_head link;
u32 fence;
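
Not part of the patch: the 0x0800 and 0x1000 offsets in the table above follow from the ALIGN() in CTB_DESC_SIZE. The descriptor struct is far smaller than 2K, so each descriptor is padded up to SZ_2K and the two together occupy exactly the first 4K of the blob. A minimal stand-alone sketch of that round-up (ALIGN_UP re-implements the usual power-of-two rounding, and 64 is only a stand-in, not the real sizeof):

#include <stdio.h>

/* Same power-of-two round-up the kernel's ALIGN() macro performs. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int desc_size = 64;   /* stand-in only; not the real sizeof(struct guc_ct_buffer_desc) */
        unsigned int padded = ALIGN_UP(desc_size, 0x800u);

        /* Two padded descriptors fill the blob's first 4K page: 0x800 and 0x1000. */
        printf("one desc %#x, both descs %#x\n", padded, 2 * padded);
        return 0;
}
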
@@ -175,29 +201,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
GEM_BUG_ON(ct->vma);
-/* We allocate 1 page to hold both descriptors and both buffers.
- *  ___________.....................
- * |desc (SEND)|                   :
- * |___________|    PAGE/4
- * :___________....................:
- * |desc (RECV)|                   :
- * |___________|    PAGE/4
- * :_______________________________:
- * |cmds (SEND)                    |
- * |                               PAGE/4
- * |_______________________________|
- * |cmds (RECV)                    |
- * |                               PAGE/4
- * |_______________________________|
- *
- * Each message can use a maximum of 32 dwords and we don't expect to
- * have more than 1 in flight at any time, so we have enough space.
- * Some logic further ahead will rely on the fact that there is only 1
- * page and that it is always mapped, so if the size is changed the
- * other code will need updating as well.
- */
-blob_size = PAGE_SIZE;
+blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
if (unlikely(err)) {
CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
@@ -209,17 +213,17 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
/* store pointers to desc and cmds for send ctb */
desc = blob;
-cmds = blob + PAGE_SIZE / 2;
-cmds_size = PAGE_SIZE / 4;
+cmds = blob + 2 * CTB_DESC_SIZE;
+cmds_size = CTB_H2G_BUFFER_SIZE;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);
/* store pointers to desc and cmds for recv ctb */
-desc = blob + PAGE_SIZE / 4;
-cmds = blob + PAGE_SIZE / 4 + PAGE_SIZE / 2;
-cmds_size = PAGE_SIZE / 4;
+desc = blob + CTB_DESC_SIZE;
+cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
+cmds_size = CTB_G2H_BUFFER_SIZE;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
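
Not part of the patch: the requirement that each CT buffer size be a multiple of 4K is not asserted anywhere in this change. If one wanted a build-time guard, a hypothetical stand-alone sketch in C11 could look like this (macro values copied from the new definitions above so the fragment is self-contained):

#include <assert.h>

#define SZ_4K 0x1000u
#define CTB_H2G_BUFFER_SIZE (SZ_4K)   /* copied from the patch */
#define CTB_G2H_BUFFER_SIZE (SZ_4K)   /* copied from the patch */

/* Compilation fails if either CT buffer size stops being a whole multiple of 4K. */
static_assert(CTB_H2G_BUFFER_SIZE % SZ_4K == 0, "H2G CTB size must be a multiple of 4K");
static_assert(CTB_G2H_BUFFER_SIZE % SZ_4K == 0, "G2H CTB size must be a multiple of 4K");
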