Commit 33ec6c9e authored by Daniele Ceraolo Spurio, committed by Chris Wilson

drm/i915/guc: always use Command Transport Buffers

Now that we've moved the Gen9 GuC blobs to version 32 we have CTB
support on all gens, so no need to restrict the usage to Gen11+.
Note that MMIO communication is still required for CTB initialization.

v2: fix commit message nits (Michal)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190606224225.14287-1-daniele.ceraolospurio@intel.com
parent 6be306be
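In short, after this patch the host-to-GuC setup path is the same on every platform: enable interrupts, then bring up the CT buffers, with MMIO sends expected only for the CTB register/deregister actions. A minimal standalone sketch of that flow is below; the stub types and helper names are illustrative only and are not the i915 API (the real changes follow in the diff):

/* Illustrative model of the post-patch flow; not the actual i915 code. */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the CTB register/deregister actions. */
enum guc_action { REGISTER_CTB, DEREGISTER_CTB, OTHER_ACTION };

/* MMIO sends are now expected only while setting up or tearing down CTB. */
static bool mmio_send_expected(enum guc_action action)
{
	return action == REGISTER_CTB || action == DEREGISTER_CTB;
}

/* Communication enable no longer has an MMIO-only fallback path:
 * every platform registers the CT buffers and uses them afterwards. */
static int enable_communication(void)
{
	if (!mmio_send_expected(REGISTER_CTB))
		return -1;	/* would trip the GEM_BUG_ON in the real driver */
	printf("CT buffers registered via MMIO; further traffic uses CTB\n");
	return 0;
}

int main(void)
{
	return enable_communication();
}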
@@ -2393,7 +2393,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  * properties, so we have separate macros to test them.
  */
 #define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
...
@@ -746,7 +746,6 @@ static const struct intel_device_info intel_cannonlake_info = {
 	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
-	.has_guc_ct = 1, \
 	.has_logical_ring_elsq = 1, \
 	.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
...
@@ -112,7 +112,6 @@ enum intel_ppgtt_type {
 	func(has_reset_engine); \
 	func(has_fpga_dbg); \
 	func(has_guc); \
-	func(has_guc_ct); \
 	func(has_l3_dpf); \
 	func(has_llc); \
 	func(has_logical_ring_contexts); \
...
@@ -56,7 +56,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	enum forcewake_domains fw_domains = 0;
 	unsigned int i;

-	if (HAS_GUC_CT(dev_priv) && INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		guc->send_regs.base =
 			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
 		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -232,11 +232,9 @@ int intel_guc_init(struct intel_guc *guc)
 		goto err_log;
 	GEM_BUG_ON(!guc->ads_vma);

-	if (HAS_GUC_CT(dev_priv)) {
-		ret = intel_guc_ct_init(&guc->ct);
-		if (ret)
-			goto err_ads;
-	}
+	ret = intel_guc_ct_init(&guc->ct);
+	if (ret)
+		goto err_ads;

 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
@@ -262,7 +260,6 @@ void intel_guc_fini(struct intel_guc *guc)
 	i915_ggtt_disable_guc(dev_priv);

-	if (HAS_GUC_CT(dev_priv))
-		intel_guc_ct_fini(&guc->ct);
+	intel_guc_ct_fini(&guc->ct);

 	intel_guc_ads_destroy(guc);
@@ -430,8 +427,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

 	/* If CT is available, we expect to use MMIO only during init/fini */
-	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
-		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
 		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

 	mutex_lock(&guc->send_mutex);
@@ -481,33 +477,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	return ret;
 }

-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	u32 msg, val;
-
-	/*
-	 * Sample the log buffer flush related bits & clear them out now
-	 * itself from the message identity register to minimize the
-	 * probability of losing a flush interrupt, when there are back
-	 * to back flush interrupts.
-	 * There can be a new flush interrupt, for different log buffer
-	 * type (like for ISR), whilst Host is handling one (for DPC).
-	 * Since same bit is used in message register for ISR & DPC, it
-	 * could happen that GuC sets the bit for 2nd interrupt but Host
-	 * clears out the bit on handling the 1st interrupt.
-	 */
-	disable_rpm_wakeref_asserts(dev_priv);
-	spin_lock(&guc->irq_lock);
-	val = I915_READ(SOFT_SCRATCH(15));
-	msg = val & guc->msg_enabled_mask;
-	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
-	spin_unlock(&guc->irq_lock);
-	enable_rpm_wakeref_asserts(dev_priv);
-
-	intel_guc_to_host_process_recv_msg(guc, &msg, 1);
-}
-
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len)
 {
...
@@ -165,7 +165,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 			u32 *response_buf, u32 response_buf_size);
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
...
@@ -848,8 +848,6 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
  * Allocate memory required for communication via
  * the CT channel.
  *
- * Shall only be called for platforms with HAS_GUC_CT.
- *
  * Return: 0 on success, a negative errno code on failure.
  */
 int intel_guc_ct_init(struct intel_guc_ct *ct)
@@ -875,8 +873,6 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
  *
  * Deallocate memory required for communication via
  * the CT channel.
- *
- * Shall only be called for platforms with HAS_GUC_CT.
  */
 void intel_guc_ct_fini(struct intel_guc_ct *ct)
 {
@@ -890,19 +886,14 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
  * intel_guc_ct_enable - Enable buffer based command transport.
  * @ct: pointer to CT struct
  *
- * Shall only be called for platforms with HAS_GUC_CT.
- *
  * Return: 0 on success, a negative errno code on failure.
  */
 int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 	int err;

-	GEM_BUG_ON(!HAS_GUC_CT(i915));
-
 	if (ctch->enabled)
 		return 0;
@@ -920,17 +911,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 /**
  * intel_guc_ct_disable - Disable buffer based command transport.
  * @ct: pointer to CT struct
- *
- * Shall only be called for platforms with HAS_GUC_CT.
  */
 void intel_guc_ct_disable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;

-	GEM_BUG_ON(!HAS_GUC_CT(i915));
-
 	if (!ctch->enabled)
 		return;
...
@@ -235,23 +235,13 @@ static void guc_disable_interrupts(struct intel_guc *guc)
 static int guc_enable_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
 	guc_enable_interrupts(guc);

-	if (HAS_GUC_CT(i915))
-		return intel_guc_ct_enable(&guc->ct);
-
-	guc->send = intel_guc_send_mmio;
-	guc->handler = intel_guc_to_host_event_handler_mmio;
-	return 0;
+	return intel_guc_ct_enable(&guc->ct);
 }

 static void guc_stop_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
-	if (HAS_GUC_CT(i915))
-		intel_guc_ct_stop(&guc->ct);
+	intel_guc_ct_stop(&guc->ct);

 	guc->send = intel_guc_send_nop;
@@ -260,9 +250,6 @@ static void guc_stop_communication(struct intel_guc *guc)
 static void guc_disable_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
-	if (HAS_GUC_CT(i915))
-		intel_guc_ct_disable(&guc->ct);
+	intel_guc_ct_disable(&guc->ct);

 	guc_disable_interrupts(guc);
...