Commit 4741da92 authored by Chris Wilson

drm/i915/guc: Assert that all GGTT offsets used by the GuC are mappable

Replace the plain i915_ggtt_offset() with a guc_ggtt_offset() wrapper that double checks
that any offset we hand to the GuC is outside of its unmappable ranges (i.e. not below GUC_WOPCM_TOP).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161224193146.4402-1-chris@chris-wilson.co.uk
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
parent f061ff07
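
The assertion in guc_ggtt_offset() only holds if every object the GuC dereferences has been
pinned above the WOPCM region. A minimal caller-side sketch of that requirement (illustrative
only, not part of this patch; the helper name guc_pin_vma_above_wopcm() is hypothetical, while
i915_vma_pin(), PIN_GLOBAL, PIN_OFFSET_BIAS and GUC_WOPCM_TOP are assumed to be the existing
driver interfaces):

/*
 * Hypothetical helper, not part of this patch: pin a vma for GuC use so
 * that the GEM_BUG_ON() in guc_ggtt_offset() cannot fire.
 */
static int guc_pin_vma_above_wopcm(struct i915_vma *vma)
{
	int ret;

	/* Bias the GGTT placement above the range the GuC cannot address */
	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		return ret;

	/* The resulting offset is now safe to hand to the GuC */
	GEM_BUG_ON(guc_ggtt_offset(vma) < GUC_WOPCM_TOP);
	return 0;
}
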
@@ -270,11 +270,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 		/* The state page is after PPHWSP */
 		lrc->ring_lcra =
-			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
-		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
 		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
@@ -290,7 +290,7 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = i915_ggtt_offset(client->vma);
+	gfx_addr = guc_ggtt_offset(client->vma);
 	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
 				client->doorbell_offset;
 	desc.db_trigger_cpu =
@@ -1226,7 +1226,7 @@ static void guc_log_create(struct intel_guc *guc)
 		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
 	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
@@ -1329,7 +1329,7 @@ static void guc_addon_create(struct intel_guc *guc)
 	guc_policies_init(policies);
 	ads->scheduler_policies =
-		i915_ggtt_offset(vma) + sizeof(struct guc_ads);
+		guc_ggtt_offset(vma) + sizeof(struct guc_ads);
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -1495,7 +1495,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
 	/* first page is shared data with GuC */
-	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
@@ -1522,7 +1522,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
 	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
 	/* first page is shared data with GuC */
-	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
...
@@ -220,14 +220,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
 	params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
 	if (guc->ads_vma) {
-		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
 		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
 	}
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
+		u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
 		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 		pgs >>= PAGE_SHIFT;
@@ -297,7 +297,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 	/* Set the source address for the new blob */
-	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
+	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
...
@@ -28,6 +28,8 @@
 #include "i915_guc_reg.h"
 #include "intel_ringbuffer.h"
+#include "i915_vma.h"
 struct drm_i915_gem_request;
 /*
@@ -198,4 +200,11 @@ void i915_guc_register(struct drm_i915_private *dev_priv);
 void i915_guc_unregister(struct drm_i915_private *dev_priv);
 int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+	u32 offset = i915_ggtt_offset(vma);
+	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+	return offset;
+}
 #endif