Commit fc488b59 authored by Fernando Pacheco, committed by Chris Wilson

drm/i915/uc: Place uC firmware in upper range of GGTT

Currently we pin the GuC or HuC firmware image just before uploading.
Perma-pin during uC initialization instead and use the range reserved at
the top of the address space.
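
For reference, the resulting upload path looks roughly like this (a
condensed sketch of the intel_uc_fw_upload() changes in the diff below;
status tracking, error messages and debug output are trimmed):

	/* pages are perma-pinned by intel_uc_fw_init() at uC init time */
	intel_uc_fw_ggtt_bind(uc_fw);	/* map into the reserved ggtt->uc_fw node */

	err = xfer(uc_fw);		/* DMA the image out of the reserved range */
	if (err)
		goto fail;

	intel_uc_fw_ggtt_unbind(uc_fw);	/* the mapping is only needed for the xfer */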

Moving the firmware resulted in needing to:
- use an additional pinning for the RSA signature, which will be used
  during HuC auth, as addresses above GUC_GGTT_TOP do not map through
  the GTT (sketched below).
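
Roughly, that workaround looks like this (condensed from
intel_huc_rsa_data_create() and huc_xfer_rsa() in the diff below;
error handling omitted):

	/* perma-pin one page in the GuC-accessible part of the GGTT */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	huc->rsa_data = vma;
	huc->rsa_data_vaddr = vaddr;

	/* copy only the RSA signature out of the fw image into that page */
	sg_pcopy_to_buffer(pages->sgl, pages->nents,
			   huc->rsa_data_vaddr, fw->rsa_size, fw->rsa_offset);

	/* GuC authenticates using the GuC-visible offset of the extra pin */
	intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->rsa_data));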

v2: Remove call to set to gtt domain
    Do not restore fw gtt mapping unconditionally
    Separate out pin/unpin functions and drop usage of pin/unpin
    Use uc_fw init/fini functions to bind/unbind fw object

v3: Bind is only needed during xfer (Chris)
    Remove attempts to bind outside of xfer (Chris)
    Mark fw bind/unbind static
Signed-off-by: Fernando Pacheco <fernando.pacheco@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190419230015.18121-4-fernando.pacheco@intel.com
parent 91180076
@@ -189,9 +189,13 @@ int intel_guc_init(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
ret = guc_shared_data_create(guc);
ret = intel_uc_fw_init(&guc->fw);
if (ret)
goto err_fetch;
ret = guc_shared_data_create(guc);
if (ret)
goto err_fw;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -220,6 +224,8 @@ int intel_guc_init(struct intel_guc *guc)
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
err_fw:
intel_uc_fw_fini(&guc->fw);
err_fetch:
intel_uc_fw_cleanup_fetch(&guc->fw);
return ret;
@@ -237,6 +243,7 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
}
......
@@ -122,14 +122,16 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
static void guc_xfer_rsa(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *fw = &guc->fw;
struct sg_table *pages = fw->obj->mm.pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
rsa, sizeof(rsa), guc->fw.rsa_offset);
sg_pcopy_to_buffer(pages->sgl, pages->nents,
rsa, sizeof(rsa), fw->rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
@@ -201,7 +203,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*/
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
static int guc_xfer_ucode(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
@@ -214,7 +216,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -233,7 +235,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -250,9 +252,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
guc_xfer_rsa(guc, vma);
guc_xfer_rsa(guc);
ret = guc_xfer_ucode(guc, vma);
ret = guc_xfer_ucode(guc);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
......
@@ -40,6 +40,61 @@ int intel_huc_init_misc(struct intel_huc *huc)
return 0;
}
static int intel_huc_rsa_data_create(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
void *vaddr;
/*
* HuC firmware will sit above GUC_GGTT_TOP and will not map
* through GTT. Unfortunately, this means GuC cannot perform
* the HuC auth. as the rsa offset now falls within the GuC
* inaccessible range. We resort to perma-pinning an additional
* vma within the accessible range that only contains the rsa
* signature. The GuC can use this extra pinning to perform
* the authentication since its GGTT offset will be GuC
* accessible.
*/
vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
}
huc->rsa_data = vma;
huc->rsa_data_vaddr = vaddr;
return 0;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
{
i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
}
int intel_huc_init(struct intel_huc *huc)
{
int err;
err = intel_huc_rsa_data_create(huc);
if (err)
return err;
return intel_uc_fw_init(&huc->fw);
}
void intel_huc_fini(struct intel_huc *huc)
{
intel_uc_fw_fini(&huc->fw);
intel_huc_rsa_data_destroy(huc);
}
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
@@ -55,27 +110,17 @@ int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
goto fail;
}
ret = intel_guc_auth_huc(guc,
intel_guc_ggtt_offset(guc, vma) +
huc->fw.rsa_offset);
intel_guc_ggtt_offset(guc, huc->rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail_unpin;
goto fail;
}
/* Check authentication status, it should be done by now */
@@ -86,14 +131,11 @@ int intel_huc_auth(struct intel_huc *huc)
2, 50, &status);
if (ret) {
DRM_ERROR("HuC: Firmware not verified %#x\n", status);
goto fail_unpin;
goto fail;
}
i915_vma_unpin(vma);
return 0;
fail_unpin:
i915_vma_unpin(vma);
fail:
huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
......
@@ -33,10 +33,14 @@ struct intel_huc {
struct intel_uc_fw fw;
/* HuC-specific additions */
struct i915_vma *rsa_data;
void *rsa_data_vaddr;
};
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init_misc(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
......
@@ -93,18 +93,24 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
huc_fw_select(huc_fw);
}
/**
* huc_fw_xfer() - DMA's the firmware
* @huc_fw: the firmware descriptor
* @vma: the firmware image (bound into the GGTT)
*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* Return: 0 on success, non-zero on failure
*/
static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
static void huc_xfer_rsa(struct intel_huc *huc)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct intel_uc_fw *fw = &huc->fw;
struct sg_table *pages = fw->obj->mm.pages;
/*
* HuC firmware image is outside GuC accessible range.
* Copy the RSA signature out of the image into
* the perma-pinned region set aside for it
*/
sg_pcopy_to_buffer(pages->sgl, pages->nents,
huc->rsa_data_vaddr, fw->rsa_size,
fw->rsa_offset);
}
static int huc_xfer_ucode(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
struct drm_i915_private *dev_priv = huc_to_i915(huc);
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
@@ -116,7 +122,7 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
offset = intel_uc_fw_ggtt_offset(huc_fw) +
huc_fw->header_offset;
intel_uncore_write(uncore, DMA_ADDR_0_LOW,
lower_32_bits(offset));
@@ -150,6 +156,23 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
return ret;
}
/**
* huc_fw_xfer() - DMA's the firmware
* @huc_fw: the firmware descriptor
*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* Return: 0 on success, non-zero on failure
*/
static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
huc_xfer_rsa(huc);
return huc_xfer_ucode(huc);
}
/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
......
@@ -280,6 +280,7 @@ void intel_uc_fini_misc(struct drm_i915_private *i915)
int intel_uc_init(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
struct intel_huc *huc = &i915->huc;
int ret;
if (!USES_GUC(i915))
@@ -292,19 +293,30 @@ int intel_uc_init(struct drm_i915_private *i915)
if (ret)
return ret;
if (USES_HUC(i915)) {
ret = intel_huc_init(huc);
if (ret)
goto err_guc;
}
if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc);
if (ret) {
intel_guc_fini(guc);
return ret;
}
if (ret)
goto err_huc;
}
return 0;
err_huc:
if (USES_HUC(i915))
intel_huc_fini(huc);
err_guc:
intel_guc_fini(guc);
return ret;
}
void intel_uc_fini(struct drm_i915_private *i915)
@@ -319,6 +331,9 @@ void intel_uc_fini(struct drm_i915_private *i915)
if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
if (USES_HUC(i915))
intel_huc_fini(&i915->huc);
intel_guc_fini(guc);
}
......
@@ -191,6 +191,35 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
release_firmware(fw); /* OK even if fw is NULL */
}
static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma dummy = {
.node.start = intel_uc_fw_ggtt_offset(uc_fw),
.node.size = obj->base.size,
.pages = obj->mm.pages,
.vm = &ggtt->vm,
};
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
drm_clflush_sg(dummy.pages);
ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
}
static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
u64 start = intel_uc_fw_ggtt_offset(uc_fw);
ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
@@ -201,11 +230,8 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
* Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
struct i915_vma *vma))
int (*xfer)(struct intel_uc_fw *uc_fw))
{
struct i915_vma *vma;
u32 ggtt_pin_bias;
int err;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -219,36 +245,15 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type),
intel_uc_fw_status_repr(uc_fw->load_status));
/* Pin object with firmware */
err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
if (err) {
DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
goto fail;
}
ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | ggtt_pin_bias);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
goto fail;
}
intel_uc_fw_ggtt_bind(uc_fw);
/* Call custom loader */
err = xfer(uc_fw, vma);
/*
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
i915_vma_unpin(vma);
err = xfer(uc_fw);
if (err)
goto fail;
intel_uc_fw_ggtt_unbind(uc_fw);
uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw load %s\n",
intel_uc_fw_type_repr(uc_fw->type),
@@ -273,6 +278,42 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
return err;
}
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
int err;
if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
return -ENOEXEC;
err = i915_gem_object_pin_pages(uc_fw->obj);
if (err)
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
return err;
}
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
return;
i915_gem_object_unpin_pages(uc_fw->obj);
}
u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_mm_node *node = &ggtt->uc_fw;
GEM_BUG_ON(!node->allocated);
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
return lower_32_bits(node->start);
}
/**
* intel_uc_fw_cleanup_fetch - cleanup uC firmware
*
......
@@ -27,7 +27,6 @@
struct drm_printer;
struct drm_i915_private;
struct i915_vma;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
@@ -147,8 +146,10 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
struct i915_vma *vma));
int (*xfer)(struct intel_uc_fw *uc_fw));
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif