Commit 72b96ec6 authored by Wachowski, Karol, committed by Jacek Lawrynowicz

accel/ivpu: Make parts of FW image read-only

Implement setting specified buffer ranges as read-only.
If the specified range is not 64K aligned and 64K contiguous
MMU600 pages are enabled, split the 64K mapping to allow 4K granularity
for the read-only configuration.
Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240611120433.1012423-10-jacek.lawrynowicz@linux.intel.com
parent f1432983
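Below is a minimal standalone sketch of the alignment arithmetic behind this change, assuming the driver's 4K base pages and 64K contiguous blocks. The helper names and sample range are hypothetical, used only to illustrate the boundary handling; the actual driver code is ivpu_mmu_context_set_pages_ro() in the diff that follows.

/*
 * Illustrative sketch only: helper names and the sample range below are
 * hypothetical, not driver code. It mirrors the boundary arithmetic of
 * ivpu_mmu_context_set_pages_ro() assuming 4K base pages and 64K
 * contiguous blocks.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_4K		0x1000ULL	/* IVPU_MMU_PAGE_SIZE */
#define CONT_64K	0x10000ULL	/* IVPU_MMU_CONT_PAGES_SIZE */

static uint64_t align_down_64k(uint64_t a) { return a & ~(CONT_64K - 1); }
static uint64_t align_up_64k(uint64_t a)   { return (a + CONT_64K - 1) & ~(CONT_64K - 1); }

/* Count the 4K entries of the 64K block containing addr; the driver clears
 * IVPU_MMU_ENTRY_FLAG_CONT on each of them so they become independent PTEs. */
static unsigned int split_64k_block(uint64_t addr)
{
	unsigned int n = 0;

	for (uint64_t a = align_down_64k(addr); a < align_up_64k(addr); a += PAGE_4K)
		n++;
	return n;
}

int main(void)
{
	/* Hypothetical RO range: 4K aligned but not 64K aligned at either end */
	uint64_t ro_start = 0x81003000ULL;
	uint64_t ro_end   = ro_start + 0x3f000ULL;
	unsigned int split = 0, ro = 0;

	/* Split the head and tail 64K blocks so 4K granularity is available */
	if (ro_start & (CONT_64K - 1))
		split += split_64k_block(ro_start);
	if (ro_end & (CONT_64K - 1))
		split += split_64k_block(ro_end);

	/* Every 4K page of the range then gets the read-only bit
	 * (IVPU_MMU_ENTRY_FLAG_RO in the driver). */
	for (uint64_t a = ro_start; a < ro_end; a += PAGE_4K)
		ro++;

	printf("split %u PTEs, marked %u pages read-only\n", split, ro);
	return 0;
}

Note that only the 64K blocks straddled by the range boundaries lose the contiguous hint; interior blocks keep it and simply have the read-only bit set on all of their 4K entries, which is what the loop in ivpu_mmu_context_set_pages_ro() below does.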
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
@@ -123,6 +123,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
	return false;
}

static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
{
	if (addr < range_start || addr + size > range_start + range_size)
		return false;

	return true;
}

static int ivpu_fw_parse(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
@@ -205,10 +213,24 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
	fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
	fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
	if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
								  fw_hdr->ro_section_size,
								  fw_hdr->image_load_address,
								  fw_hdr->image_size)) {
		ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
			 fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
		return -EINVAL;
	}

	fw->read_only_addr = fw_hdr->ro_section_start_address;
	fw->read_only_size = fw_hdr->ro_section_size;
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
fw->runtime_addr, image_load_addr, fw->entry_point); fw->runtime_addr, image_load_addr, fw->entry_point);
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
fw->read_only_addr, fw->read_only_size);

	return 0;
}
@@ -270,6 +292,13 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
		return -ENOMEM;
	}

	ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
					    fw->read_only_size);
	if (ret) {
		ivpu_err(vdev, "Failed to set firmware image read-only\n");
		goto err_free_fw_mem;
	}

	fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
						 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!fw->mem_log_crit) {
...
@@ -30,6 +30,8 @@ struct ivpu_fw_info {
	u32 dvfs_mode;
	u32 primary_preempt_buf_size;
	u32 secondary_preempt_buf_size;
	u64 read_only_addr;
	u32 read_only_size;
};

int ivpu_fw_init(struct ivpu_device *vdev);
...
@@ -24,6 +24,7 @@
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
@@ -319,6 +320,91 @@ ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
	return 0;
}

static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					 u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					    u64 vpu_addr)
{
	u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 offset = 0;

	ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

	while (start + offset < end) {
		ivpu_mmu_context_split_page(vdev, ctx, start + offset);
		offset += IVPU_MMU_PAGE_SIZE;
	}
}

int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      size_t size)
{
	u64 end = vpu_addr + size;
	size_t size_left = size;
	int ret;

	if (size == 0)
		return 0;

	if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
		 ctx->id, vpu_addr, size);

	if (!ivpu_disable_mmu_cont_pages) {
		/* Split 64K contiguous page at the beginning if needed */
		if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

		/* Split 64K contiguous page at the end if needed */
		if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
	}

	while (size_left) {
		if (vpu_addr < end)
			ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size_left -= IVPU_MMU_PAGE_SIZE;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
...
@@ -46,5 +46,7 @@ int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *
			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				  u64 vpu_addr, size_t size);

#endif /* __IVPU_MMU_CONTEXT_H__ */