Commit 9839307c authored by Alexandre Courbot, committed by Ben Skeggs

drm/nouveau/secboot: remove ls_ucode_mgr

This was only used locally, in a single function, and can be replaced by
ad-hoc variables.
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 88490323
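For context, this change applies a common kernel refactoring pattern: a single-use wrapper struct (here ls_ucode_mgr) is dissolved into a bare struct list_head plus ad-hoc local variables, and the element count is recomputed on demand instead of being cached. Below is a minimal sketch of that pattern against the <linux/list.h> API; struct item and the item_* names are hypothetical illustrations, not part of this commit.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical element type; all that matters is the embedded list_head. */
struct item {
	void *data;
	struct list_head node;
};

/* Derive the count on demand rather than caching it in a manager struct. */
static u32
item_list_count(struct list_head *items)
{
	struct list_head *l;
	u32 count = 0;

	list_for_each(l, items)
		count++;

	return count;
}

static void
item_list_demo(void)
{
	struct item *it, *tmp;
	LIST_HEAD(items);	/* ad-hoc list head replaces the wrapper struct */

	it = kzalloc(sizeof(*it), GFP_KERNEL);
	if (it)
		list_add_tail(&it->node, &items);

	/* The _safe variant allows freeing entries while iterating. */
	list_for_each_entry_safe(it, tmp, &items, node) {
		kfree(it->data);
		kfree(it);
	}
}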
@@ -278,75 +278,44 @@ ls_ucode_img_fill_headers(struct acr_r352 *acr, struct ls_ucode_img *img,
 }
 
 /**
- * struct ls_ucode_mgr - manager for all LS falcon firmwares
- * @count: number of managed LS falcons
- * @wpr_size: size of the required WPR region in bytes
- * @img_list: linked list of lsf_ucode_img
+ * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
  */
-struct ls_ucode_mgr {
-	u16 count;
-	u32 wpr_size;
-
-	struct list_head img_list;
-};
-
-static void
-ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
-{
-	memset(mgr, 0, sizeof(*mgr));
-	INIT_LIST_HEAD(&mgr->img_list);
-}
-
-static void
-ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
-{
-	struct ls_ucode_img *img, *t;
-
-	list_for_each_entry_safe(img, t, &mgr->img_list, node) {
-		kfree(img->ucode_data);
-		kfree(img);
-	}
-}
-
-static void
-ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
-{
-	mgr->count++;
-	list_add_tail(&img->node, &mgr->img_list);
-}
-
-/**
- * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
- */
-static void
-ls_ucode_mgr_fill_headers(struct acr_r352 *acr, struct ls_ucode_mgr *mgr)
+static int
+acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
 {
 	struct ls_ucode_img *img;
+	struct list_head *l;
+	u32 count = 0;
 	u32 offset;
 
+	/* Count the number of images to manage */
+	list_for_each(l, imgs)
+		count++;
+
 	/*
 	 * Start with an array of WPR headers at the base of the WPR.
 	 * The expectation here is that the secure falcon will do a single DMA
 	 * read of this array and cache it internally so it's ok to pack these.
 	 * Also, we add 1 to the falcon count to indicate the end of the array.
 	 */
-	offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
+	offset = sizeof(struct lsf_wpr_header) * (count + 1);
 
 	/*
 	 * Walk the managed falcons, accounting for the LSB structs
 	 * as well as the ucode images.
 	 */
-	list_for_each_entry(img, &mgr->img_list, node) {
+	list_for_each_entry(img, imgs, node) {
 		offset = ls_ucode_img_fill_headers(acr, img, offset);
 	}
 
-	mgr->wpr_size = offset;
+	return offset;
 }
 
 /**
  * ls_ucode_mgr_write_wpr - write the WPR blob contents
  */
 static int
-ls_ucode_mgr_write_wpr(struct acr_r352 *acr, struct ls_ucode_mgr *mgr,
+ls_ucode_mgr_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 		       struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
 {
 	struct ls_ucode_img *img;
@@ -354,7 +323,7 @@ ls_ucode_mgr_write_wpr(struct acr_r352 *acr, struct ls_ucode_mgr *mgr,
 	nvkm_kmap(wpr_blob);
 
-	list_for_each_entry(img, &mgr->img_list, node) {
+	list_for_each_entry(img, imgs, node) {
 		const struct acr_r352_ls_func *ls_func =
 			acr->func->ls_func[img->falcon_id];
 		u8 gdesc[ls_func->bl_desc_size];
@@ -399,12 +368,15 @@ static int
 acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
 {
 	const struct nvkm_subdev *subdev = acr->base.subdev;
-	struct ls_ucode_mgr mgr;
+	struct list_head imgs;
+	struct ls_ucode_img *img, *t;
 	unsigned long managed_falcons = acr->base.managed_falcons;
+	int managed_count = 0;
+	u32 image_wpr_size;
 	int falcon_id;
 	int ret;
 
-	ls_ucode_mgr_init(&mgr);
+	INIT_LIST_HEAD(&imgs);
 
 	/* Load all LS blobs */
 	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
@@ -417,48 +389,52 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
 			ret = PTR_ERR(img);
 			goto cleanup;
 		}
 
-		ls_ucode_mgr_add_img(&mgr, img);
+		list_add_tail(&img->node, &imgs);
+		managed_count++;
 	}
 
 	/*
 	 * Fill the WPR and LSF headers with the right offsets and compute
 	 * required WPR size
 	 */
-	ls_ucode_mgr_fill_headers(acr, &mgr);
-	mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);
+	image_wpr_size = acr_r352_ls_fill_headers(acr, &imgs);
+	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
 
 	/* Allocate GPU object that will contain the WPR region */
-	ret = nvkm_gpuobj_new(subdev->device, mgr.wpr_size, WPR_ALIGNMENT,
+	ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT,
 			      false, NULL, &acr->ls_blob);
 	if (ret)
 		goto cleanup;
 
 	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
-		   mgr.count, mgr.wpr_size);
+		   managed_count, image_wpr_size);
 
 	/* If WPR address and size are not fixed, set them to fit the LS blob */
 	if (wpr_size == 0) {
 		wpr_addr = acr->ls_blob->addr;
-		wpr_size = mgr.wpr_size;
+		wpr_size = image_wpr_size;
 	/*
 	 * But if the WPR region is set by the bootloader, it is illegal for
 	 * the HS blob to be larger than this region.
 	 */
-	} else if (mgr.wpr_size > wpr_size) {
+	} else if (image_wpr_size > wpr_size) {
 		nvkm_error(subdev, "WPR region too small for FW blob!\n");
-		nvkm_error(subdev, "required: %dB\n", mgr.wpr_size);
+		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
 		nvkm_error(subdev, "available: %dB\n", wpr_size);
 		ret = -ENOSPC;
 		goto cleanup;
 	}
 
 	/* Write LS blob */
-	ret = ls_ucode_mgr_write_wpr(acr, &mgr, acr->ls_blob, wpr_addr);
+	ret = ls_ucode_mgr_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
 	if (ret)
 		nvkm_gpuobj_del(&acr->ls_blob);
 
 cleanup:
-	ls_ucode_mgr_cleanup(&mgr);
+	list_for_each_entry_safe(img, t, &imgs, node) {
+		kfree(img->ucode_data);
+		kfree(img);
+	}
 
 	return ret;
 }