Commit a30d2ed6 authored by Borislav Petkov, committed by Luis Henriques

x86, microcode, AMD: Fix ucode patch stashing on 32-bit

commit c0a717f2 upstream.

Save the patch while we're running on the BSP instead of later, before
the initrd has been jettisoned. More importantly, on 32-bit we need to
access the physical address instead of the virtual.

This way we actually do find it on the APs instead of having to go
through the initrd each time.
Tested-by: Richard Hendershot <rshendershot@mchsi.com>
Fixes: 5335ba5c ("x86, microcode, AMD: Fix early ucode loading")
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent e134b22c
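
The heart of the 32-bit part of the fix is that the early loader still runs before the kernel's page tables are usable, so the patch stash buffer has to be reached through its physical address. A minimal illustrative sketch of that pattern follows; stash_patch_early() is a hypothetical helper name invented here, while PATCH_MAX_SIZE, __pa_nodebug() and min_t() are the kernel symbols the real code uses:

/*
 * Sketch only: how the patch stash buffer is addressed depending on
 * whether early 32-bit code is still running on physical addresses.
 */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

static void stash_patch_early(const void *mc, u32 psize)
{
        u8 (*patch)[PATCH_MAX_SIZE];

#ifdef CONFIG_X86_32
        /* Paging is not set up yet: use the buffer's physical address. */
        patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
        /* 64-bit early code can use the virtual address directly. */
        patch = &amd_ucode_patch;
#endif

        memcpy(patch, mc, min_t(u32, psize, PATCH_MAX_SIZE));
}

The actual change below threads a save_patch flag through apply_ucode_in_initrd() so that only the BSP call stashes the patch.
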
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  */
-static void apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
 {
         struct equiv_cpu_entry *eq;
         size_t *cont_sz;
         u32 *header;
         u8  *data, **cont;
+        u8 (*patch)[PATCH_MAX_SIZE];
         u16 eq_id = 0;
         int offset, left;
         u32 rev, eax, ebx, ecx, edx;
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
         new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
         cont_sz = (size_t *)__pa_nodebug(&container_size);
         cont    = (u8 **)__pa_nodebug(&container);
+        patch   = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
         new_rev = &ucode_new_rev;
         cont_sz = &container_size;
         cont    = &container;
+        patch   = &amd_ucode_patch;
 #endif
 
         data   = ucode;
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
                                 rev = mc->hdr.patch_id;
                                 *new_rev = rev;
 
-                                /* save ucode patch */
-                                memcpy(amd_ucode_patch, mc,
-                                       min_t(u32, header[1], PATCH_MAX_SIZE));
+                                if (save_patch)
+                                        memcpy(patch, mc,
+                                               min_t(u32, header[1], PATCH_MAX_SIZE));
                         }
                 }
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
         *data = cp.data;
         *size = cp.size;
 
-        apply_ucode_in_initrd(cp.data, cp.size);
+        apply_ucode_in_initrd(cp.data, cp.size, true);
 }
 
 #ifdef CONFIG_X86_32
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
         size_t *usize;
         void **ucode;
 
-        mc = (struct microcode_amd *)__pa(amd_ucode_patch);
+        mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
         if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
                 __apply_microcode_amd(mc);
                 return;
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
         if (!*ucode || !*usize)
                 return;
 
-        apply_ucode_in_initrd(*ucode, *usize);
+        apply_ucode_in_initrd(*ucode, *usize, false);
 }
 
 static void __init collect_cpu_sig_on_bsp(void *arg)
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
                  * AP has a different equivalence ID than BSP, looks like
                  * mixed-steppings silicon so go through the ucode blob anew.
                  */
-                apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
+                apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
         }
 }
 #endif
@@ -347,6 +350,7 @@ void load_ucode_amd_ap(void)
 int __init save_microcode_in_initrd_amd(void)
 {
         unsigned long cont;
+        int retval = 0;
         enum ucode_state ret;
         u8 *cont_va;
         u32 eax;
@@ -387,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
         ret = load_microcode_amd(eax, container, container_size);
         if (ret != UCODE_OK)
-                return -EINVAL;
+                retval = -EINVAL;
 
         /*
          * This will be freed any msec now, stash patches for the current
@@ -396,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
         container = NULL;
         container_size = 0;
 
-        return 0;
+        return retval;
 }
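
The last two hunks also adjust the error path of save_microcode_in_initrd_amd(): instead of returning early when load_microcode_amd() fails, the function now records the error and falls through, so container and container_size are always cleared before the initrd memory goes away. A simplified sketch of that control-flow idea; save_containers_sketch() and its parameter are hypothetical and not the real function:

/*
 * Sketch only: record the failure but never skip the cleanup of
 * pointers into the initrd, which is about to be freed.
 */
static u8 *container;
static size_t container_size;

static int save_containers_sketch(bool load_ok)
{
        int retval = 0;

        if (!load_ok)
                retval = -EINVAL;       /* note the error, keep going */

        /* Always reset: these point into soon-to-be-freed initrd memory. */
        container = NULL;
        container_size = 0;

        return retval;
}
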