Commit 63c2291f authored by Linus Torvalds

Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 microcode updates from Borislav Petkov:
 "This converts the late loading method to load the microcode in
  parallel (vs sequentially currently). The patch remained in linux-next
  for the maximum amount of time so that any potential and hard to debug
  fallout be minimized.

  Now cloud folks have their milliseconds back but all the normal people
  should use early loading anyway :-)"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode/intel: Issue the revision updated message only on the BSP
  x86/microcode: Update late microcode in parallel
  x86/microcode/amd: Fix two -Wunused-but-set-variable warnings
parents ea1f56fa 811ae8ba
arch/x86/kernel/cpu/microcode/amd.c

@@ -567,7 +567,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 void reload_ucode_amd(void)
 {
 	struct microcode_amd *mc;
-	u32 rev, dummy;
+	u32 rev, dummy __always_unused;
 
 	mc = (struct microcode_amd *)amd_ucode_patch;
 
@@ -673,7 +673,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
 	struct ucode_cpu_info *uci;
 	struct ucode_patch *p;
 	enum ucode_state ret;
-	u32 rev, dummy;
+	u32 rev, dummy __always_unused;
 
 	BUG_ON(raw_smp_processor_id() != cpu);
...
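For context on the AMD hunks above: the -Wunused-but-set-variable warnings come from rdmsr()-style macros, which assign both 32-bit halves of the MSR to their output arguments while only rev is read afterwards; __always_unused marks dummy as intentionally ignored. Below is a minimal, self-contained userspace sketch of the same pattern; the READ_REV macro and the values in it are made up for illustration and are not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the attribute provided by <linux/compiler_attributes.h>. */
#define __always_unused __attribute__((__unused__))

/*
 * Stand-in for an rdmsr()-like macro: it assigns both 32-bit halves of a
 * 64-bit register to its output arguments, even if the caller needs only one.
 */
#define READ_REV(lo, hi)		\
	do {				\
		(lo) = 0x08701021u;	\
		(hi) = 0u;		\
	} while (0)

int main(void)
{
	uint32_t rev, dummy __always_unused;

	/*
	 * Without the attribute, gcc -Wunused-but-set-variable reports that
	 * "dummy" is set here but never used afterwards.
	 */
	READ_REV(rev, dummy);

	printf("microcode revision: 0x%x\n", rev);
	return 0;
}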
arch/x86/kernel/cpu/microcode/core.c

@@ -63,11 +63,6 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);
 
-/*
- * Serialize late loading so that CPUs get updated one-by-one.
- */
-static DEFINE_RAW_SPINLOCK(update_lock);
-
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
 
 struct cpu_info_ctx {
@@ -566,11 +561,18 @@ static int __reload_late(void *info)
 	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
 		return -1;
 
-	raw_spin_lock(&update_lock);
-	apply_microcode_local(&err);
-	raw_spin_unlock(&update_lock);
+	/*
+	 * On an SMT system, it suffices to load the microcode on one sibling of
+	 * the core because the microcode engine is shared between the threads.
+	 * Synchronization still needs to take place so that no concurrent
+	 * loading attempts happen on multiple threads of an SMT core. See
+	 * below.
+	 */
+	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
+		apply_microcode_local(&err);
+	else
+		goto wait_for_siblings;
 
+	/* siblings return UCODE_OK because their engine got updated already */
 	if (err > UCODE_NFOUND) {
 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
 		ret = -1;
@@ -578,14 +580,18 @@ static int __reload_late(void *info)
 		ret = 1;
 	}
 
-	/*
-	 * Increase the wait timeout to a safe value here since we're
-	 * serializing the microcode update and that could take a while on a
-	 * large number of CPUs. And that is fine as the *actual* timeout will
-	 * be determined by the last CPU finished updating and thus cut short.
-	 */
-	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
-		panic("Timeout during microcode update!\n");
+wait_for_siblings:
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
+		panic("Timeout during microcode update!\n");
+
+	/*
+	 * At least one thread has completed update on each core.
+	 * For others, simply call the update to make sure the
+	 * per-cpu cpuinfo can be updated with right microcode
+	 * revision.
+	 */
+	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
+		apply_microcode_local(&err);
 
 	return ret;
 }
...
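The control flow of the new __reload_late() amounts to a two-phase rendezvous: all CPUs first meet at late_cpus_in, only the first sibling of each SMT core writes the microcode (the engine is shared per core), all CPUs then meet again at late_cpus_out, and the remaining siblings only refresh their per-CPU revision bookkeeping. The following is a rough userspace sketch of that flow using pthreads and C11 atomics; the thread count, the two-threads-per-core topology, and all function names are illustrative stand-ins, not the kernel implementation, which also enforces timeouts and runs under stop_machine().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS    4	/* illustrative: 2 cores x 2 SMT siblings */
#define PER_CORE 2

static atomic_int late_cpus_in;
static atomic_int late_cpus_out;

/* Simplified rendezvous: the kernel's __wait_for_cpus() also has a timeout. */
static void wait_for_cpus(atomic_int *cnt)
{
	atomic_fetch_add(cnt, 1);
	while (atomic_load(cnt) != NCPUS)
		;	/* spin until every CPU has arrived */
}

static void apply_microcode_local(int cpu)
{
	printf("CPU%d: writing microcode to the shared per-core engine\n", cpu);
}

static void *reload_late(void *arg)
{
	int cpu = (int)(long)arg;
	int first_sibling = (cpu / PER_CORE) * PER_CORE;

	wait_for_cpus(&late_cpus_in);		/* phase 1: everyone is parked */

	if (cpu == first_sibling)		/* one sibling per core loads */
		apply_microcode_local(cpu);

	wait_for_cpus(&late_cpus_out);		/* phase 2: all cores are done */

	if (cpu != first_sibling)		/* others only refresh cpuinfo */
		printf("CPU%d: re-reading revision for per-cpu data\n", cpu);

	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, reload_late, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}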
arch/x86/kernel/cpu/microcode/intel.c

@@ -791,6 +791,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 	struct microcode_intel *mc;
 	enum ucode_state ret;
 	static int prev_rev;
@@ -836,7 +837,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		return UCODE_ERROR;
 	}
 
-	if (rev != prev_rev) {
+	if (bsp && rev != prev_rev) {
 		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
 			rev,
 			mc->hdr.date & 0xffff,
@@ -852,7 +853,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	c->microcode = rev;
 
 	/* Update boot_cpu_data's revision too, if we're on the BSP: */
-	if (c->cpu_index == boot_cpu_data.cpu_index)
+	if (bsp)
 		boot_cpu_data.microcode = rev;
 
 	return ret;
...
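The Intel hunks cache the BSP test in a local bsp variable and gate the pr_info() on it, so that with the now-parallel late loading the "updated to revision" line is printed once per new revision rather than once per CPU. A trivial userspace sketch of that gating, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

static const int boot_cpu_index = 0;	/* stand-in for boot_cpu_data.cpu_index */

static void apply_microcode_sketch(int cpu_index, int rev)
{
	static int prev_rev;
	bool bsp = cpu_index == boot_cpu_index;

	/* Only the BSP logs, and only when the revision actually changed. */
	if (bsp && rev != prev_rev) {
		printf("microcode: updated to revision 0x%x\n", rev);
		prev_rev = rev;
	}
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		apply_microcode_sketch(cpu, 0xd6);	/* logs once, from CPU 0 */
	return 0;
}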