Commit 0fe5f9ca authored by Linus Torvalds

Merge tag 'x86-urgent-2020-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 and objtool fixes from Thomas Gleixner:
 "A set of fixes for x86 and objtool:

  objtool:

   - Ignore the double UD2 which is emitted in BUG() when
     CONFIG_UBSAN_TRAP is enabled.

   - Support Clang non-section symbols in objtool ORC dump.

   - Fix switch table detection in .text.unlikely.

   - Make the BP scratch register warning more robust.

  x86:

   - Increase microcode maximum patch size for AMD to cope with new CPUs
     which have a larger patch size.

   - Fix a crash in the resource control filesystem when the removal of
     the default resource group is attempted.

   - Preserve Code and Data Prioritization enabled state across CPU
     hotplug.

   - Update split lock CPU matching to use the new X86_MATCH macros.

   - Change the split lock enumeration as Intel finally decided that the
     IA32_CORE_CAPABILITIES bits are not architectural, contrary to what
     the SDM claims. !@#%$^!

   - Add Tremont CPU models to the split lock detection CPU match.

   - Add a missing static attribute to make sparse happy"

* tag 'x86-urgent-2020-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/split_lock: Add Tremont family CPU models
  x86/split_lock: Bits in IA32_CORE_CAPABILITIES are not architectural
  x86/resctrl: Preserve CDP enable over CPU hotplug
  x86/resctrl: Fix invalid attempt at removing the default resource group
  x86/split_lock: Update to use X86_MATCH_INTEL_FAM6_MODEL()
  x86/umip: Make umip_insns static
  x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
  objtool: Make BP scratch register warning more robust
  objtool: Fix switch table detection in .text.unlikely
  objtool: Support Clang non-section symbols in ORC generation
  objtool: Support Clang non-section symbols in ORC dump
  objtool: Fix CONFIG_UBSAN_TRAP unreachable warnings
parents 3e0dea57 8b9a18a9
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
@@ -41,7 +41,7 @@ struct microcode_amd {
 	unsigned int			mpb[0];
 };
 
-#define PATCH_MAX_SIZE PAGE_SIZE
+#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
 
 #ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
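Why the bump matters: the loader keeps the selected patch in a statically
sized buffer, so the cap has to track the largest patch AMD actually ships,
and newer CPUs exceed one 4K page. A minimal sketch of the constraint;
stash_patch() is a made-up name for illustration, not the kernel's loader:

	#include <string.h>

	#define PAGE_SIZE      4096
	#define PATCH_MAX_SIZE (3 * PAGE_SIZE)

	/* The kernel similarly keeps one patch in a PATCH_MAX_SIZE buffer. */
	static unsigned char amd_ucode_patch[PATCH_MAX_SIZE];

	static int stash_patch(const void *image, size_t size)
	{
		/* Would have rejected new large patches at the old PAGE_SIZE cap. */
		if (size > PATCH_MAX_SIZE)
			return -1;
		memcpy(amd_ucode_patch, image, size);
		return 0;
	}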
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
@@ -1119,35 +1119,53 @@ void switch_to_sld(unsigned long tifn)
 	sld_update_msr(!(tifn & _TIF_SLD));
 }
 
-#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}
-
 /*
- * The following processors have the split lock detection feature. But
- * since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot
- * be enumerated. Enable it by family and model matching on these
- * processors.
+ * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
+ * only be trusted if it is confirmed that a CPU model implements a
+ * specific feature at a particular bit position.
+ *
+ * The possible driver data field values:
+ *
+ * - 0: CPU models that are known to have the per-core split-lock detection
+ *	feature even though they do not enumerate IA32_CORE_CAPABILITIES.
+ *
+ * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
+ *      bit 5 to enumerate the per-core split-lock detection feature.
  */
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
-	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X),
-	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	1),
 	{}
 };
 
 void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_core_caps = 0;
+	const struct x86_cpu_id *m;
+	u64 ia32_core_caps;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return;
+
+	m = x86_match_cpu(split_lock_cpu_ids);
+	if (!m)
+		return;
 
-	if (c->x86_vendor != X86_VENDOR_INTEL)
+	switch (m->driver_data) {
+	case 0:
+		break;
+	case 1:
+		if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
+			return;
-	if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) {
-		/* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */
 		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
-	} else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
-		/* Enumerate split lock detection by family and model. */
-		if (x86_match_cpu(split_lock_cpu_ids))
-			ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT;
+		if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
+			return;
+		break;
+	default:
+		return;
 	}
 
-	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
 	split_lock_setup();
 }
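For context, split_lock_setup() ultimately arms the feature through
MSR_TEST_CTRL. A simplified sketch of that toggle, assuming the MSR layout
from the kernel's msr-index.h (MSR 0x33, detect bit 29) and the kernel's
rdmsrl()/wrmsrl() helpers; the real sld_update_msr() works from a cached
MSR value and flips it per task via TIF_SLD rather than reading back:

	#define MSR_TEST_CTRL			0x00000033
	#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT	(1ULL << 29)

	static void sld_toggle(bool on)
	{
		u64 ctrl;

		rdmsrl(MSR_TEST_CTRL, ctrl);
		if (on)
			ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
		else
			ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
		wrmsrl(MSR_TEST_CTRL, ctrl);
	}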
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
@@ -578,6 +578,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	d->id = id;
 	cpumask_set_cpu(cpu, &d->cpu_mask);
 
+	rdt_domain_reconfigure_cdp(r);
+
 	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
 		kfree(d);
 		return;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -601,5 +601,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
 bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
 bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1859,6 +1859,19 @@ static int set_cache_qos_cfg(int level, bool enable)
 	return 0;
 }
 
+/* Restore the qos cfg state when a domain comes online */
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
+{
+	if (!r->alloc_capable)
+		return;
+
+	if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
+		l2_qos_cfg_update(&r->alloc_enabled);
+
+	if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
+		l3_qos_cfg_update(&r->alloc_enabled);
+}
+
 /*
  * Enable or disable the MBA software controller
  * which helps user specify bandwidth in MBps.
@@ -3072,7 +3085,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 	 * If the rdtgroup is a mon group and parent directory
 	 * is a valid "mon_groups" directory, remove the mon group.
 	 */
-	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
+	    rdtgrp != &rdtgroup_default) {
 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
 			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
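Why the reconfigure hook is needed: CDP enablement lives in a per-domain
MSR, so a cache domain that goes away and comes back over hotplug resets it,
while resctrl's alloc_enabled state still says CDP is on. The *_qos_cfg_update()
helpers boil down to writing bit 0 of the QOS_CFG MSR on each CPU of the
domain. A simplified sketch of the per-CPU callback, with MSR numbers quoted
from the kernel's msr-index.h (0xc81 for L3, 0xc82 for L2):

	#define MSR_IA32_L3_QOS_CFG	0xc81
	#define MSR_IA32_L2_QOS_CFG	0xc82
	#define QOS_CFG_CDP_ENABLE	0x01ULL	/* bit 0 selects CDP mode */

	/* Run on each CPU of the domain (the kernel uses on_each_cpu_mask()). */
	static void l3_qos_cfg_write(void *arg)
	{
		bool *enable = arg;

		wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? QOS_CFG_CDP_ENABLE : 0ULL);
	}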
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
@@ -81,7 +81,7 @@
 #define	UMIP_INST_SLDT  3       /* 0F 00 /0 */
 #define	UMIP_INST_STR   4       /* 0F 00 /1 */
 
-const char * const umip_insns[5] = {
+static const char * const umip_insns[5] = {
 	[UMIP_INST_SGDT] = "SGDT",
 	[UMIP_INST_SIDT] = "SIDT",
 	[UMIP_INST_SMSW] = "SMSW",
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
@@ -1050,10 +1050,7 @@ static struct rela *find_jump_table(struct objtool_file *file,
 	 * it.
 	 */
 	for (;
-	     &insn->list != &file->insn_list &&
-	     insn->sec == func->sec &&
-	     insn->offset >= func->offset;
+	     &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
 	     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
 
 		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
@@ -2008,8 +2005,8 @@ static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
 	}
 
 	if (state->bp_scratch) {
-		WARN("%s uses BP as a scratch register",
-		     func->name);
+		WARN_FUNC("BP used as a scratch register",
+			  insn->sec, insn->offset);
 		return 1;
 	}
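The jump-table fix above is about hot/cold function splitting: walking
backwards by section and offset no longer stays inside the logical function
once the compiler has moved the unlikely path into a child symbol placed in
.text.unlikely, whereas insn->func->pfunc still links that child back to its
parent. A toy example of code that may be split this way (foo/bar are
illustrative names only):

	/* bar() stands in for an out-of-line error path. With -O2, GCC
	 * may split the unlikely branch out as a separate foo.cold
	 * symbol placed in .text.unlikely, away from foo's section. */
	int bar(int x);

	int foo(int x)
	{
		if (__builtin_expect(x < 0, 0))
			return bar(x);	/* may land in foo.cold */
		return x + 1;
	}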
@@ -2364,14 +2361,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
 	    !strcmp(insn->sec->name, ".altinstr_aux"))
 		return true;
 
+	if (!insn->func)
+		return false;
+
+	/*
+	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
+	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
+	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
+	 * (or occasionally a JMP to UD2).
+	 */
+	if (list_prev_entry(insn, list)->dead_end &&
+	    (insn->type == INSN_BUG ||
+	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
+	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+		return true;
+
 	/*
 	 * Check if this (or a subsequent) instruction is related to
 	 * CONFIG_UBSAN or CONFIG_KASAN.
 	 *
 	 * End the search at 5 instructions to avoid going into the weeds.
 	 */
-	if (!insn->func)
-		return false;
-
 	for (i = 0; i < 5; i++) {
 
 		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
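The double-UD2 pattern the new check recognizes comes from how a BUG()-style
macro interacts with CONFIG_UBSAN_TRAP: the macro emits its own ud2 and then
calls unreachable(), and with -fsanitize-undefined-trap-on-error the
compiler turns __builtin_unreachable() into a trap as well, so two ud2s land
back to back. A minimal sketch of the pattern (MY_BUG is a stand-in for the
kernel's BUG(); the name is illustrative only):

	/* Build with:
	 *   gcc -O2 -fsanitize=undefined -fsanitize-undefined-trap-on-error
	 * i.e. the compiler mode behind CONFIG_UBSAN_TRAP.
	 */
	#define MY_BUG()						\
		do {							\
			asm volatile("ud2"); /* the macro's own trap */	\
			__builtin_unreachable(); /* becomes a 2nd ud2 */\
		} while (0)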
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
@@ -66,7 +66,7 @@ int orc_dump(const char *_objname)
 	char *name;
 	size_t nr_sections;
 	Elf64_Addr orc_ip_addr = 0;
-	size_t shstrtab_idx;
+	size_t shstrtab_idx, strtab_idx = 0;
 	Elf *elf;
 	Elf_Scn *scn;
 	GElf_Shdr sh;
@@ -127,6 +127,8 @@ int orc_dump(const char *_objname)
 
 		if (!strcmp(name, ".symtab")) {
 			symtab = data;
+		} else if (!strcmp(name, ".strtab")) {
+			strtab_idx = i;
 		} else if (!strcmp(name, ".orc_unwind")) {
 			orc = data->d_buf;
 			orc_size = sh.sh_size;
@@ -138,7 +140,7 @@ int orc_dump(const char *_objname)
 		}
 	}
 
-	if (!symtab || !orc || !orc_ip)
+	if (!symtab || !strtab_idx || !orc || !orc_ip)
 		return 0;
 
 	if (orc_size % sizeof(*orc) != 0) {
@@ -159,6 +161,7 @@ int orc_dump(const char *_objname)
 			return -1;
 		}
 
+		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
 			scn = elf_getscn(elf, sym.st_shndx);
 			if (!scn) {
 				WARN_ELF("elf_getscn");
@@ -171,10 +174,17 @@ int orc_dump(const char *_objname)
 			}
 
 			name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
-			if (!name || !*name) {
+			if (!name) {
 				WARN_ELF("elf_strptr");
 				return -1;
 			}
+		} else {
+			name = elf_strptr(elf, strtab_idx, sym.st_name);
+			if (!name) {
+				WARN_ELF("elf_strptr");
+				return -1;
+			}
+		}
 
 		printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
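The underlying ELF detail: a section symbol (STT_SECTION) has no name of its
own in .strtab, so its printable name must come from the section header via
.shstrtab, while the ordinary function symbols that Clang's integrated
assembler emits are named through st_name in .strtab. The two lookup paths
condensed into one hypothetical libelf helper (sym_name is a made-up name):

	#include <gelf.h>

	static const char *sym_name(Elf *elf, size_t shstrtab_idx,
				    size_t strtab_idx, const GElf_Sym *sym)
	{
		if (GELF_ST_TYPE(sym->st_info) == STT_SECTION) {
			/* Section symbol: name comes from the section header. */
			GElf_Shdr sh;
			Elf_Scn *scn = elf_getscn(elf, sym->st_shndx);

			if (!scn || !gelf_getshdr(scn, &sh))
				return NULL;
			return elf_strptr(elf, shstrtab_idx, sh.sh_name);
		}
		/* Ordinary symbol: st_name indexes .strtab directly. */
		return elf_strptr(elf, strtab_idx, sym->st_name);
	}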
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
@@ -88,11 +88,6 @@ static int create_orc_entry(struct elf *elf, struct section *u_sec, struct section *ip_relasec,
 	struct orc_entry *orc;
 	struct rela *rela;
 
-	if (!insn_sec->sym) {
-		WARN("missing symbol for section %s", insn_sec->name);
-		return -1;
-	}
-
 	/* populate ORC data */
 	orc = (struct orc_entry *)u_sec->data->d_buf + idx;
 	memcpy(orc, o, sizeof(*orc));
@@ -105,8 +100,32 @@ static int create_orc_entry(struct elf *elf, struct section *u_sec, struct section *ip_relasec,
 	}
 	memset(rela, 0, sizeof(*rela));
 
-	rela->sym = insn_sec->sym;
-	rela->addend = insn_off;
+	if (insn_sec->sym) {
+		rela->sym = insn_sec->sym;
+		rela->addend = insn_off;
+	} else {
+		/*
+		 * The Clang assembler doesn't produce section symbols, so we
+		 * have to reference the function symbol instead:
+		 */
+		rela->sym = find_symbol_containing(insn_sec, insn_off);
+		if (!rela->sym) {
+			/*
+			 * Hack alert. This happens when we need to reference
+			 * the NOP pad insn immediately after the function.
+			 */
+			rela->sym = find_symbol_containing(insn_sec,
+							   insn_off - 1);
+		}
+		if (!rela->sym) {
+			WARN("missing symbol for insn at offset 0x%lx\n",
+			     insn_off);
+			return -1;
+		}
+
+		rela->addend = insn_off - rela->sym->offset;
+	}
+
 	rela->type = R_X86_64_PC32;
 	rela->offset = idx * sizeof(int);
 	rela->sec = ip_relasec;
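Both relocation forms resolve to the same address under the ELF rule
value = S + A. With a section symbol, S is the section start (offset 0
within the section) and A = insn_off; with a function symbol, S is the
function's offset into the section and A = insn_off - sym->offset, so
S + A = insn_off either way and the generated .orc_unwind_ip entry is
unchanged. The insn_off - 1 fallback covers an instruction that begins
exactly at a function's end (NOP padding), which no symbol's
[offset, offset + len) range would otherwise contain.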