Commit 19caf581 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A set of x86 fixes:

   - Prevent potential NULL pointer dereferences in the HPET and HyperV
     code

   - Exclude the GART aperture from /proc/kcore to prevent kernel
     crashes on access

   - Use the correct macros for Cyrix I/O on Geode processors

   - Remove yet another kernel address printk leak

   - Announce microcode reload completion as requested by quite some
     people. Microcode loading has become popular recently.

   - Some 'Make Clang' happy fixlets

   - A few cleanups for recently added code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/gart: Exclude GART aperture from kcore
  x86/hw_breakpoints: Make default case in hw_breakpoint_arch_parse() return an error
  x86/mm/pti: Make local symbols static
  x86/cpu/cyrix: Remove {get,set}Cx86_old macros used for Cyrix processors
  x86/cpu/cyrix: Use correct macros for Cyrix calls on Geode processors
  x86/microcode: Announce reload operation's completion
  x86/hyperv: Prevent potential NULL pointer dereference
  x86/hpet: Prevent potential NULL pointer dereference
  x86/lib: Fix indentation issue, remove extra tab
  x86/boot: Restrict header scope to make Clang happy
  x86/mm: Don't leak kernel addresses
  x86/cpufeature: Fix various quality problems in the <asm/cpu_device_hd.h> header
parents a75eda7b ffc8599a
...@@ -13,8 +13,9 @@ ...@@ -13,8 +13,9 @@
*/ */
#include <linux/types.h> #include <linux/types.h>
#include <linux/kernel.h> #include <linux/compiler.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/limits.h>
#include <asm/asm.h> #include <asm/asm.h>
#include "ctype.h" #include "ctype.h"
#include "string.h" #include "string.h"
......
...@@ -103,9 +103,13 @@ static int hv_cpu_init(unsigned int cpu) ...@@ -103,9 +103,13 @@ static int hv_cpu_init(unsigned int cpu)
u64 msr_vp_index; u64 msr_vp_index;
struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
void **input_arg; void **input_arg;
struct page *pg;
input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
*input_arg = page_address(alloc_page(GFP_KERNEL)); pg = alloc_page(GFP_KERNEL);
if (unlikely(!pg))
return -ENOMEM;
*input_arg = page_address(pg);
hv_get_vp_index(msr_vp_index); hv_get_vp_index(msr_vp_index);
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CPU_DEVICE_ID #ifndef _ASM_X86_CPU_DEVICE_ID
#define _CPU_DEVICE_ID 1 #define _ASM_X86_CPU_DEVICE_ID
/* /*
* Declare drivers belonging to specific x86 CPUs * Declare drivers belonging to specific x86 CPUs
...@@ -9,8 +9,6 @@ ...@@ -9,8 +9,6 @@
#include <linux/mod_devicetable.h> #include <linux/mod_devicetable.h>
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
/* /*
* Match specific microcode revisions. * Match specific microcode revisions.
* *
...@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); ...@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
*/ */
struct x86_cpu_desc { struct x86_cpu_desc {
__u8 x86_family; u8 x86_family;
__u8 x86_vendor; u8 x86_vendor;
__u8 x86_model; u8 x86_model;
__u8 x86_stepping; u8 x86_stepping;
__u32 x86_microcode_rev; u32 x86_microcode_rev;
}; };
#define INTEL_CPU_DESC(mod, step, rev) { \ #define INTEL_CPU_DESC(model, stepping, revision) { \
.x86_family = 6, \ .x86_family = 6, \
.x86_vendor = X86_VENDOR_INTEL, \ .x86_vendor = X86_VENDOR_INTEL, \
.x86_model = mod, \ .x86_model = (model), \
.x86_stepping = step, \ .x86_stepping = (stepping), \
.x86_microcode_rev = rev, \ .x86_microcode_rev = (revision), \
} }
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
#endif #endif /* _ASM_X86_CPU_DEVICE_ID */
...@@ -3,19 +3,6 @@ ...@@ -3,19 +3,6 @@
* NSC/Cyrix CPU indexed register access. Must be inlined instead of * NSC/Cyrix CPU indexed register access. Must be inlined instead of
* macros to ensure correct access ordering * macros to ensure correct access ordering
* Access order is always 0x22 (=offset), 0x23 (=value) * Access order is always 0x22 (=offset), 0x23 (=value)
*
* When using the old macros a line like
* setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
* gets expanded to:
* do {
* outb((CX86_CCR2), 0x22);
* outb((({
* outb((CX86_CCR2), 0x22);
* inb(0x23);
* }) | 0x88), 0x23);
* } while (0);
*
* which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
*/ */
static inline u8 getCx86(u8 reg) static inline u8 getCx86(u8 reg)
...@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data) ...@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data)
outb(reg, 0x22); outb(reg, 0x22);
outb(data, 0x23); outb(data, 0x23);
} }
#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
#define setCx86_old(reg, data) do { \
outb((reg), 0x22); \
outb((data), 0x23); \
} while (0)
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "AGP: " fmt #define pr_fmt(fmt) "AGP: " fmt
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kcore.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -57,7 +58,7 @@ int fallback_aper_force __initdata; ...@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1; int fix_aperture __initdata = 1;
#ifdef CONFIG_PROC_VMCORE #if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
/* /*
* If the first kernel maps the aperture over e820 RAM, the kdump kernel will * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
* use the same range because it will remain configured in the northbridge. * use the same range because it will remain configured in the northbridge.
...@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1; ...@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
*/ */
static unsigned long aperture_pfn_start, aperture_page_count; static unsigned long aperture_pfn_start, aperture_page_count;
static int gart_oldmem_pfn_is_ram(unsigned long pfn) static int gart_mem_pfn_is_ram(unsigned long pfn)
{ {
return likely((pfn < aperture_pfn_start) || return likely((pfn < aperture_pfn_start) ||
(pfn >= aperture_pfn_start + aperture_page_count)); (pfn >= aperture_pfn_start + aperture_page_count));
} }
static void exclude_from_vmcore(u64 aper_base, u32 aper_order) static void __init exclude_from_core(u64 aper_base, u32 aper_order)
{ {
aperture_pfn_start = aper_base >> PAGE_SHIFT; aperture_pfn_start = aper_base >> PAGE_SHIFT;
aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); #ifdef CONFIG_PROC_VMCORE
WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
#endif
#ifdef CONFIG_PROC_KCORE
WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
#endif
} }
#else #else
static void exclude_from_vmcore(u64 aper_base, u32 aper_order) static void exclude_from_core(u64 aper_base, u32 aper_order)
{ {
} }
#endif #endif
...@@ -474,7 +480,7 @@ int __init gart_iommu_hole_init(void) ...@@ -474,7 +480,7 @@ int __init gart_iommu_hole_init(void)
* may have allocated the range over its e820 RAM * may have allocated the range over its e820 RAM
* and fixed up the northbridge * and fixed up the northbridge
*/ */
exclude_from_vmcore(last_aper_base, last_aper_order); exclude_from_core(last_aper_base, last_aper_order);
return 1; return 1;
} }
...@@ -520,7 +526,7 @@ int __init gart_iommu_hole_init(void) ...@@ -520,7 +526,7 @@ int __init gart_iommu_hole_init(void)
* overlap with the first kernel's memory. We can't access the * overlap with the first kernel's memory. We can't access the
* range through vmcore even though it should be part of the dump. * range through vmcore even though it should be part of the dump.
*/ */
exclude_from_vmcore(aper_alloc, aper_order); exclude_from_core(aper_alloc, aper_order);
/* Fix up the north bridges */ /* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
......
...@@ -124,7 +124,7 @@ static void set_cx86_reorder(void) ...@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */ /* Load/Store Serialize to mem access disable (=reorder it) */
setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */ /* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0; ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3); setCx86(CX86_CCR3, ccr3);
...@@ -135,11 +135,11 @@ static void set_cx86_memwb(void) ...@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */ /* CCR2 bit 2: unlock NW bit */
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */ /* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW); write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */ /* CCR2 bit 2: lock NW bit and set WT1 */
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
} }
/* /*
...@@ -153,14 +153,14 @@ static void geode_configure(void) ...@@ -153,14 +153,14 @@ static void geode_configure(void)
local_irq_save(flags); local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */ /* Suspend on halt power saving and enable #SUSP pin */
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3); ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */ /* FPU fast, DTE cache, Mem bypass */
setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb(); set_cx86_memwb();
...@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) ...@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */ /* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) { if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */ /* Enable cxMMX extensions (GX1 Datasheet 54) */
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
/* /*
* GXm : 0x30 ... 0x5f GXm datasheet 51 * GXm : 0x30 ... 0x5f GXm datasheet 51
...@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) ...@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) { if (dir1 > 7) {
dir0_msn++; /* M II */ dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */ /* Enable MMX extensions (App note 108) */
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
} else { } else {
/* A 6x86MX - it has the bug. */ /* A 6x86MX - it has the bug. */
set_cpu_bug(c, X86_BUG_COMA); set_cpu_bug(c, X86_BUG_COMA);
......
...@@ -608,6 +608,8 @@ static int microcode_reload_late(void) ...@@ -608,6 +608,8 @@ static int microcode_reload_late(void)
if (ret > 0) if (ret > 0)
microcode_check(); microcode_check();
pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
return ret; return ret;
} }
......
...@@ -905,6 +905,8 @@ int __init hpet_enable(void) ...@@ -905,6 +905,8 @@ int __init hpet_enable(void)
return 0; return 0;
hpet_set_mapping(); hpet_set_mapping();
if (!hpet_virt_address)
return 0;
/* /*
* Read the period and check for a sane value: * Read the period and check for a sane value:
......
...@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, ...@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
#endif #endif
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return -EINVAL;
} }
/* /*
......
...@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) ...@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf_base = base; mpf_base = base;
mpf_found = true; mpf_found = true;
pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
base, base + sizeof(*mpf) - 1, mpf); base, base + sizeof(*mpf) - 1);
memblock_reserve(base, sizeof(*mpf)); memblock_reserve(base, sizeof(*mpf));
if (mpf->physptr) if (mpf->physptr)
......
...@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len) ...@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
: "m" (*(unsigned long *)buff), : "m" (*(unsigned long *)buff),
"r" (zero), "0" (result)); "r" (zero), "0" (result));
--count; --count;
buff += 8; buff += 8;
} }
result = add32_with_carry(result>>32, result = add32_with_carry(result>>32,
result&0xffffffff); result&0xffffffff);
......
...@@ -77,7 +77,7 @@ static void __init pti_print_if_secure(const char *reason) ...@@ -77,7 +77,7 @@ static void __init pti_print_if_secure(const char *reason)
pr_info("%s\n", reason); pr_info("%s\n", reason);
} }
enum pti_mode { static enum pti_mode {
PTI_AUTO = 0, PTI_AUTO = 0,
PTI_FORCE_OFF, PTI_FORCE_OFF,
PTI_FORCE_ON PTI_FORCE_ON
...@@ -602,7 +602,7 @@ static void pti_clone_kernel_text(void) ...@@ -602,7 +602,7 @@ static void pti_clone_kernel_text(void)
set_memory_global(start, (end_global - start) >> PAGE_SHIFT); set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
} }
void pti_set_kernel_image_nonglobal(void) static void pti_set_kernel_image_nonglobal(void)
{ {
/* /*
* The identity map is created with PMDs, regardless of the * The identity map is created with PMDs, regardless of the
......
...@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head); ...@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock); static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1; static int kcore_need_update = 1;
/*
* Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
* Same as oldmem_pfn_is_ram in vmcore
*/
static int (*mem_pfn_is_ram)(unsigned long pfn);
/*
 * Install a callback that classifies pfns for /proc/kcore reads.
 * Only a single callback may be registered; returns -EBUSY if one
 * is already installed, 0 on success.
 */
int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram != NULL)
		return -EBUSY;

	mem_pfn_is_ram = fn;
	return 0;
}
/*
 * Classify @pfn for a kcore read: defer to the registered callback
 * when present, otherwise assume the page is RAM (the historical
 * behavior before callbacks existed).
 */
static int pfn_is_ram(unsigned long pfn)
{
	if (!mem_pfn_is_ram)
		return 1;

	return mem_pfn_is_ram(pfn);
}
/* This doesn't grab kclist_lock, so it should only be used at init time. */ /* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size, void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
int type) int type)
...@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) ...@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
goto out; goto out;
} }
m = NULL; /* skip the list anchor */ m = NULL; /* skip the list anchor */
} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
if (clear_user(buffer, tsz)) {
ret = -EFAULT;
goto out;
}
} else if (m->type == KCORE_VMALLOC) { } else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz); vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */ /* we have to zero-fill user buffer even if no read */
......
...@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) ...@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
m->vaddr = (unsigned long)vaddr; m->vaddr = (unsigned long)vaddr;
kclist_add(m, addr, sz, KCORE_REMAP); kclist_add(m, addr, sz, KCORE_REMAP);
} }
extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
#else #else
static inline static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment