Commit 17e0a7cb authored by Linus Torvalds

Merge tag 'x86-cleanups-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "Misc cleanups, with an emphasis on removing obsolete/dead code"

* tag 'x86-cleanups-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/spinlock: Remove obsolete ticket spinlock macros and types
  x86/mm: Drop deprecated DISCONTIGMEM support for 32-bit
  x86/apb_timer: Drop unused declaration and macro
  x86/apb_timer: Drop unused TSC calibration
  x86/io_apic: Remove unused function mp_init_irq_at_boot()
  x86/mm: Stop printing BRK addresses
  x86/audit: Fix a -Wmissing-prototypes warning for ia32_classify_syscall()
  x86/nmi: Remove edac.h include leftover
  mm: Remove MPX leftovers
  x86/mm/mmap: Fix -Wmissing-prototypes warnings
  x86/early_printk: Remove unused includes
  crash_dump: Remove no longer used saved_max_pfn
  x86/smpboot: Remove the last ICPU() macro
parents bb548bed 2ca41f55
@@ -1611,19 +1611,10 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system. Increases memory reserved to accommodate various tables.

-config ARCH_HAVE_MEMORY_PRESENT
-	def_bool y
-	depends on X86_32 && DISCONTIGMEM
-
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 	depends on X86_32 && !NUMA

-config ARCH_DISCONTIGMEM_ENABLE
-	def_bool n
-	depends on NUMA && X86_32
-	depends on BROKEN
-
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on X86_64 || NUMA || X86_32 || X86_32_NON_STANDARD
...
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/unistd_32.h>
+#include <asm/audit.h>

 unsigned ia32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
...
@@ -25,11 +25,7 @@
 #define APBT_MIN_FREQ		1000000
 #define APBT_MMAP_SIZE		1024

-#define APBT_DEV_USED		1
-
 extern void apbt_time_init(void);
-extern unsigned long apbt_quick_calibrate(void);
-extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);

 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
@@ -38,7 +34,6 @@ extern int sfi_mtimer_num;

 #else /* CONFIG_APB_TIMER */

-static inline unsigned long apbt_quick_calibrate(void) {return 0; }
 static inline void apbt_time_init(void) { }

 #endif
...
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AUDIT_H
+#define _ASM_X86_AUDIT_H
+
+int ia32_classify_syscall(unsigned int syscall);
+
+#endif /* _ASM_X86_AUDIT_H */
@@ -14,43 +14,4 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)		(node_data[nid])
 #endif /* CONFIG_NUMA */

-#ifdef CONFIG_DISCONTIGMEM
-
-/*
- * generic node memory support, the following assumptions apply:
- *
- * 1) memory comes in 64Mb contiguous chunks which are either present or not
- * 2) we will not have more than 64Gb in total
- *
- * for now assume that 64Gb is max amount of RAM for whole system
- *    64Gb / 4096bytes/page = 16777216 pages
- */
-#define MAX_NR_PAGES 16777216
-#define MAX_SECTIONS 1024
-#define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS)
-
-extern s8 physnode_map[];
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
-#ifdef CONFIG_NUMA
-	return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
-#else
-	return 0;
-#endif
-}
-
-static inline int pfn_valid(int pfn)
-{
-	int nid = pfn_to_nid(pfn);
-
-	if (nid >= 0)
-		return (pfn < node_end_pfn(nid));
-	return 0;
-}
-
-#define early_pfn_valid(pfn)	pfn_valid((pfn))
-
-#endif /* CONFIG_DISCONTIGMEM */
 #endif /* _ASM_X86_MMZONE_32_H */
@@ -66,8 +66,7 @@ do { \
 #endif /* !__ASSEMBLY__ */

 /*
- * kern_addr_valid() is (1) for FLATMEM and (0) for
- * SPARSEMEM and DISCONTIGMEM
+ * kern_addr_valid() is (1) for FLATMEM and (0) for SPARSEMEM
  */
 #ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)	(1)
...
@@ -3,29 +3,7 @@
 #define _ASM_X86_SPINLOCK_TYPES_H

 #include <linux/types.h>
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define __TICKET_LOCK_INC	2
-#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)
-#else
-#define __TICKET_LOCK_INC	1
-#define TICKET_SLOWPATH_FLAG	((__ticket_t)0)
-#endif
-
-#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
-typedef u8  __ticket_t;
-typedef u16 __ticketpair_t;
-#else
-typedef u16 __ticket_t;
-typedef u32 __ticketpair_t;
-#endif
-
-#define TICKET_LOCK_INC	((__ticket_t)__TICKET_LOCK_INC)
-
-#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
-
 #include <asm-generic/qspinlock_types.h>
 #include <asm-generic/qrwlock_types.h>

 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
@@ -345,56 +345,3 @@ void __init apbt_time_init(void)
 	apb_timer_block_enabled = 0;
 	panic("failed to enable APB timer\n");
 }
-
-/* called before apb_timer_enable, use early map */
-unsigned long apbt_quick_calibrate(void)
-{
-	int i, scale;
-	u64 old, new;
-	u64 t1, t2;
-	unsigned long khz = 0;
-	u32 loop, shift;
-
-	apbt_set_mapping();
-	dw_apb_clocksource_start(clocksource_apbt);
-
-	/* check if the timer can count down, otherwise return */
-	old = dw_apb_clocksource_read(clocksource_apbt);
-	i = 10000;
-	while (--i) {
-		if (old != dw_apb_clocksource_read(clocksource_apbt))
-			break;
-	}
-
-	if (!i)
-		goto failed;
-
-	/* count 16 ms */
-	loop = (apbt_freq / 1000) << 4;
-
-	/* restart the timer to ensure it won't get to 0 in the calibration */
-	dw_apb_clocksource_start(clocksource_apbt);
-
-	old = dw_apb_clocksource_read(clocksource_apbt);
-	old += loop;
-
-	t1 = rdtsc();
-
-	do {
-		new = dw_apb_clocksource_read(clocksource_apbt);
-	} while (new < old);
-
-	t2 = rdtsc();
-
-	shift = 5;
-	if (unlikely(loop >> shift == 0)) {
-		printk(KERN_INFO
-		       "APBT TSC calibration failed, not enough resolution\n");
-		return 0;
-	}
-	scale = (int)div_u64((t2 - t1), loop >> shift);
-
-	khz = (scale * (apbt_freq / 1000)) >> shift;
-	printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
-	return khz;
-failed:
-	return 0;
-}
@@ -154,19 +154,6 @@ static inline bool mp_is_legacy_irq(int irq)
 	return irq >= 0 && irq < nr_legacy_irqs();
 }

-/*
- * Initialize all legacy IRQs and all pins on the first IOAPIC
- * if we have legacy interrupt controller. Kernel boot option "pirq="
- * may rely on non-legacy pins on the first IOAPIC.
- */
-static inline int mp_init_irq_at_boot(int ioapic, int irq)
-{
-	if (!nr_legacy_irqs())
-		return 0;
-
-	return ioapic == 0 || mp_is_legacy_irq(irq);
-}
-
 static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
 {
 	return ioapics[ioapic].irqdomain;
...
@@ -3,6 +3,7 @@
 #include <linux/types.h>
 #include <linux/audit.h>
 #include <asm/unistd.h>
+#include <asm/audit.h>

 static unsigned dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
@@ -41,7 +42,6 @@ int audit_classify_arch(int arch)
 int audit_classify_syscall(int abi, unsigned syscall)
 {
 #ifdef CONFIG_IA32_EMULATION
-	extern int ia32_classify_syscall(unsigned);
 	if (abi == AUDIT_ARCH_I386)
 		return ia32_classify_syscall(syscall);
 #endif
...
@@ -910,14 +910,6 @@ static int __init parse_memmap_one(char *p)
 		return -EINVAL;

 	if (!strncmp(p, "exactmap", 8)) {
-#ifdef CONFIG_CRASH_DUMP
-		/*
-		 * If we are doing a crash dump, we still need to know
-		 * the real memory size before the original memory map is
-		 * reset.
-		 */
-		saved_max_pfn = e820__end_of_ram_pfn();
-#endif
 		e820_table->nr_entries = 0;
 		userdef = 1;
 		return 0;
...
@@ -15,12 +15,9 @@
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
-#include <asm/intel-mid.h>
 #include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 #include <linux/usb/xhci-dbgp.h>
-#include <linux/efi.h>
-#include <asm/efi.h>
 #include <asm/pci_x86.h>

 /* Simple VGA output */
...
@@ -25,10 +25,6 @@
 #include <linux/atomic.h>
 #include <linux/sched/clock.h>

-#if defined(CONFIG_EDAC)
-#include <linux/edac.h>
-#endif
-
 #include <asm/cpu_entry_area.h>
 #include <asm/traps.h>
 #include <asm/mach_traps.h>
...
@@ -1857,24 +1857,25 @@ static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>

-#define ICPU(model) \
-	{X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF, 0}
+#define X86_MATCH(model)					\
+	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
+		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)

 static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
-	ICPU(INTEL_FAM6_XEON_PHI_KNL),
-	ICPU(INTEL_FAM6_XEON_PHI_KNM),
+	X86_MATCH(XEON_PHI_KNL),
+	X86_MATCH(XEON_PHI_KNM),
 	{}
 };

 static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
-	ICPU(INTEL_FAM6_SKYLAKE_X),
+	X86_MATCH(SKYLAKE_X),
 	{}
 };

 static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
-	ICPU(INTEL_FAM6_ATOM_GOLDMONT),
-	ICPU(INTEL_FAM6_ATOM_GOLDMONT_D),
-	ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS),
+	X86_MATCH(ATOM_GOLDMONT),
+	X86_MATCH(ATOM_GOLDMONT_D),
+	X86_MATCH(ATOM_GOLDMONT_PLUS),
 	{}
 };
...
@@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num)
 	} else {
 		pfn = pgt_buf_end;
 		pgt_buf_end += num;
-		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
-			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
 	}

 	for (i = 0; i < num; i++) {
...
@@ -18,7 +18,9 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/compat.h>
+#include <linux/elf-randomize.h>
 #include <asm/elf.h>
+#include <asm/io.h>

 #include "physaddr.h"
...
@@ -27,40 +27,6 @@

 #include "numa_internal.h"

-#ifdef CONFIG_DISCONTIGMEM
-/*
- * 4) physnode_map     - the mapping between a pfn and owning node
- * physnode_map keeps track of the physical memory layout of a generic
- * numa node on a 64Mb break (each element of the array will
- * represent 64Mb of memory and will be marked by the node id. so,
- * if the first gig is on node 0, and the second gig is on node 1
- * physnode_map will contain:
- *
- *     physnode_map[0-15] = 0;
- *     physnode_map[16-31] = 1;
- *     physnode_map[32- ] = -1;
- */
-s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
-EXPORT_SYMBOL(physnode_map);
-
-void memory_present(int nid, unsigned long start, unsigned long end)
-{
-	unsigned long pfn;
-
-	printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
-	       nid, start, end);
-	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
-	printk(KERN_DEBUG "  ");
-	start = round_down(start, PAGES_PER_SECTION);
-	end = round_up(end, PAGES_PER_SECTION);
-	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
-		physnode_map[pfn / PAGES_PER_SECTION] = nid;
-		printk(KERN_CONT "%lx ", pfn);
-	}
-	printk(KERN_CONT "\n");
-}
-#endif
-
 extern unsigned long highend_pfn, highstart_pfn;

 void __init initmem_init(void)
...
@@ -622,9 +622,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 		[ilog2(VM_GROWSDOWN)]	= "gd",
 		[ilog2(VM_PFNMAP)]	= "pf",
 		[ilog2(VM_DENYWRITE)]	= "dw",
-#ifdef CONFIG_X86_INTEL_MPX
-		[ilog2(VM_MPX)]		= "mp",
-#endif
 		[ilog2(VM_LOCKED)]	= "lo",
 		[ilog2(VM_IO)]		= "io",
 		[ilog2(VM_SEQ_READ)]	= "sr",
...
@@ -97,8 +97,6 @@ extern void unregister_oldmem_pfn_is_ram(void);
 static inline bool is_kdump_kernel(void) { return 0; }
 #endif /* CONFIG_CRASH_DUMP */

-extern unsigned long saved_max_pfn;
-
 /* Device Dump information to be filled by drivers */
 struct vmcoredd_data {
 	char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
...
@@ -329,13 +329,6 @@ extern unsigned int kobjsize(const void *objp);
 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 #endif

-#if defined(CONFIG_X86_INTEL_MPX)
-/* MPX specific bounds table or bounds directory */
-# define VM_MPX		VM_HIGH_ARCH_4
-#else
-# define VM_MPX		VM_NONE
-#endif
-
 #ifndef VM_GROWSUP
 # define VM_GROWSUP	VM_NONE
 #endif
...
@@ -5,12 +5,6 @@
 #include <linux/errno.h>
 #include <linux/export.h>

-/*
- * If we have booted due to a crash, max_pfn will be a very low value. We need
- * to know the amount of memory that the previous kernel used.
- */
-unsigned long saved_max_pfn;
-
 /*
  * stores the physical address of elf header of crash image
  *
...