Commit 0c6be871 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull additional x86 fixes from Peter Anvin.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, amd, xen: Avoid NULL pointer paravirt references
  x86, mtrr: Fix a type overflow in range_to_mtrr func
  x86, realmode: Unbreak the ia64 build of drivers/acpi/sleep.c
  x86/mm/pat: Improve scaling of pat_pagerange_is_ram()
  x86: hpet: Fix copy-and-paste mistake in earlier change
  x86/mce: Fix 32-bit build
  x86/bitops: Move BIT_64() for a wider use
parents 6a445c7f 1ab46fd3
arch/x86/include/asm/acpi.h
@@ -29,6 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
+#include <asm/realmode.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -116,10 +117,8 @@ static inline void acpi_disable_pci(void)
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern const unsigned char acpi_wakeup_code[];
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
 
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
-
 /*
  * Check if the CPU can handle C2 and deeper
......
arch/x86/include/asm/bitops.h
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#define BIT_64(n)			(U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
......
arch/x86/kernel/cpu/mcheck/mce.c
@@ -1472,8 +1472,8 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 			rdmsrl(msrs[i], val);
 
 			/* CntP bit set? */
-			if (val & BIT(62)) {
-				val &= ~BIT(62);
+			if (val & BIT_64(62)) {
+				val &= ~BIT_64(62);
 				wrmsrl(msrs[i], val);
 			}
 		}
......
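Background on the BIT_64() change (the macro move shown in the bitops.h hunk above and the mce_amd.h hunk at the end of this diff): the kernel's generic BIT(n) expands to (1UL << (n)), and unsigned long is only 32 bits wide on i386, so BIT(62) shifts past the width of the type and the CntP test above could never work on 32-bit builds. A minimal user-space sketch of the fixed pattern, with UINT64_C standing in for the kernel's U64_C:

#include <stdio.h>
#include <stdint.h>

/* BIT_64() as moved into <asm/bitops.h>; UINT64_C stands in for the
 * kernel's U64_C in this user-space sketch. */
#define BIT_64(n)	(UINT64_C(1) << (n))

int main(void)
{
	uint64_t val = BIT_64(62) | 0x5;	/* pretend MSR value with CntP (bit 62) set */

	/* With a 32-bit unsigned long, the kernel's generic BIT(62) would
	 * expand to (1UL << 62): a shift wider than the type, which is
	 * undefined behavior, so this test could never work on i386.
	 * BIT_64() always shifts a 64-bit constant. */
	if (val & BIT_64(62)) {
		val &= ~BIT_64(62);	/* clear CntP, as mce.c does before wrmsrl() */
		printf("CntP cleared, val = %#llx\n", (unsigned long long)val);
	}
	return 0;
}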
arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 		if (align > max_align)
 			align = max_align;
 
-		sizek = 1 << align;
+		sizek = 1UL << align;
 		if (debug_print) {
 			char start_factor = 'K', size_factor = 'K';
 			unsigned long start_base, size_base;
......
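The mtrr fix is the same class of type bug: range_to_mtrr() works in KiB units, and sizek = 1 << align performs the shift in int, which overflows once align reaches 31 (i.e. chunks of 2 TiB and up). Shifting an unsigned long instead keeps the full 64-bit width on x86-64. A small hedged demonstration (the first shift is exactly the undefined-behavior expression the patch removes; typical compilers produce the huge sign-extended value noted in the comments):

#include <stdio.h>

int main(void)
{
	unsigned int align = 31;		/* a 2 TiB chunk, counted in KiB */
	unsigned long bad  = 1 << align;	/* int shift: undefined behavior at 31 */
	unsigned long good = 1UL << align;	/* unsigned long shift: well-defined on LP64 */

	printf("1   << 31 = %lu\n", bad);	/* commonly 18446744071562067968 via sign extension */
	printf("1UL << 31 = %lu\n", good);	/* 2147483648, the intended size */
	return 0;
}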
arch/x86/kernel/hpet.c
@@ -870,7 +870,7 @@ int __init hpet_enable(void)
 	else
 		pr_warn("HPET initial state will not be saved\n");
 	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
-	hpet_writel(cfg, HPET_Tn_CFG(i));
+	hpet_writel(cfg, HPET_CFG);
 	if (cfg)
 		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
 			cfg);
......
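For context on the hpet fix: this code path sanitizes the *global* configuration register, but the earlier change pasted in a write to timer i's per-timer register instead. The two registers live at different offsets, per the constants in arch/x86/include/asm/hpet.h:

#define HPET_CFG	0x010			/* general configuration register */
#define HPET_Tn_CFG(n)	(0x100 + 0x20 * n)	/* configuration register of timer n */

So the buggy write clobbered one timer's configuration while leaving the global register it had just sanitized untouched.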
arch/x86/mm/pat.c
@@ -158,13 +158,31 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
+struct pagerange_state {
+	unsigned long		cur_pfn;
+	int			ram;
+	int			not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+	struct pagerange_state *state = arg;
+
+	state->not_ram	|= initial_pfn > state->cur_pfn;
+	state->ram	|= total_nr_pages > 0;
+	state->cur_pfn	 = initial_pfn + total_nr_pages;
+
+	return state->ram && state->not_ram;
+}
+
 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
+	int ret = 0;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	struct pagerange_state state = {start_pfn, 0, 0};
 
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
 	/*
 	 * For legacy reasons, physical address range in the legacy ISA
 	 * region is tracked as non-RAM. This will allow users of
@@ -172,17 +190,15 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 	 * some of those portions are listed(or not even listed) with
 	 * different e820 types(RAM/reserved/..)
 	 */
-		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-		    page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
+	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
 
-		if (ram_page == not_rampage)
-			return -1;
+	if (start_pfn < end_pfn) {
+		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+				&state, pagerange_is_ram_callback);
 	}
 
-	return ram_page;
+	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }
 
 /*
......
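The pat change replaces a per-4KiB-page loop over page_is_ram() with a single walk_system_ram_range() pass, so the cost scales with the number of RAM ranges rather than the number of pages in the request. A self-contained user-space model of the new tri-state logic, with a hard-coded range list standing in for walk_system_ram_range() (the range values are hypothetical, for illustration only):

#include <stdio.h>

struct pagerange_state {
	unsigned long cur_pfn;
	int ram;
	int not_ram;
};

/* Same logic as pagerange_is_ram_callback(): a gap before the reported
 * RAM chunk means non-RAM pages; the chunk itself means RAM pages. */
static int callback(unsigned long initial_pfn, unsigned long nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram     |= nr_pages > 0;
	state->cur_pfn  = initial_pfn + nr_pages;

	return state->ram && state->not_ram;	/* nonzero stops the walk early */
}

int main(void)
{
	/* Hypothetical RAM ranges (pfn, nr_pages) inside the queried window. */
	unsigned long ranges[][2] = { {0x100, 0x80}, {0x200, 0x40} };
	struct pagerange_state state = { 0x100, 0, 0 };	/* walk starts at pfn 0x100 */
	int stopped = 0;

	for (int i = 0; i < 2 && !stopped; i++)
		stopped = callback(ranges[i][0], ranges[i][1], &state);

	/* Mixed here: RAM plus the hole between pfn 0x180 and 0x200. */
	printf("%s\n", stopped ? "mixed (-1)" : state.ram ? "RAM (1)" : "not RAM (0)");
	return 0;
}

A nonzero callback return aborts the walk as soon as the range is known to contain both RAM and non-RAM pages, which maps to the function's -1 result.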
arch/x86/xen/enlighten.c
@@ -1116,7 +1116,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.wbinvd = native_wbinvd,
 
 	.read_msr = native_read_msr_safe,
+	.rdmsr_regs = native_rdmsr_safe_regs,
 	.write_msr = xen_write_msr_safe,
+	.wrmsr_regs = native_wrmsr_safe_regs,
 
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
......
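This is the "Avoid NULL pointer paravirt references" item: the AMD CPU setup code issues rdmsr_safe_regs()/wrmsr_safe_regs(), which dispatch through pv_cpu_ops function pointers, and Xen's table left those two slots unset, crashing Xen dom0 on the affected AMD systems. A minimal sketch of the failure mode (the struct and names below are illustrative stand-ins, not the kernel's actual paravirt types):

#include <stdio.h>

/* Illustrative stand-ins; not the kernel's actual paravirt ops. */
struct cpu_ops {
	int (*read_msr)(unsigned int msr, unsigned long long *val);
	int (*rdmsr_regs)(unsigned int *regs);	/* slot Xen left NULL before the fix */
};

static int native_read_msr(unsigned int msr, unsigned long long *val)
{
	(void)msr;
	*val = 0;	/* stub */
	return 0;
}

static struct cpu_ops xen_ops = {
	.read_msr = native_read_msr,
	/* .rdmsr_regs intentionally unset: the kernel's caller used the
	 * equivalent slot unconditionally and jumped through NULL. */
};

int main(void)
{
	unsigned int regs[8] = { 0 };

	if (xen_ops.rdmsr_regs)	/* the kernel has no such guard; it expects
				 * every slot to be filled, as the fix does */
		xen_ops.rdmsr_regs(regs);
	else
		printf("rdmsr_regs is NULL: the kernel call would have oopsed\n");
	return 0;
}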
drivers/acpi/sleep.c
@@ -25,8 +25,6 @@
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
-#include <asm/realmode.h>
-
 #include "internal.h"
 #include "sleep.h"
@@ -93,13 +91,11 @@ static struct notifier_block tts_notifier = {
 static int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
-	unsigned long wakeup_pa = real_mode_header->wakeup_start;
 	/* do we have a wakeup address for S2 and S3? */
 	if (acpi_state == ACPI_STATE_S3) {
-		if (!wakeup_pa)
+		if (!acpi_wakeup_address)
 			return -EFAULT;
-		acpi_set_firmware_waking_vector(
-			(acpi_physical_address)wakeup_pa);
+		acpi_set_firmware_waking_vector(acpi_wakeup_address);
 	}
 	ACPI_FLUSH_CPU_CACHE();
......
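This is the ia64 build fix: drivers/acpi/sleep.c is generic ACPI code, and having it include the x86-only <asm/realmode.h> and dereference real_mode_header directly broke the ia64 build, the other architecture compiling this file. The fix routes the access through the per-architecture acpi_wakeup_address macro (defined in the acpi.h hunk at the top of this diff), so the generic code stays arch-neutral. Roughly, the pattern looks like this (the non-x86 side shown is illustrative, not the exact ia64 header):

/* arch/x86/include/asm/acpi.h -- the definition added in this series: */
#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))

/* another architecture can satisfy the same contract its own way, e.g.: */
extern unsigned long acpi_wakeup_address;	/* hypothetical non-x86 variant */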
drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
 #include <asm/mce.h>
 
-#define BIT_64(n)			(U64_C(1) << (n))
-
 #define EC(x)				((x) & 0xffff)
 #define XEC(x, mask)			(((x) >> 16) & mask)
......