Commit d8d1c351 authored by Ingo Molnar

Merge branch 'x86/mm' into x86/asm to resolve conflict and to create common base

 Conflicts:
	arch/x86/include/asm/cpufeature.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents cb44d0cf 1886297c
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
@@ -196,3 +196,35 @@ Another, more verbose way of getting PAT related debug messages is with
"debugpat" boot parameter. With this parameter, various debug messages are
printed to dmesg log.
+
+PAT Initialization
+------------------
+
+The following table describes how PAT is initialized under various
+configurations. The PAT MSR must be updated by Linux in order to support WC
+and WT attributes. Otherwise, the PAT MSR has the value programmed in it
+by the firmware. Note, Xen enables WC attribute in the PAT MSR for guests.
+
+ MTRR PAT   Call Sequence               PAT State  PAT MSR
+ =========================================================
+ E    E     MTRR -> PAT init            Enabled    OS
+ E    D     MTRR -> PAT init            Disabled   -
+ D    E     MTRR -> PAT disable         Disabled   BIOS
+ D    D     MTRR -> PAT disable         Disabled   -
+ -    np/E  PAT  -> PAT disable         Disabled   BIOS
+ -    np/D  PAT  -> PAT disable         Disabled   -
+ E    !P/E  MTRR -> PAT init            Disabled   BIOS
+ D    !P/E  MTRR -> PAT disable         Disabled   BIOS
+ !M   !P/E  MTRR stub -> PAT disable    Disabled   BIOS
+
+ Legend
+ ------------------------------------------------
+ E         Feature enabled in CPU
+ D         Feature disabled/unsupported in CPU
+ np        "nopat" boot option specified
+ !P        CONFIG_X86_PAT option unset
+ !M        CONFIG_MTRR option unset
+ Enabled   PAT state set to enabled
+ Disabled  PAT state set to disabled
+ OS        PAT initializes PAT MSR with OS setting
+ BIOS      PAT keeps PAT MSR with BIOS setting
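
[Editorial illustration, not part of this patch] A minimal sketch of how a driver
would consume the PAT state described by the table; pat_enabled(), ioremap_wc()
and ioremap_nocache() are existing kernel interfaces of this era, while the
helper and its parameters are hypothetical:

	#include <asm/pat.h>
	#include <asm/io.h>

	/* Prefer a write-combining mapping when PAT ended up "Enabled". */
	static void __iomem *map_device_buffer(resource_size_t base, size_t size)
	{
		if (pat_enabled())
			return ioremap_wc(base, size);	/* WC requires PAT */

		/* PAT "Disabled": WC is unavailable, fall back to uncached. */
		return ioremap_nocache(base, size);
	}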
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
@@ -126,7 +126,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
/*
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
@@ -24,6 +24,7 @@
#define _ASM_X86_MTRR_H
#include <uapi/asm/mtrr.h>
+#include <asm/pat.h>
/*
@@ -83,9 +84,12 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
+static inline void mtrr_bp_init(void)
+{
+	pat_disable("MTRRs disabled, skipping PAT initialization too.");
+}
+
#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
@@ -5,8 +5,8 @@
#include <asm/pgtable_types.h>
bool pat_enabled(void);
+void pat_disable(const char *reason);
extern void pat_init(void);
-void pat_init_cache_modes(u64);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -444,11 +444,24 @@ static void __init print_mtrr_state(void)
pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
+/* PAT setup for BP. We need to go through sync steps here */
+void __init mtrr_bp_pat_init(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+}
+
/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
struct mtrr_var_range *vrs;
-	unsigned long flags;
unsigned lo, dummy;
unsigned int i;
@@ -481,15 +494,6 @@ bool __init get_mtrr_state(void)
mtrr_state_set = 1;
-	/* PAT setup for BP. We need to go through sync steps here */
-	local_irq_save(flags);
-	prepare_set();
-
-	pat_init();
-
-	post_set();
-	local_irq_restore(flags);
-
return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
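
[Editorial note] The "sync steps" that mtrr_bp_pat_init() wraps around pat_init()
follow the cache-disable sequence the Intel SDM prescribes for reprogramming
MTRR/PAT state. Roughly, as a comment-form paraphrase of what prepare_set() and
post_set() in this file do (a sketch, not new code):

	/*
	 * prepare_set(): set CR0.CD to disable caching, WBINVD to flush
	 *                caches, flush TLBs, disable MTRRs via
	 *                MSR_MTRRdefType (interrupts already off here);
	 * pat_init():    write the OS-chosen value to MSR_IA32_CR_PAT;
	 * post_set():    flush caches and TLBs again, re-enable MTRRs,
	 *                clear CR0.CD to re-enable caching.
	 */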
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
@@ -752,6 +752,9 @@ void __init mtrr_bp_init(void)
/* BIOS may override */
__mtrr_enabled = get_mtrr_state();
+		if (mtrr_enabled())
+			mtrr_bp_pat_init();
+
if (mtrr_cleanup(phys_addr)) {
changed_by_mtrr_cleanup = 1;
mtrr_if->set_all();
@@ -759,8 +762,16 @@
}
}
-	if (!mtrr_enabled())
+	if (!mtrr_enabled()) {
		pr_info("MTRR: Disabled\n");
+
+		/*
+		 * PAT initialization relies on MTRR's rendezvous handler.
+		 * Skip PAT init until the handler can initialize both
+		 * features independently.
+		 */
+		pat_disable("MTRRs disabled, skipping PAT initialization too.");
+	}
}
void mtrr_ap_init(void)
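
[Editorial note] For orientation, the boot-time call flow this series converges
on, sketched roughly from mtrr/main.c and mm/pat.c as merged here (the AP side
reaches pat_init() through the stop_machine()-based MTRR rendezvous handler
referenced in the comment above):

	/*
	 * BP:  mtrr_bp_init()
	 *        -> get_mtrr_state()
	 *        -> mtrr_bp_pat_init() -> pat_init()    (MTRRs enabled)
	 *        -> pat_disable(...)                    (MTRRs disabled)
	 *
	 * APs: mtrr_ap_init()
	 *        -> set_mtrr*() rendezvous
	 *             -> mtrr_rendezvous_handler()
	 *                  -> mtrr_if->set_all() -> pat_init()
	 */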
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -52,6 +52,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
bool get_mtrr_state(void);
+void mtrr_bp_pat_init(void);
extern void set_mtrr_ops(const struct mtrr_ops *ops);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
@@ -40,11 +40,22 @@
static bool boot_cpu_done;
static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
+static void init_cache_modes(void);

-static inline void pat_disable(const char *reason)
+void pat_disable(const char *reason)
{
+	if (!__pat_enabled)
+		return;
+
+	if (boot_cpu_done) {
+		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
+		return;
+	}
+
	__pat_enabled = 0;
	pr_info("x86/PAT: %s\n", reason);
+
+	init_cache_modes();
}
static int __init nopat(char *str)
@@ -181,7 +192,7 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
* configuration.
* Using lower indices is preferred, so we start with highest index.
*/
-void pat_init_cache_modes(u64 pat)
+static void __init_cache_modes(u64 pat)
{
enum page_cache_mode cache;
char pat_msg[33];
@@ -202,14 +213,11 @@ static void pat_bsp_init(u64 pat)
{
u64 tmp_pat;
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

-	if (!pat_enabled())
-		goto done;
-
rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
if (!tmp_pat) {
pat_disable("PAT MSR is 0, disabled.");
@@ -218,16 +226,12 @@ static void pat_bsp_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
-done:
-	pat_init_cache_modes(pat);
+	__init_cache_modes(pat);
}
static void pat_ap_init(u64 pat)
{
-	if (!pat_enabled())
-		return;
-
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
/*
* If this happens we are on a secondary CPU, but switched to
* PAT on the boot CPU. We have no way to undo PAT.
@@ -238,18 +242,32 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-void pat_init(void)
+static void init_cache_modes(void)
{
-	u64 pat;
-	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u64 pat = 0;
+	static int init_cm_done;
-	if (!pat_enabled()) {
+	if (init_cm_done)
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_PAT)) {
+		/*
+		 * CPU supports PAT. Set PAT table to be consistent with
+		 * PAT MSR. This case supports "nopat" boot option, and
+		 * virtual machine environments which support PAT without
+		 * MTRRs. In particular, Xen has a unique setup of the PAT MSR.
+		 *
+		 * If the PAT MSR returns 0, it is considered invalid and
+		 * emulated as no PAT.
+		 */
+		rdmsrl(MSR_IA32_CR_PAT, pat);
+	}
+
+	if (!pat) {
/*
* No PAT. Emulate the PAT table that corresponds to the two
-		 * cache bits, PWT (Write Through) and PCD (Cache Disable). This
-		 * setup is the same as the BIOS default setup when the system
-		 * has PAT but the "nopat" boot option has been specified. This
-		 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
+		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
+		 * This setup is also the same as the BIOS default setup.
*
* PTE encoding:
*
@@ -266,8 +284,34 @@
*/
pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+	}
+
+	__init_cache_modes(pat);
+
+	init_cm_done = 1;
+}
+
+/**
+ * pat_init - Initialize PAT MSR and PAT table
+ *
+ * This function initializes PAT MSR and PAT table with an OS-defined value
+ * to enable additional cache attributes, WC and WT.
+ *
+ * This function must be called on all CPUs using the specific sequence of
+ * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
+ * procedure for PAT.
+ */
+void pat_init(void)
+{
+	u64 pat;
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (!pat_enabled()) {
+		init_cache_modes();
+		return;
+	}
-	} else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
/*
@@ -734,25 +778,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (file->f_flags & O_DSYNC)
pcm = _PAGE_CACHE_MODE_UC_MINUS;
-#ifdef CONFIG_X86_32
-	/*
-	 * On the PPro and successors, the MTRRs are used to set
-	 * memory types for physical addresses outside main memory,
-	 * so blindly setting UC or PWT on those pages is wrong.
-	 * For Pentiums and earlier, the surround logic should disable
-	 * caching for the high addresses through the KEN pin, but
-	 * we maintain the tradition of paranoia in this code.
-	 */
-	if (!pat_enabled() &&
-	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
-	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
-	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		pcm = _PAGE_CACHE_MODE_UC;
-	}
-#endif
*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
cachemode2protval(pcm));
return 1;
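
[Editorial note] As a worked example of the PAT() construction above (editor's
arithmetic, not text from the patch): the architectural memory-type encodings
are WB=0x06, WT=0x04, UC-=0x07 and UC=0x00, and each PAT entry occupies 8 bits
of the MSR, so the emulated table packs to a single constant:

	/*
	 * PAT(x, y) shifts the encoding of type y into byte x, so the table
	 * WB, WT, UC-, UC, WB, WT, UC-, UC becomes, low byte first,
	 * 06 04 07 00 06 04 07 00, i.e.:
	 *
	 *	pat == 0x0007040600070406ULL
	 */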
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
@@ -75,7 +75,6 @@
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
-#include <asm/pat.h>
#include <asm/cpu.h>
#ifdef CONFIG_ACPI
@@ -1511,7 +1510,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
{
struct physdev_set_iopl set_iopl;
unsigned long initrd_start = 0;
-	u64 pat;
int rc;
if (!xen_start_info)
@@ -1618,13 +1616,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_start_info->nr_pages);
xen_reserve_special_pages();
-	/*
-	 * Modify the cache mode translation tables to match Xen's PAT
-	 * configuration.
-	 */
-	rdmsrl(MSR_IA32_CR_PAT, pat);
-	pat_init_cache_modes(pat);
-
/* keep using Xen gdt for now; no urgent need to change it */
#ifdef CONFIG_X86_32
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -1732,7 +1732,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
-	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file, args->handle);
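
[Editorial illustration] A sketch of the userspace side that this i915 check
guards (untested; struct drm_i915_gem_mmap, I915_MMAP_WC and
DRM_IOCTL_I915_GEM_MMAP are the real i915 uAPI, while the helper and its
fallback policy are made up for illustration):

	#include <stdint.h>
	#include <string.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Try a write-combined CPU mapping of a GEM object; fall back to a
	 * normal mapping when the kernel reports ENODEV (no PAT, per the
	 * boot_cpu_has(X86_FEATURE_PAT) check above). */
	static int gem_mmap_maybe_wc(int fd, uint32_t handle, uint64_t size,
				     uint64_t *addr_out)
	{
		struct drm_i915_gem_mmap arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.size = size;
		arg.flags = I915_MMAP_WC;

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0) {
			*addr_out = arg.addr_ptr;
			return 0;
		}
		if (errno != ENODEV)
			return -errno;

		arg.flags = 0;	/* retry without write-combining */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
			return -errno;
		*addr_out = arg.addr_ptr;
		return 0;
	}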