Commit 7202fdb1 authored by Borislav Petkov, committed by Ingo Molnar

x86/mm/pat: Remove pat_enabled() checks

Now that we emulate a PAT table when PAT is disabled, there's no
need for those checks anymore as the PAT abstraction will handle
those cases too.

Based on a conglomerate patch from Toshi Kani.
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Toshi Kani <toshi.kani@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Elliott@hp.com
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arnd@arndb.de
Cc: hch@lst.de
Cc: hmh@hmh.eng.br
Cc: jgross@suse.com
Cc: konrad.wilk@oracle.com
Cc: linux-mm <linux-mm@kvack.org>
Cc: linux-nvdimm@lists.01.org
Cc: stefan.bader@canonical.com
Cc: yigal@plexistor.com
Link: http://lkml.kernel.org/r/1433436928-31903-4-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9cd25aac
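
The parent commit (9cd25aac) is what makes this removal safe: when PAT is disabled or unavailable, a conservative PAT layout is still set up at boot, so cache-mode translation always has a table to work from. The following is only an illustrative sketch of that idea, not the kernel's actual pat_init(); the encodings follow the architectural PAT memory-type values, and pat_emulated_value() is a hypothetical helper name.

/*
 * Illustrative sketch: pack a conservative WB/WT/UC-/UC layout into
 * the eight 3-bit PAT slots, mirroring what a non-PAT system gets
 * from the legacy PCD/PWT bits.  Not the kernel's real code.
 */
#include <stdint.h>

#define PAT_UC		0x0ULL	/* uncached */
#define PAT_WC		0x1ULL	/* write combining */
#define PAT_WT		0x4ULL	/* write through */
#define PAT_WB		0x6ULL	/* write back */
#define PAT_UC_MINUS	0x7ULL	/* UC, may be overridden by MTRRs */

#define PAT(slot, type)	((uint64_t)(PAT_##type) << ((slot) * 8))

static uint64_t pat_emulated_value(void)
{
	/*
	 * Slots 0-3 follow the legacy PCD/PWT combinations; slots 4-7
	 * repeat them so the PAT bit in the PTE makes no difference.
	 */
	return PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
	       PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
}
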
@@ -77,13 +77,13 @@ void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
-	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
-	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
-	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
-	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
+	 * For non-PAT systems, translate non-WB request to UC- just in
+	 * case the caller set the PWT bit to prot directly without using
+	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
+	 * is UC or WC.  UC- gets the real intention, of the user, which is
+	 * "WC if the MTRR is WC, UC if you can't do that."
 	 */
-	if (!pat_enabled() && pgprot_val(prot) ==
-	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
+	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
 		prot = __pgprot(__PAGE_KERNEL |
 				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
@@ -292,11 +292,8 @@ EXPORT_SYMBOL_GPL(ioremap_uc);
  */
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-	if (pat_enabled())
-		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
 					__builtin_return_address(0));
-	else
-		return ioremap_nocache(phys_addr, size);
 }
 EXPORT_SYMBOL(ioremap_wc);
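
With the pat_enabled() branch gone, ioremap_wc() always asks for _PAGE_CACHE_MODE_WC and lets the (possibly emulated) PAT translation degrade it to UC- on non-PAT hardware. A minimal, hypothetical usage sketch; the device, BAR index and helper name are invented for illustration:

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical: map a framebuffer BAR write-combining. */
static void __iomem *map_fb_wc(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	/* WC when PAT is usable, UC- on a non-PAT system */
	return ioremap_wc(start, len);
}
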
@@ -1572,9 +1572,6 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
 
-	if (!pat_enabled())
-		return set_memory_uc(addr, numpages);
-
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_MODE_WC, NULL);
 	if (ret)
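
Likewise, set_memory_wc() no longer falls back to set_memory_uc(); it always reserves the range as WC and relies on reserve_memtype() to do the right thing without PAT. A hedged usage sketch; the single-page allocation and helper name are illustrative only:

#include <linux/gfp.h>
#include <asm/cacheflush.h>

/* Hypothetical: allocate one page and mark it write-combining. */
static void *alloc_wc_page(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return NULL;

	if (set_memory_wc(addr, 1)) {	/* numpages == 1 */
		free_page(addr);
		return NULL;
	}
	/* a real user would restore WB via set_memory_wb() before freeing */
	return (void *)addr;
}
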
@@ -438,12 +438,8 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */
-		if (new_type) {
-			if (req_type == _PAGE_CACHE_MODE_WC)
-				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
-			else
-				*new_type = req_type;
-		}
+		if (new_type)
+			*new_type = req_type;
 
 		return 0;
 	}
@@ -947,11 +943,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
-	if (pat_enabled())
-		return __pgprot(pgprot_val(prot) |
+	return __pgprot(pgprot_val(prot) |
 				cachemode2protval(_PAGE_CACHE_MODE_WC));
-	else
-		return pgprot_noncached(prot);
 }
 EXPORT_SYMBOL_GPL(pgprot_writecombine);
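
pgprot_writecombine() now unconditionally ORs in the WC cache mode; on systems without PAT the emulated table is what turns the request into UC-. A hypothetical driver mmap handler showing a typical call site; the PFN and the surrounding driver are made up:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical: hand a device buffer to userspace as WC. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x80000;	/* illustrative buffer PFN */
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}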