Commit f87519e8 authored by Thomas Gleixner, committed by Ingo Molnar

x86: introduce max_pfn_mapped

64-bit uses end_pfn_map and 32-bit uses max_low_pfn. Several files carry
#ifdef'ed defines that map to either end_pfn_map or max_low_pfn depending
on the configuration. Replace these with a single universal define,
max_pfn_mapped, and clean up all the other instances.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3cbd09e4
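
For context, here is a minimal userspace sketch (not kernel code) of the pattern this commit consolidates: instead of every file carrying its own #ifdef to pick the mapped-pfn limit, one universal name is defined once in a shared header, as <asm/page.h> does below. The DEMO_64BIT macro and the numeric values are stand-ins chosen purely for illustration.

/* sketch.c -- illustrative only; the variable values are made up */
#include <stdio.h>

static unsigned long end_pfn_map = 0x100000;	/* stand-in for the 64-bit limit */
static unsigned long max_low_pfn = 0x38000;	/* stand-in for the 32-bit limit */

/* Old pattern: each user rolled its own #ifdef'ed alias. */
#ifdef DEMO_64BIT
# define max_mapped	end_pfn_map
#else
# define max_mapped	max_low_pfn
#endif

/* New pattern: one universal define, provided centrally in a shared header. */
#ifdef DEMO_64BIT
# define max_pfn_mapped	end_pfn_map
#else
# define max_pfn_mapped	max_low_pfn
#endif

int main(void)
{
	printf("old alias resolves to %#lx\n", max_mapped);
	printf("new alias resolves to %#lx\n", max_pfn_mapped);
	return 0;
}
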
@@ -396,7 +396,7 @@ static void __init runtime_code_page_mkexec(void)
 		md = p;
 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
 		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-		    (end >> PAGE_SHIFT) <= end_pfn_map)
+		    (end >> PAGE_SHIFT) <= max_pfn_mapped)
 			change_page_attr_addr(md->virt_addr,
 					      md->num_pages,
 					      PAGE_KERNEL_EXEC_NOCACHE);
@@ -429,7 +429,7 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
 		if ((md->attribute & EFI_MEMORY_WB) &&
-		    ((end >> PAGE_SHIFT) <= end_pfn_map))
+		    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
			md->virt_addr = (unsigned long)__va(md->phys_addr);
		else
			md->virt_addr = (unsigned long)
......
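
The checks above decide whether an EFI region can be addressed through the kernel's direct mapping: only when the region's last page frame is at or below max_pfn_mapped is a __va() address valid for it; otherwise the else path sets up a separate mapping instead. A small userspace sketch of that bound check, with PAGE_SHIFT and the limit as stand-in values:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

/* True if [phys_addr, phys_addr + size) ends within the mapped pfn range. */
static bool within_direct_map(unsigned long long phys_addr,
			      unsigned long long size,
			      unsigned long long max_pfn_mapped)
{
	unsigned long long end = phys_addr + size;

	return (end >> PAGE_SHIFT) <= max_pfn_mapped;
}

int main(void)
{
	unsigned long long limit = 0x100000;	/* stand-in: 4 GiB mapped */

	printf("%d\n", within_direct_map(0x1000000ULL, 0x2000ULL, limit));	/* 1 */
	printf("%d\n", within_direct_map(0x200000000ULL, 0x2000ULL, limit));	/* 0 */
	return 0;
}
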
@@ -27,13 +27,6 @@ enum {
 	GPS			= (1<<30)
 };
 
-#ifdef CONFIG_X86_64
-# include <asm/proto.h>
-# define max_mapped end_pfn_map
-#else
-# define max_mapped max_low_pfn
-#endif
-
 struct split_state {
 	long lpg, gpg, spg, exec;
 	long min_exec, max_exec;
@@ -48,7 +41,7 @@ static __init int print_split(struct split_state *s)
 	s->lpg = s->gpg = s->spg = s->exec = 0;
 	s->min_exec = ~0UL;
 	s->max_exec = 0;
-	for (i = 0; i < max_mapped; ) {
+	for (i = 0; i < max_pfn_mapped; ) {
 		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
 		int level;
 		pte_t *pte;
@@ -97,8 +90,8 @@ static __init int print_split(struct split_state *s)
 	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
 	if (expected != i) {
-		printk(KERN_ERR "CPA max_mapped %lu but expected %lu\n",
-			max_mapped, expected);
+		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
+			max_pfn_mapped, expected);
 		return 1;
 	}
 	return err;
@@ -120,22 +113,22 @@ static __init int exercise_pageattr(void)
 	printk(KERN_INFO "CPA exercising pageattr\n");
 
-	bm = vmalloc((max_mapped + 7) / 8);
+	bm = vmalloc((max_pfn_mapped + 7) / 8);
 	if (!bm) {
 		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
 		return -ENOMEM;
 	}
-	memset(bm, 0, (max_mapped + 7) / 8);
+	memset(bm, 0, (max_pfn_mapped + 7) / 8);
 
 	failed += print_split(&sa);
 	srandom32(100);
 
 	for (i = 0; i < NTEST; i++) {
-		unsigned long pfn = random32() % max_mapped;
+		unsigned long pfn = random32() % max_pfn_mapped;
 
 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
 		len[i] = random32() % 100;
-		len[i] = min_t(unsigned long, len[i], max_mapped - pfn - 1);
+		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
 		if (len[i] == 0)
 			len[i] = 1;
......
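
The test above keeps one bit per mapped page frame; (max_pfn_mapped + 7) / 8 is the usual round-up from a bit count to whole bytes. A standalone sketch (not kernel code) of that bitmap sizing, with the pfn limit as a stand-in value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned long max_pfn_mapped = 262144;	/* stand-in: 1 GiB of 4 KiB pages */
	size_t bytes = (max_pfn_mapped + 7) / 8;	/* round bits up to whole bytes */
	unsigned char *bm = malloc(bytes);

	if (!bm) {
		fprintf(stderr, "cannot allocate bitmap\n");
		return 1;
	}
	memset(bm, 0, bytes);

	/* Mark pfn 12345 as visited, one bit per page frame. */
	unsigned long pfn = 12345;
	bm[pfn / 8] |= 1u << (pfn % 8);

	printf("bitmap of %zu bytes covers %lu page frames\n", bytes, max_pfn_mapped);
	free(bm);
	return 0;
}
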
@@ -35,8 +35,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
 #define efi_ioremap(addr, size)		ioremap(addr, size)
 
-#define end_pfn_map			max_low_pfn
-
 #else /* !CONFIG_X86_32 */
 
 #define MAX_EFI_IO_PAGES	100
......
@@ -33,8 +33,10 @@
 #ifdef CONFIG_X86_64
 #include <asm/page_64.h>
+#define max_pfn_mapped		end_pfn_map
 #else
 #include <asm/page_32.h>
+#define max_pfn_mapped		max_low_pfn
 #endif	/* CONFIG_X86_64 */
 
 #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
......