Commit 02171b4a authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm changes from Ingo Molnar:
 "This tree includes a micro-optimization that avoids cr3 switches
  during idling; it fixes corner cases and there's also small cleanups"

Fix up trivial context conflict with the percpu_xx -> this_cpu_xx
changes.

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86-64: Fix accounting in kernel_physical_mapping_init()
  x86/tlb: Clean up and unify TLB_FLUSH_ALL definition
  x86: Drop obsolete ARCH_BOOTMEM support
  x86, tlb: Switch cr3 in leave_mm() only when needed
  x86/mm: Fix the size calculation of mapping tables
parents 70311aaa 20167d34
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1238,10 +1238,6 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accommodate various tables.
 
-config HAVE_ARCH_BOOTMEM
-	def_bool y
-	depends on X86_32 && NUMA
-
 config HAVE_ARCH_ALLOC_REMAP
 	def_bool y
 	depends on X86_32 && NUMA
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -61,10 +61,4 @@ static inline int pfn_valid(int pfn)
 }
 #endif /* CONFIG_DISCONTIGMEM */
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-/* always use node 0 for bootmem on this numa platform */
-#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
-	(NODE_DATA(0)->bdata)
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
-
 #endif /* _ASM_X86_MMZONE_32_H */
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,11 +62,7 @@ static inline void __flush_tlb_one(unsigned long addr)
 	__flush_tlb();
 }
 
-#ifdef CONFIG_X86_32
-# define TLB_FLUSH_ALL	0xffffffff
-#else
-# define TLB_FLUSH_ALL	-1ULL
-#endif
+#define TLB_FLUSH_ALL	-1UL
 
 /*
  * TLB flushing:
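The unification works because -1UL is all-ones at the native word width: it evaluates to 0xffffffff on 32-bit and 0xffffffffffffffff on 64-bit, matching both values the removed #ifdef spelled out. A minimal standalone check (ordinary userspace C, not kernel code):

#include <stdio.h>

/* Same shape as the unified kernel define: all-ones in the
 * native unsigned long, whatever its width. */
#define TLB_FLUSH_ALL	-1UL

int main(void)
{
	/* Prints ffffffff when built 32-bit (e.g. gcc -m32) and
	 * ffffffffffffffff when built 64-bit. */
	printf("TLB_FLUSH_ALL = %lx (%zu bits)\n",
	       TLB_FLUSH_ALL, sizeof(unsigned long) * 8);
	return 0;
}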
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,8 +29,14 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+static void __init find_early_table_space(struct map_range *mr, unsigned long end,
+					  int use_pse, int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -55,6 +61,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
+		/* The first 2/4M doesn't use large pages. */
+		extra += mr->end - mr->start;
+
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -84,12 +93,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -261,7 +264,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
+		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
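The table-size fix above accounts for the head of memory (the first 2M, or 4M on 32-bit) that is always mapped with 4K pages even when PSE large pages are in use; passing the first map_range into find_early_table_space() lets it reserve PTE pages for that span instead of under-allocating. A rough standalone sketch of the corrected arithmetic (userspace C; the numbers are illustrative, not from a real boot):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long mr_start = 0x000000;	/* head range, like mr->start */
	unsigned long mr_end   = 0x200000;	/* 2M kept in 4K pages */
	unsigned long extra    = 0x100000;	/* other 4K-mapped bytes */
	unsigned long ptes;

	/* The fix: count the head range's bytes toward PTE coverage. */
	extra += mr_end - mr_start;
	ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 3M of 4K mappings -> 768 PTEs -> 2 page-table pages
	 * (512 8-byte PTEs per 4K page on x86-64). */
	printf("ptes = %lu, pte pages = %lu\n", ptes, (ptes + 511) / 512);
	return 0;
}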
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -407,12 +407,12 @@ static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 
 	int i = pmd_index(address);
 
-	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address = next) {
 		unsigned long pte_phys;
 		pmd_t *pmd = pmd_page + pmd_index(address);
 		pte_t *pte;
@@ -426,6 +426,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			break;
 		}
 
+		next = (address & PMD_MASK) + PMD_SIZE;
+
 		if (pmd_val(*pmd)) {
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
@@ -449,7 +451,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
-				pages++;
+				last_map_addr = next;
 				continue;
 			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -462,7 +464,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 				pfn_pte(address >> PAGE_SHIFT,
 				__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
+			last_map_addr = next;
 			continue;
 		}
 
@@ -482,11 +484,11 @@ static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
+	for (; i < PTRS_PER_PUD; i++, addr = next) {
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -495,8 +497,9 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (addr >= end)
 			break;
 
-		if (!after_bootmem &&
-				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
+		next = (addr & PUD_MASK) + PUD_SIZE;
+
+		if (!after_bootmem && !e820_any_mapped(addr, next, 0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
@@ -523,7 +526,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_1G)) {
-				pages++;
+				last_map_addr = next;
 				continue;
 			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -535,7 +538,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
+			last_map_addr = next;
 			continue;
 		}
 
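The accounting fix above has two parts: a region found already mapped by a suitable 2M/1G entry no longer bumps the `pages` counter (it was not mapped by this call) but still advances last_map_addr, and the loop now steps by `next`, computed by rounding the current address down to its 2M/1G boundary and adding one unit, which stays correct even for an unaligned start. A tiny standalone check of the `next` computation (userspace C, illustrative addresses):

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2M */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	/* A start address 4K past the 2M boundary at 0x200000 ... */
	unsigned long address = 0x201000;

	/* ... rounds down to its own 2M slot, then steps one slot,
	 * exactly as the patched loop computes 'next': */
	unsigned long next = (address & PMD_MASK) + PMD_SIZE;

	printf("address = %#lx, next = %#lx\n", address, next);	/* 0x400000 */
	return 0;
}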
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -61,11 +61,13 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
  */
 void leave_mm(int cpu)
 {
+	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpumask_clear_cpu(cpu,
-			  mm_cpumask(this_cpu_read(cpu_tlbstate.active_mm)));
-	load_cr3(swapper_pg_dir);
+	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
+		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
+		load_cr3(swapper_pg_dir);
+	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
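This is the micro-optimization the pull summary mentions: reloading cr3 flushes all non-global TLB entries, so when the CPU has already been dropped from the mm's cpumask there is nothing to do and the expensive load_cr3() can be skipped. A minimal userspace mock of the new control flow (a plain bitmask stands in for the cpumask; the names are illustrative, not kernel APIs):

#include <stdio.h>

/* 'mask' stands in for mm_cpumask(active_mm); CPUs 0 and 2 still
 * reference the mm at the start. */
static unsigned long mask = 0x5;

static void switch_to_kernel_pgd(void)
{
	printf("  load_cr3(swapper_pg_dir)  /* flushes the TLB */\n");
}

static void leave_mm_mock(int cpu)
{
	printf("leave_mm(%d):\n", cpu);
	if (mask & (1UL << cpu)) {		/* cpumask_test_cpu() */
		mask &= ~(1UL << cpu);		/* cpumask_clear_cpu() */
		switch_to_kernel_pgd();
	} else {
		printf("  already left; cr3 switch skipped\n");
	}
}

int main(void)
{
	leave_mm_mock(2);	/* first call: clears the bit, switches cr3 */
	leave_mm_mock(2);	/* repeat: the new guard skips the reload */
	return 0;
}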