Commit 86505fc0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:

 1) Double spin lock bug in sunhv serial driver, from Dan Carpenter.

 2) Use correct RSS estimate when determining whether to grow the huge
    TSB or not, from Mike Kravetz.

 3) Don't use full three level page tables for hugepages, PMD level is
    sufficient.  From Nitin Gupta.

 4) Mask out extraneous bits from TSB_TAG_ACCESS register, we only want
    the address bits.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Trim page tables for 8M hugepages
  sparc64 mm: Fix base TSB sizing when hugetlb pages are used
  sparc: serial: sunhv: fix a double lock bug
  sparc32: off by ones in BUG_ON()
  sparc: Don't leak context bits into thread->fault_address
parents 9d3bc3d4 7bc3777c
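For readers skimming the summary, here is a minimal user-space illustration of the masking described in item 4: the low bits of the TSB_TAG_ACCESS value carry the context ID, and the fix keeps only the page-aligned address bits by shifting right and then back left by PAGE_SHIFT, exactly what the srlx/sllx pairs in the assembly hunks below do. The harness, the PAGE_SHIFT value of 13 and the sample numbers are assumptions for illustration, not kernel code.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 13UL	/* sparc64 uses 8K base pages; value assumed here */

/* Keep only the page-aligned address bits, dropping the context ID that
 * the MMU stores in the low bits of TSB_TAG_ACCESS.  This is the C
 * equivalent of the srlx/sllx pair added in the assembly hunks below.
 */
static uint64_t clear_context_bits(uint64_t tag_access)
{
	return (tag_access >> PAGE_SHIFT) << PAGE_SHIFT;
}

int main(void)
{
	uint64_t vaddr = 0x7f0000002000ULL;	/* page-aligned fault address */
	uint64_t ctx   = 0x5a5ULL;		/* context ID in the low bits */

	assert(clear_context_bits(vaddr | ctx) == vaddr);
	return 0;
}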
@@ -31,14 +31,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
-					  unsigned long addr, unsigned long end,
-					  unsigned long floor,
-					  unsigned long ceiling)
-{
-	free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 					 unsigned long addr, pte_t *ptep)
 {
@@ -82,4 +74,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }
 
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
@@ -92,7 +92,8 @@ struct tsb_config {
 typedef struct {
 	spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
-	unsigned long		huge_pte_count;
+	unsigned long		hugetlb_pte_count;
+	unsigned long		thp_pte_count;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;

@@ -395,7 +395,7 @@ static inline unsigned long __pte_huge_mask(void)
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | __pte_huge_mask());
+	return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
 }
 
 static inline bool is_hugetlb_pte(pte_t pte)
@@ -403,6 +403,11 @@ static inline bool is_hugetlb_pte(pte_t pte)
 	return !!(pte_val(pte) & __pte_huge_mask());
 }
 
+static inline bool is_hugetlb_pmd(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {

@@ -203,7 +203,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
  * We have to propagate the 4MB bit of the virtual address
  * because we are fabricating 8MB pages using 4MB hw pages.
  */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
 	brz,pn REG1, FAIL_LABEL;		\
 	 sethi %uhi(_PAGE_PMD_HUGE), REG2;	\

@@ -25,13 +25,13 @@
 /* PROT ** ICACHE line 2: More real fault processing */
 	ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+	srlx		%g5, PAGE_SHIFT, %g5
+	sllx		%g5, PAGE_SHIFT, %g5		! Clear context ID bits
 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
 	 nop
 	nop
-	nop
-	nop
 
 /* PROT ** ICACHE line 3: Unused...	*/
 	nop

@@ -165,7 +165,7 @@ void irq_link(unsigned int irq)
 	p = &irq_table[irq];
 	pil = p->pil;
-	BUG_ON(pil > SUN4D_MAX_IRQ);
+	BUG_ON(pil >= SUN4D_MAX_IRQ);
 	p->next = irq_map[pil];
 	irq_map[pil] = p;
 
@@ -182,7 +182,7 @@ void irq_unlink(unsigned int irq)
 	spin_lock_irqsave(&irq_map_lock, flags);
 
 	p = &irq_table[irq];
-	BUG_ON(p->pil > SUN4D_MAX_IRQ);
+	BUG_ON(p->pil >= SUN4D_MAX_IRQ);
 	pnext = &irq_map[p->pil];
 	while (*pnext != p)
 		pnext = &(*pnext)->next;

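The two BUG_ON hunks above fix classic off-by-one bounds checks: the largest valid pil is SUN4D_MAX_IRQ - 1, so the old ">" test let an index one past the end of irq_map[] slip through, which is what the new ">=" comparison rejects. A small stand-alone sketch of the corrected check (the value 16 for SUN4D_MAX_IRQ and the harness itself are illustrative placeholders, not kernel code):

#include <assert.h>

#define SUN4D_MAX_IRQ 16	/* placeholder value for illustration */

static void *irq_map[SUN4D_MAX_IRQ];

/* With an array of SUN4D_MAX_IRQ slots, the last valid index is
 * SUN4D_MAX_IRQ - 1, so the sanity check needs ">=" rather than ">".
 */
static int pil_in_bounds(unsigned int pil)
{
	return pil < SUN4D_MAX_IRQ;	/* i.e. !(pil >= SUN4D_MAX_IRQ) */
}

int main(void)
{
	assert(pil_in_bounds(SUN4D_MAX_IRQ - 1));	/* last slot: fine */
	assert(!pil_in_bounds(SUN4D_MAX_IRQ));		/* one past the end */
	(void)irq_map;
	return 0;
}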
@@ -20,6 +20,10 @@ kvmap_itlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_IMMU, %g4
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g4 here.
+	 */
+
 	/* sun4v_itlb_miss branches here with the missing virtual
 	 * address already loaded into %g4
 	 */
@@ -128,6 +132,10 @@ kvmap_dtlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_DMMU, %g4
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g4 here.
+	 */
+
 	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
 	 */
@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
 	 nop
 	.previous
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g5 here.
+	 */
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline

@@ -29,13 +29,17 @@
  */
 tsb_miss_dtlb:
 	mov		TLB_TAG_ACCESS, %g4
-	ba,pt		%xcc, tsb_miss_page_table_walk
 	ldxa		[%g4] ASI_DMMU, %g4
+	srlx		%g4, PAGE_SHIFT, %g4
+	ba,pt		%xcc, tsb_miss_page_table_walk
+	 sllx		%g4, PAGE_SHIFT, %g4
 
 tsb_miss_itlb:
 	mov		TLB_TAG_ACCESS, %g4
-	ba,pt		%xcc, tsb_miss_page_table_walk
 	ldxa		[%g4] ASI_IMMU, %g4
+	srlx		%g4, PAGE_SHIFT, %g4
+	ba,pt		%xcc, tsb_miss_page_table_walk
+	 sllx		%g4, PAGE_SHIFT, %g4
 
 	/* At this point we have:
 	 * %g1 -- PAGE_SIZE TSB entry address
@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
 	 nop
 	.previous
 
+	/* Clear context ID bits. */
+	srlx		%g5, PAGE_SHIFT, %g5
+	sllx		%g5, PAGE_SHIFT, %g5
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline

@@ -111,8 +111,8 @@ static unsigned int get_user_insn(unsigned long tpc)
 	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
 		goto out_irq_enable;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(*pmdp)) {
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (is_hugetlb_pmd(*pmdp)) {
 		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
 		pa += tpc & ~HPAGE_MASK;
@@ -476,14 +476,14 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	mm_rss = mm->context.huge_pte_count;
+	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)

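The do_sparc64_fault() hunk above is the RSS fix summarized in item 2: hugetlb pages are not included in get_mm_rss(), so only the THP count should be subtracted when sizing the base TSB, while the huge TSB is sized from the sum of both counters. A rough stand-alone sketch of that arithmetic follows; the constants (sparc64's 8K base pages and 8M huge pages) and the sample counts are assumptions for illustration only.

#include <stdio.h>

#define PAGE_SIZE	(8UL << 10)	/* 8K base pages */
#define HPAGE_SIZE	(8UL << 20)	/* 8M huge pages */

int main(void)
{
	unsigned long rss = 10000;		/* get_mm_rss(), in base pages */
	unsigned long thp_pte_count = 4;	/* THP mappings, counted in rss */
	unsigned long hugetlb_pte_count = 8;	/* hugetlb mappings, not in rss */

	/* Base TSB: remove only the THP contribution from the RSS estimate. */
	unsigned long base_rss = rss - thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);

	/* Huge TSB: one entry per huge mapping, THP and hugetlb alike. */
	unsigned long huge_rss = hugetlb_pte_count + thp_pte_count;

	printf("size base TSB for %lu entries, huge TSB for %lu entries\n",
	       base_rss, huge_rss);
	return 0;
}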
@@ -12,6 +12,7 @@
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	/* We must align the address, because our caller will run
-	 * set_huge_pte_at() on whatever we return, which writes out
-	 * all of the sub-ptes for the hugepage range.  So we have
-	 * to give it the first such sub-pte.
-	 */
-	addr &= HPAGE_MASK;
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud) {
-		pmd = pmd_alloc(mm, pud, addr);
-		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
-	}
+	if (pud)
+		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
 	return pte;
 }
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	addr &= HPAGE_MASK;
-
 	pgd = pgd_offset(mm, addr);
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud)) {
-			pmd = pmd_offset(pud, addr);
-			if (!pmd_none(*pmd))
-				pte = pte_offset_map(pmd, addr);
-		}
+		if (!pud_none(*pud))
+			pte = (pte_t *)pmd_offset(pud, addr);
 	}
 	return pte;
 }
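The two hunks above are the heart of item 3: a hugetlb mapping is now represented by a single PMD-sized entry, so huge_pte_alloc() and huge_pte_offset() stop descending to a per-8K PTE level and simply return the PMD slot cast to a pte_t pointer. A back-of-the-envelope comparison of the page-table cost, using illustrative numbers and the assumption that each 8M region previously needed its own 8K PTE page:

#include <stdio.h>

#define PAGE_SIZE	(8UL << 10)	/* 8K base pages */
#define HPAGE_SIZE	(8UL << 20)	/* 8M huge pages */

int main(void)
{
	unsigned long mapping = 512UL << 20;		/* a 512M hugetlb mapping */
	unsigned long hpages  = mapping / HPAGE_SIZE;

	/* Before: every 8M huge page was backed by a full PTE page of
	 * 8M / 8K = 1024 sub-PTEs.  After: one PMD entry per huge page,
	 * and no PTE pages are allocated at all.
	 */
	unsigned long old_pte_pages = hpages;	/* one PTE page per huge page */
	unsigned long new_pte_pages = 0;

	printf("%lu huge pages: %lu PTE pages before, %lu after (saves %lu KB)\n",
	       hpages, old_pte_pages, new_pte_pages,
	       (old_pte_pages - new_pte_pages) * PAGE_SIZE / 1024);
	return 0;
}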
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
-	int i;
-	pte_t orig[2];
-	unsigned long nptes;
+	pte_t orig;
 
 	if (!pte_present(*ptep) && pte_present(entry))
-		mm->context.huge_pte_count++;
+		mm->context.hugetlb_pte_count++;
 
 	addr &= HPAGE_MASK;
-
-	nptes = 1 << HUGETLB_PAGE_ORDER;
-	orig[0] = *ptep;
-	orig[1] = *(ptep + nptes / 2);
-	for (i = 0; i < nptes; i++) {
-		*ptep = entry;
-		ptep++;
-		addr += PAGE_SIZE;
-		pte_val(entry) += PAGE_SIZE;
-	}
+	orig = *ptep;
+	*ptep = entry;
 
 	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
+	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
 	pte_t entry;
-	int i;
-	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
-		mm->context.huge_pte_count--;
+		mm->context.hugetlb_pte_count--;
 
 	addr &= HPAGE_MASK;
-	nptes = 1 << HUGETLB_PAGE_ORDER;
-	for (i = 0; i < nptes; i++) {
-		*ptep = __pte(0UL);
-		addr += PAGE_SIZE;
-		ptep++;
-	}
+	*ptep = __pte(0UL);
 
 	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
 	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
 
 	return entry;
 }
 
 int pmd_huge(pmd_t pmd)
 {
-	return 0;
+	return !pmd_none(pmd) &&
+		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
 }
 
 int pud_huge(pud_t pud)
 {
 	return 0;
 }
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+				   unsigned long addr)
+{
+	pgtable_t token = pmd_pgtable(*pmd);
+
+	pmd_clear(pmd);
+	pte_free_tlb(tlb, token, addr);
+	atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pmd_t *pmd;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(*pmd))
+			continue;
+		if (is_hugetlb_pmd(*pmd))
+			pmd_clear(pmd);
+		else
+			hugetlb_free_pte_range(tlb, pmd, addr);
+	} while (pmd++, addr = next, addr != end);
+
+	start &= PUD_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PUD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pmd = pmd_offset(pud, start);
+	pud_clear(pud);
+	pmd_free_tlb(tlb, pmd, start);
+	mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pud_t *pud;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+				       ceiling);
+	} while (pud++, addr = next, addr != end);
+
+	start &= PGDIR_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PGDIR_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pud = pud_offset(pgd, start);
+	pgd_clear(pgd);
+	pud_free_tlb(tlb, pud, start);
+}
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+			    unsigned long addr, unsigned long end,
+			    unsigned long floor, unsigned long ceiling)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	pgd = pgd_offset(tlb->mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+	} while (pgd++, addr = next, addr != end);
+}
@@ -346,10 +346,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+	    is_hugetlb_pte(pte)) {
+		/* We are fabricating 8MB pages using 4MB real hw pages. */
+		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					address, pte_val(pte));
-	else
+	} else
 #endif
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));

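The update_mmu_cache() hunk above supplies the detail behind the "fabricating 8MB pages using 4MB hw pages" comments: the hardware TTE only maps 4MB, so the bit of the faulting address that selects the lower or upper 4MB half of the 8MB page has to be folded into the value inserted into the huge TSB. A small stand-alone sketch of that bit manipulation; REAL_HPAGE_SHIFT = 22 (4MB) and the sample values are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define REAL_HPAGE_SHIFT 22	/* 4MB half of an 8MB page; value assumed here */

static uint64_t propagate_4mb_bit(uint64_t tte, uint64_t address)
{
	/* Copy the bit that selects the lower or upper 4MB half into the TTE. */
	return tte | (address & (1ULL << REAL_HPAGE_SHIFT));
}

int main(void)
{
	uint64_t tte = 0x8000000001234000ULL;	/* made-up TTE value */

	/* Fault in the upper 4MB half: bit 22 of the address is set. */
	printf("%#llx\n",
	       (unsigned long long)propagate_4mb_bit(tte, 0x7f0000400000ULL));
	return 0;
}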
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
 		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-			mm->context.huge_pte_count++;
+			mm->context.thp_pte_count++;
 		else
-			mm->context.huge_pte_count--;
+			mm->context.thp_pte_count--;
 
 		/* Do not try to allocate the TSB hash table if we
 		 * don't have one already.  We have various locks held

@@ -470,7 +470,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	unsigned long huge_pte_count;
+	unsigned long total_huge_pte_count;
 #endif
 	unsigned int i;
 
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.sparc64_ctx_val = 0UL;
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	/* We reset it to zero because the fork() page copying
+	/* We reset them to zero because the fork() page copying
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
 	 */
-	huge_pte_count = mm->context.huge_pte_count;
-	mm->context.huge_pte_count = 0;
+	total_huge_pte_count = mm->context.hugetlb_pte_count +
+			mm->context.thp_pte_count;
+	mm->context.hugetlb_pte_count = 0;
+	mm->context.thp_pte_count = 0;
 #endif
 
 	/* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (unlikely(huge_pte_count))
-		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+	if (unlikely(total_huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
 #endif
 
 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))

@@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n)
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
-	if (port->sysrq) {
-		locked = 0;
-	} else if (oops_in_progress) {
-		locked = spin_trylock(&port->lock);
-	} else
-		spin_lock(&port->lock);
 
 	for (i = 0; i < n; i++) {
 		if (*s == '\n')

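The removed block above is the double lock from item 1: the function had already taken port->lock (with a trylock when sysrq or an oops is in progress), and then tried to take it a second time, which deadlocks on a non-recursive spinlock. A stand-alone sketch of the corrected pattern, using a pthread mutex purely for illustration; the real driver uses spin_lock_irqsave/spin_trylock_irqsave as shown in the context lines.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock exactly once; under oops/sysrq only try, and remember
 * whether we actually got it so the final unlock can be skipped.
 */
static void console_write(const char *s, unsigned int n, bool oops_in_progress)
{
	bool locked = true;
	unsigned int i;

	if (oops_in_progress)
		locked = (pthread_mutex_trylock(&port_lock) == 0);
	else
		pthread_mutex_lock(&port_lock);

	for (i = 0; i < n; i++)	/* stand-in for the hypervisor putchar loop */
		putchar(s[i]);

	if (locked)
		pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	console_write("hello\n", 6, false);
	return 0;
}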