Commit 76811263 authored by Nitin Gupta, committed by David S. Miller

sparc64: Fix memory corruption when THP is enabled

The memory corruption was caused by incorrect
TLB/TSB flushing of hugepages.
Reported-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9ae34dbd
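
Before the diff, a brief illustration of the failure mode may help. The sketch below is a minimal stand-alone model, not kernel source; the constant values and the helper names tsb_for_shift()/tsb_for_shift_buggy() are assumptions for illustration. On sparc64 an 8MB transparent hugepage (HPAGE_SHIFT == 23) is backed by two 4MB hardware TLB entries (REAL_HPAGE_SHIFT == 22), so a flush tagged with REAL_HPAGE_SHIFT must be directed at the huge TSB; the pre-fix comparison against HPAGE_SHIFT routed it to the base TSB instead, leaving stale hugepage translations behind.

/* Minimal model (assumed constants and helper names) of the TSB
 * selection fixed in flush_tsb_user()/flush_tsb_user_page(). */
#include <stdio.h>

#define PAGE_SHIFT       13    /* 8KB base page on sparc64 */
#define REAL_HPAGE_SHIFT 22    /* 4MB hardware TLB entry   */
#define HPAGE_SHIFT      23    /* 8MB transparent hugepage */

enum tsb_idx { MM_TSB_BASE, MM_TSB_HUGE };

/* Post-fix rule: anything smaller than a 4MB real hugepage entry
 * belongs to the base TSB; 4MB entries belong to the huge TSB. */
static enum tsb_idx tsb_for_shift(unsigned int hugepage_shift)
{
        return (hugepage_shift < REAL_HPAGE_SHIFT) ? MM_TSB_BASE : MM_TSB_HUGE;
}

/* Pre-fix rule compared against HPAGE_SHIFT (23), so a flush tagged
 * with REAL_HPAGE_SHIFT (22) was wrongly routed to the base TSB. */
static enum tsb_idx tsb_for_shift_buggy(unsigned int hugepage_shift)
{
        return (hugepage_shift < HPAGE_SHIFT) ? MM_TSB_BASE : MM_TSB_HUGE;
}

int main(void)
{
        /* Prints "shift=22 fixed: huge, buggy: base". */
        printf("shift=22 fixed: %s, buggy: %s\n",
               tsb_for_shift(REAL_HPAGE_SHIFT) == MM_TSB_HUGE ? "huge" : "base",
               tsb_for_shift_buggy(REAL_HPAGE_SHIFT) == MM_TSB_HUGE ? "huge" : "base");
        return 0;
}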
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                 if (pte_val(*pte) & _PAGE_VALID) {
                         bool exec = pte_exec(*pte);
 
-                        tlb_batch_add_one(mm, vaddr, exec, false);
+                        tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
                 }
                 pte++;
                 vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                         pte_t orig_pte = __pte(pmd_val(orig));
                         bool exec = pte_exec(orig_pte);
 
-                        tlb_batch_add_one(mm, addr, exec, true);
-                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-                                          true);
+                        tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+                                          REAL_HPAGE_SHIFT);
                 } else {
                         tlb_batch_pmd_scan(mm, addr, orig);
                 }
...
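
The hunks above replace the old bool "huge" argument of tlb_batch_add_one() with an explicit page-size shift. The sketch below (a stand-alone model; batch_add() and flush_thp_pmd() are hypothetical names) shows the resulting flush pattern: an 8MB THP pmd is flushed as its two 4MB halves, each tagged with REAL_HPAGE_SHIFT, while a per-pte scan uses PAGE_SHIFT.

/* Stand-alone sketch (hypothetical helpers) of the post-fix flush
 * pattern for one THP pmd on sparc64. */
#include <stdio.h>

#define PAGE_SHIFT       13
#define REAL_HPAGE_SHIFT 22
#define REAL_HPAGE_SIZE  (1UL << REAL_HPAGE_SHIFT)

/* Stand-in for the kernel's tlb_batch_add_one(): queue one flush,
 * now carrying the real page-size shift instead of a bool flag. */
static void batch_add(unsigned long vaddr, unsigned int pgsz_shift)
{
        printf("flush vaddr=%#lx shift=%u\n", vaddr, pgsz_shift);
}

/* Mirrors the set_pmd_at() hunk: both 4MB halves of the 8MB
 * hugepage are queued, each tagged with REAL_HPAGE_SHIFT. */
static void flush_thp_pmd(unsigned long addr)
{
        batch_add(addr, REAL_HPAGE_SHIFT);
        batch_add(addr + REAL_HPAGE_SIZE, REAL_HPAGE_SHIFT);
}

int main(void)
{
        flush_thp_pmd(0x80000000UL);    /* arbitrary 8MB-aligned address */
        return 0;
}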
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 
         spin_lock_irqsave(&mm->context.lock, flags);
 
-        if (tb->hugepage_shift < HPAGE_SHIFT) {
+        if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
                 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 
         spin_lock_irqsave(&mm->context.lock, flags);
 
-        if (hugepage_shift < HPAGE_SHIFT) {
+        if (hugepage_shift < REAL_HPAGE_SHIFT) {
                 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
...