Commit 8b65bb57 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:
 "Several fixes here, mostly having to due with either build errors or
  memory corruptions depending upon whether you have THP enabled or not"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: remove unused wp_works_ok macro
  sparc32: Export vac_cache_size to fix build error
  sparc64: Fix memory corruption when THP is enabled
  sparc64: Fix kernel panic due to erroneous #ifdef surrounding pmd_write()
  arch/sparc: Avoid DCTI Couples
  sparc64: kern_addr_valid regression
  sparc64: Add support for 2G hugepages
  sparc64: Fix size check in huge_pte_alloc
parents 542380a2 86e1066f
@@ -17,6 +17,7 @@
 #define HPAGE_SHIFT		23
 #define REAL_HPAGE_SHIFT	22
+#define HPAGE_2GB_SHIFT		31
 #define HPAGE_256MB_SHIFT	28
 #define HPAGE_64K_SHIFT		16
 #define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE		3
+#define HUGE_MAX_HSTATE		4
 #endif

 #ifndef __ASSEMBLY__
...
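Note: HPAGE_2GB_SHIFT makes 1UL << 31 = 2 GB the new largest sun4v hugepage, and HUGE_MAX_HSTATE grows from 3 to 4 so the hstate table has room for all of 64K, 8M, 256M and 2G. A standalone sketch of the size arithmetic (illustration only, not kernel code):

/* Illustration only: sizes implied by the shifts above. */
#include <stdio.h>

int main(void)
{
	/* 64K, 8M (HPAGE_SHIFT), 256M, and the new 2G hugepage */
	unsigned int shifts[] = { 16, 23, 28, 31 };

	for (int i = 0; i < 4; i++)
		printf("shift %2u -> %7lu KB\n", shifts[i],
		       (1UL << shifts[i]) >> 10);
	return 0;
}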
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return pte_pfn(pte);
 }

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_dirty(pmd_t pmd)
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));

-	return pte_dirty(pte);
+	return pte_write(pte);
 }

-static inline unsigned long pmd_young(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_dirty(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));

-	return pte_young(pte);
+	return pte_dirty(pte);
 }

-static inline unsigned long pmd_write(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));

-	return pte_write(pte);
+	return pte_young(pte);
 }

 static inline unsigned long pmd_trans_huge(pmd_t pmd)
...
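Note: this hunk is the panic fix from the shortlog. Generic mm code (the hugetlb fault path, for instance) may call pmd_write() even on kernels built without CONFIG_TRANSPARENT_HUGEPAGE, so defining it only under that #ifdef left !THP kernels to hit the generic fallback and panic. The fix hoists pmd_write() above the #ifdef and advertises it with __HAVE_ARCH_PMD_WRITE, while pmd_dirty() and pmd_young() stay THP-only. A compile-only sketch of the corrected layout, with stand-in types and a hypothetical write bit:

/* Compile-only sketch; pmd_t/pte_t and the W bit are stand-ins. */
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pte; } pte_t;
#define _PAGE_WRITE	(1UL << 1)	/* hypothetical write-permission bit */

static inline pte_t __pte(unsigned long val) { return (pte_t){ val }; }
static inline unsigned long pmd_val(pmd_t pmd) { return pmd.pmd; }
static inline unsigned long pte_write(pte_t pte) { return pte.pte & _PAGE_WRITE; }

/* Always visible, THP or not. */
#define __HAVE_ARCH_PMD_WRITE
static inline unsigned long pmd_write(pmd_t pmd)
{
	return pte_write(__pte(pmd_val(pmd)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* pmd_dirty(), pmd_young(), pmd_trans_huge(), ... stay THP-only. */
#endif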
@@ -18,12 +18,6 @@
 #include <asm/signal.h>
 #include <asm/page.h>

-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
  * That one page is used to protect kernel from intruders, so that
  * we can make our access_ok test faster
...
@@ -18,10 +18,6 @@
 #include <asm/ptrace.h>
 #include <asm/page.h>

-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /*
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
...
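Note: these two processor_*.h hunks delete wp_works_ok, which appears to be a relic of the old x86-style "does write protection work in kernel mode?" probe; nothing in the sparc tree reads it, and the ksyms.c its comment mentions is long gone, so both the 32-bit and 64-bit copies can simply go.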
@@ -96,6 +96,7 @@ sparc64_boot:
 	andn	%g1, PSTATE_AM, %g1
 	wrpr	%g1, 0x0, %pstate
 	ba,a,pt	%xcc, 1f
+	 nop

 	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
 	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop
 niagara4_patch:
 	call	niagara4_patch_copyops
 	 nop
@@ -622,6 +624,7 @@ niagara4_patch:
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop

 niagara2_patch:
 	call	niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
 	nop

 	ba,a,pt	%xcc, 80f
+	 nop

 niagara_patch:
 	call	niagara_patch_copyops
...
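Note on the assembler hunks in this series ("arch/sparc: Avoid DCTI Couples"): they all apply one pattern, padding a `ba,a` (branch-always with an annulled delay slot) with a nop. The nop never executes; it only guarantees that the instruction following the branch in memory is not itself a control transfer. Two adjacent delayed control-transfer instructions form a "DCTI couple", which, per the series' rationale, the Oracle SPARC architecture documents recommend avoiding because newer processors (e.g. SPARC M7) can handle them slowly or mispredict them.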
@@ -82,6 +82,7 @@ do_stdfmna:
 	call	handle_stdfmna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop
 	.size	do_stdfmna,.-do_stdfmna

 	.type	breakpoint_trap,#function
...
@@ -237,6 +237,7 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 	bne,pt	%xcc, user_rtt_fill_32bit
 	 wrpr	%g1, %cwp
 	ba,a,pt	%xcc, user_rtt_fill_64bit
+	 nop

 user_rtt_fill_fixup_dax:
 	ba,pt	%xcc, user_rtt_fill_fixup_common
...
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
 	 rd	%pc, %g7

 	ba,a,pt	%xcc, 2f
+	 nop

 1:	ba,pt	%xcc, etrap_irq
 	 rd	%pc, %g7
...
@@ -352,6 +352,7 @@ sun4v_mna:
 	call	sun4v_do_mna
 	 add	%sp, PTREGS_OFF, %o0
 	ba,a,pt	%xcc, rtrap
+	 nop

 /* Privileged Action.  */
 sun4v_privact:
...
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop

 1:	call	spitfire_data_access_exception
 	 nop
...
@@ -152,6 +152,8 @@ fill_fixup_dax:
 	call	sun4v_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
 1:	call	spitfire_data_access_exception
 	 nop
 	ba,a,pt	%xcc, rtrap
+	 nop
...
@@ -326,11 +326,13 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	blu		170f
 	 nop
 	ba,a,pt		%xcc, 180f
+	 nop

 4:	/* 32 <= low bits < 48 */
 	blu		150f
 	 nop
 	ba,a,pt		%xcc, 160f
+	 nop
 5:	/* 0 < low bits < 32 */
 	blu,a		6f
 	 cmp		%g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	blu		130f
 	 nop
 	ba,a,pt		%xcc, 140f
+	 nop
 6:	/* 0 < low bits < 16 */
 	bgeu		120f
 	 nop
@@ -475,6 +478,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	brz,pt		%o2, 85f
 	 sub		%o0, %o1, GLOBAL_SPARE
 	ba,a,pt		%XCC, 90f
+	 nop

 	.align		64
 75: /* 16 < len <= 64 */
...
@@ -530,4 +530,5 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	bne,pt		%icc, 1b
 	 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
 	ba,a,pt		%icc, .Lexit
+	 nop
 	.size		FUNC_NAME, .-FUNC_NAME
...
@@ -102,4 +102,5 @@ NG4bzero:
 	bne,pt		%icc, 1b
 	 add		%o0, 0x30, %o0
 	ba,a,pt		%icc, .Lpostloop
+	 nop
 	.size		NG4bzero,.-NG4bzero
...
@@ -394,6 +394,7 @@ FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
 	brz,pt		%i2, 85f
 	 sub		%o0, %i1, %i3
 	ba,a,pt		%XCC, 90f
+	 nop

 	.align		64
 70: /* 16 < len <= 64 */
...
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
 	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

 	switch (shift) {
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hugepage_size = _PAGE_SZ256MB_4V;
 		pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
 	unsigned int shift;

 	switch (tte_szbits) {
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
 	case _PAGE_SZ256MB_4V:
 		shift = HPAGE_256MB_SHIFT;
 		break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (!pmd)
 		return NULL;

-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
...
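Note: the huge_pte_alloc() change fixes a unit mismatch: sz carries a size in bytes while PMD_SHIFT is a bit count (23), so `sz == PMD_SHIFT` could never be true and a separate pte page was always allocated; `sz >= PMD_SIZE` takes the PMD path for PMD-sized and larger pages. In miniature (illustration only):

/* The units bug in miniature: a byte count compared to a bit shift. */
#include <assert.h>

#define PMD_SHIFT	23
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 8 MB */

int main(void)
{
	unsigned long sz = PMD_SIZE;	/* an 8 MB hugepage */

	assert(sz != PMD_SHIFT);	/* old test: never matched */
	assert(sz >= PMD_SIZE);		/* fixed test: takes the PMD path */
	return 0;
}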
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
 	hugepage_shift = ilog2(hugepage_size);

 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);

-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;

 		return pfn_valid(pa >> PAGE_SHIFT);
...
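Note: kern_addr_valid() is the regression fix from the shortlog. Kernel virtual addresses on sparc64 are sign-extended (hence the `(long)addr < 0L` test), so `addr >> max_phys_bits` is never zero for them and the old check rejected every kernel address; it is the physical address that must fit within max_phys_bits. A toy illustration with hypothetical values (fake_pa() is a stand-in for __pa()):

/* Toy numbers; fake_pa() and PAGE_OFFSET here are stand-ins. */
#include <assert.h>

#define MAX_PHYS_BITS	47
#define PAGE_OFFSET	0xfffff80000000000UL	/* hypothetical */

static unsigned long fake_pa(unsigned long va)
{
	return va - PAGE_OFFSET;	/* stand-in for __pa() */
}

int main(void)
{
	unsigned long va = PAGE_OFFSET + 0x1000;

	/* Old test: a sign-extended kernel VA always has high bits set. */
	assert((va >> MAX_PHYS_BITS) != 0UL);		/* always "invalid" */

	/* Fixed test: the PA is what must fit in MAX_PHYS_BITS. */
	assert((fake_pa(va) >> MAX_PHYS_BITS) == 0UL);
	return 0;
}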
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;

 extern struct resource sparc_iomap;
...
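Note: this one-liner is the sparc32 build-error fix. vac_cache_size is defined here but referenced from modular code (the exact user is not shown in this diff), and a symbol is invisible to modules unless exported, so the missing EXPORT_SYMBOL showed up as an undefined-reference build error.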
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);

-			tlb_batch_add_one(mm, vaddr, exec, false);
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);

-			tlb_batch_add_one(mm, addr, exec, true);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-					  true);
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  REAL_HPAGE_SHIFT);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
...
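Note: tlb_batch_add_one()'s last argument changes from a bool "is huge" to a page-size shift. A boolean was enough with a single hugepage size, but it cannot distinguish 64K from 8M from 256M from 2G, and the flush path needs the actual granularity to pick the right TSB. A THP PMD is queued as two REAL_HPAGE_SHIFT (4 MB) entries because one 8 MB HPAGE is backed by two real 4 MB TTEs. A minimal sketch of the idea (the real tlb_batch_add_one() signature differs):

/* Why a shift beats a bool once several hugepage sizes exist
 * (sketch; not the kernel's tlb_batch_add_one signature). */
#include <stdio.h>

static void flush_one(unsigned long vaddr, unsigned int shift)
{
	/* The flush path can now derive the page size from the shift. */
	printf("flush %#lx as a %lu KB page\n",
	       vaddr, (1UL << shift) >> 10);
}

int main(void)
{
	flush_one(0x400000UL, 13);	/* PAGE_SHIFT: 8 KB base page */
	flush_one(0x800000UL, 22);	/* REAL_HPAGE_SHIFT: 4 MB     */
	return 0;
}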
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
...
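Note: these two comparisons are the heart of the THP memory-corruption fix. With tlb.c (above) now tagging THP flushes with REAL_HPAGE_SHIFT (22), the old `< HPAGE_SHIFT` (23) test would have classified them as base-page flushes, scrubbing the base TSB and leaving stale 4 MB entries in the huge TSB; comparing against REAL_HPAGE_SHIFT routes anything of 4 MB granularity or larger to the huge TSB.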