Commit 21f9debf authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Some 32-bit kgdb cleanups from Sam Ravnborg, and a hugepage TLB flush
  overhead fix on 64-bit from Nitin Gupta"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Reduce TLB flushes during hugepte changes
  aeroflex/greth: fix warning about unused variable
  openprom: fix warning
  sparc32: drop superfluous cast in calls to __nocache_pa()
  sparc32: fix build with STRICT_MM_TYPECHECKS
  sparc32: use proper prototype for trapbase
  sparc32: drop local prototype in kgdb_32
  sparc32: drop hardcoding trap_level in kgdb_trap
parents bd28b145 24e49ee3
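The sparc64 fix in this merge batches TLB flushes by page size: struct tlb_batch grows a huge flag, and queuing an address whose page size disagrees with the pending batch drains the batch first, so one cross-call always covers one page size. Below is a minimal standalone C sketch of that batching idea; the names tlb_batch, TLB_BATCH_NR, tlb_batch_add_one() and flush_tlb_pending() mirror the kernel's, but the flush itself is reduced to a counter, so treat this as an illustration of the technique rather than the kernel implementation.

/*
 * Standalone sketch (plain C99, no kernel headers): models the
 * per-batch page-size flag added by this merge.
 */
#include <stdbool.h>
#include <stdio.h>

#define TLB_BATCH_NR 192		/* same capacity as the kernel's batch */

struct tlb_batch {
	bool huge;			/* page size of all pending vaddrs */
	unsigned long tlb_nr;		/* number of pending vaddrs */
	unsigned long vaddrs[TLB_BATCH_NR];
};

static unsigned long flush_count;	/* stands in for the real cross-call */

static void flush_tlb_pending(struct tlb_batch *tb)
{
	if (tb->tlb_nr) {
		flush_count++;		/* one flush handles the whole batch */
		tb->tlb_nr = 0;
	}
}

static void tlb_batch_add_one(struct tlb_batch *tb, unsigned long vaddr,
			      bool huge)
{
	unsigned long nr = tb->tlb_nr;

	if (nr == 0)
		tb->huge = huge;	/* an empty batch adopts this size */

	if (tb->huge != huge) {		/* size changed: drain, then restart */
		flush_tlb_pending(tb);
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)		/* batch full: flush eagerly */
		flush_tlb_pending(tb);
}

int main(void)
{
	struct tlb_batch tb = { .tlb_nr = 0 };

	/* Two 4 MB halves of one 8 MB hugepage batch into a single flush. */
	tlb_batch_add_one(&tb, 0x0000000UL, true);
	tlb_batch_add_one(&tb, 0x0400000UL, true);
	/* A base-page address with a different size forces a drain first. */
	tlb_batch_add_one(&tb, 0x1000000UL, false);
	flush_tlb_pending(&tb);
	printf("flushes issued: %lu\n", flush_count);	/* prints 2 */
	return 0;
}

Compiled with any C99 compiler, this prints "flushes issued: 2": the two 4 MB halves of a hugepage share one flush, and only the page-size change forces a second.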
@@ -43,10 +43,10 @@
 	nop;
 #ifdef CONFIG_KGDB
 #define KGDB_TRAP(num) \
-	b kgdb_trap_low; \
-	rd %psr,%l0; \
-	nop; \
+	mov num, %l7; \
+	b kgdb_trap_low; \
+	rd %psr,%l0; \
 	nop;
 #else
 #define KGDB_TRAP(num) \
...
@@ -28,10 +28,10 @@ enum regnames {
 #define NUMREGBYTES		((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES		((GDB_Y + 1) * 8)
+#endif
 
 struct pt_regs;
 asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
-#endif
 
 void arch_kgdb_breakpoint(void);
...
@@ -69,7 +69,6 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
-/* #define __pmd(x)	((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
@@ -97,7 +96,6 @@ typedef unsigned long iopgprot_t;
 #define __pte(x)	(x)
 #define __iopte(x)	(x)
-/* #define __pmd(x)	(x) */ /* XXX later */
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
 #define __pgprot(x)	(x)
...
@@ -29,9 +29,9 @@ static inline void free_pgd_fast(pgd_t *pgd)
 static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 {
-	unsigned long pa = __nocache_pa((unsigned long)pmdp);
+	unsigned long pa = __nocache_pa(pmdp);
 
-	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }
 
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
...
@@ -298,7 +298,7 @@ static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
-	prot &= ~__pgprot(SRMMU_CACHE);
+	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
 	return prot;
 }
...
@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
 
@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	: "=r" (mask)
 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
-	return __pte(pte_val(pte) | mask);
+	return mask;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | __pte_huge_mask());
+}
+
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & __pte_huge_mask());
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
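(Factoring the boot-time-patched mask computation out into __pte_huge_mask() lets pte_mkhuge() and the new is_hugetlb_pte() share one copy of the sun4u/sun4v inline-asm sequence; the open-coded _PAGE_SZALL_4U/_PAGE_SZALL_4V test it replaces is deleted from init_64.c in a later hunk of this merge.)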
@@ -856,6 +872,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 * and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,
@@ -872,15 +901,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 * and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
 #define set_pte_at(mm,addr,ptep,pte)	\
...
@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR	192
 
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
@@ -16,7 +17,7 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 
 /* TLB flush operations. */
...
@@ -1225,20 +1225,18 @@ breakpoint_trap:
 	RESTORE_ALL
 
 #ifdef CONFIG_KGDB
-	.align	4
-	.globl	kgdb_trap_low
-	.type	kgdb_trap_low,#function
-kgdb_trap_low:
+ENTRY(kgdb_trap_low)
 	rd	%wim,%l3
 	SAVE_ALL
 	wr	%l0, PSR_ET, %psr
 	WRITE_PAUSE
 
+	mov	%l7, %o0	! trap_level
 	call	kgdb_trap
-	 add	%sp, STACKFRAME_SZ, %o0
+	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
 
 	RESTORE_ALL
-	.size	kgdb_trap_low,.-kgdb_trap_low
+ENDPROC(kgdb_trap_low)
 #endif
 
 	.align	4
...
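(The stub now matches the two-argument prototype: KGDB_TRAP(num), shown in the first hunk, stages the trap number in %l7, which kgdb_trap_low moves into %o0, while %o1 carries the struct pt_regs pointer; ENTRY()/ENDPROC() replace the open-coded .align/.globl/.type/.size directives.)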
@@ -127,6 +127,7 @@ extern unsigned int t_nmi[];
 extern unsigned int linux_trap_ipi15_sun4d[];
 extern unsigned int linux_trap_ipi15_sun4m[];
 
+extern struct tt_entry trapbase;
 extern struct tt_entry trapbase_cpu1;
 extern struct tt_entry trapbase_cpu2;
 extern struct tt_entry trapbase_cpu3;
...
@@ -12,7 +12,8 @@
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
 
-extern unsigned long trapbase;
+#include "kernel.h"
+#include "entry.h"
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
@@ -133,21 +134,19 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 	return -1;
 }
 
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-
-asmlinkage void kgdb_trap(struct pt_regs *regs)
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 {
 	unsigned long flags;
 
 	if (user_mode(regs)) {
-		do_hw_interrupt(regs, 0xfd);
+		do_hw_interrupt(regs, trap_level);
 		return;
 	}
 
 	flushw_all();
 
 	local_irq_save(flags);
-	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+	kgdb_handle_exception(trap_level, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
 }
...
@@ -68,8 +68,6 @@ struct screen_info screen_info = {
  * prints out pretty messages and returns.
  */
 
-extern unsigned long trapbase;
-
 /* Pretty sick eh? */
 static void prom_sync_me(void)
 {
@@ -300,7 +298,7 @@ void __init setup_arch(char **cmdline_p)
 	int i;
 	unsigned long highest_paddr;
 
-	sparc_ttable = (struct tt_entry *) &trapbase;
+	sparc_ttable = &trapbase;
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
...
@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
+	pte_t orig[2];
+	unsigned long nptes;
 
 	if (!pte_present(*ptep) && pte_present(entry))
 		mm->context.huge_pte_count++;
 
 	addr &= HPAGE_MASK;
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, ptep, entry);
+
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	orig[0] = *ptep;
+	orig[1] = *(ptep + nptes / 2);
+	for (i = 0; i < nptes; i++) {
+		*ptep = entry;
 		ptep++;
 		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 {
 	pte_t entry;
 	int i;
+	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
 		mm->context.huge_pte_count--;
 
 	addr &= HPAGE_MASK;
+	nptes = 1 << HUGETLB_PAGE_ORDER;
 
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(mm, addr, ptep);
+	for (i = 0; i < nptes; i++) {
+		*ptep = __pte(0UL);
 		addr += PAGE_SIZE;
 		ptep++;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
 
 	return entry;
 }
...
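(If the usual sparc64 page-size constants apply — 8 KB base pages, 8 MB HPAGE_SIZE, 4 MB REAL_HPAGE_SIZE, stated here as an assumption since the hunk itself doesn't show them — then nptes = 1 << HUGETLB_PAGE_ORDER = 1024. The old loops therefore pushed up to 1024 PTEs per hugepage update through set_pte_at()/pte_clear() and their flush paths; the rewritten loops store the PTEs directly and end with exactly two maybe_tlb_batch_add() calls, one per 4 MB half of the hugepage.)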
@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
-	if ((tlb_type == hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-	    (tlb_type != hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
-		return true;
-
-	return false;
-}
-#endif
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
...
@@ -133,7 +133,7 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
 	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
 		set_bit(scan, iounit->bmap);
-		sbus_writel(iopte, &iounit->page_table[scan]);
+		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
 	}
 	IOD(("%08lx\n", vaddr));
 	return vaddr;
@@ -228,7 +228,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
 			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
 			iopte = iounit->page_table + i;
-			sbus_writel(MKIOPTE(__pa(page)), iopte);
+			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
 		}
 		addr += PAGE_SIZE;
 		va += PAGE_SIZE;
...
@@ -107,17 +107,22 @@ static inline int srmmu_pmd_none(pmd_t pmd)
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}
 
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
-	ptp = __nocache_pa((unsigned long) ptep) >> 4;
+	ptp = __nocache_pa(ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 
@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 
@@ -911,7 +916,7 @@ void __init srmmu_paging_init(void)
 	/* ctx table has to be physically aligned to its size */
 	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
-	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
+	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)
 		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
...
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;
@@ -104,6 +112,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
...
@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
+	if (!tb->huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+	if (!huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
...
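(Because a batch now carries a single page size — see the tlb_batch change above — flush_tsb_user() and flush_tsb_user_page() probe only the TSB that can actually hold the affected translation: the base TSB when huge is false, the MM_TSB_HUGE TSB otherwise, instead of unconditionally walking the base TSB.)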
@@ -1323,7 +1323,7 @@ static inline int phy_aneg_done(struct phy_device *phydev)
 
 static int greth_mdio_init(struct greth_private *greth)
 {
-	int ret, phy;
+	int ret;
 	unsigned long timeout;
 
 	greth->mdio = mdiobus_alloc();
...
@@ -383,20 +383,12 @@ static struct device_node *get_node(phandle n, DATA *data)
 }
 
 /* Copy in a whole string from userspace into kernelspace. */
-static int copyin_string(char __user *user, size_t len, char **ptr)
+static char * copyin_string(char __user *user, size_t len)
 {
-	char *tmp;
-
 	if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
-		return -EINVAL;
-
-	tmp = memdup_user_nul(user, len);
-	if (IS_ERR(tmp))
-		return PTR_ERR(tmp);
+		return ERR_PTR(-EINVAL);
 
-	*ptr = tmp;
-	return 0;
+	return memdup_user_nul(user, len);
 }
 
 /*
@@ -415,9 +407,9 @@ static int opiocget(void __user *argp, DATA *data)
 
 	dp = get_node(op.op_nodeid, data);
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
 	pval = of_get_property(dp, str, &len);
 	err = 0;
@@ -440,7 +432,7 @@ static int opiocnextprop(void __user *argp, DATA *data)
 	struct device_node *dp;
 	struct property *prop;
 	char *str;
-	int err, len;
+	int len;
 
 	if (copy_from_user(&op, argp, sizeof(op)))
 		return -EFAULT;
@@ -449,9 +441,9 @@ static int opiocnextprop(void __user *argp, DATA *data)
 	if (!dp)
 		return -EINVAL;
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
 	if (str[0] == '\0') {
 		prop = dp->properties;
@@ -494,14 +486,14 @@ static int opiocset(void __user *argp, DATA *data)
 	if (!dp)
 		return -EINVAL;
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
-	err = copyin_string(op.op_buf, op.op_buflen, &tmp);
-	if (err) {
+	tmp = copyin_string(op.op_buf, op.op_buflen);
+	if (IS_ERR(tmp)) {
 		kfree(str);
-		return err;
+		return PTR_ERR(tmp);
 	}
 
 	err = of_set_property(dp, str, tmp, op.op_buflen);
...
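(The copyin_string() conversion is the kernel's usual <linux/err.h> idiom: encode a negative errno in the returned pointer with ERR_PTR(), and let callers test IS_ERR() and recover the code with PTR_ERR(). This removes both the out-parameter and the separate error variable in opiocnextprop().)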