Commit 00acc832 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/sparc-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 282388bc 86ca2ffe
@@ -434,14 +434,13 @@ void flush_thread(void)
         if (test_thread_flag(TIF_32BIT)) {
                 struct mm_struct *mm = t->task->mm;
                 pgd_t *pgd0 = &mm->pgd[0];
+                pud_t *pud0 = pud_offset(pgd0, 0);
 
-                if (pgd_none(*pgd0)) {
-                        pmd_t *page = pmd_alloc_one_fast(NULL, 0);
-                        if (!page)
-                                page = pmd_alloc_one(NULL, 0);
-                        pgd_set(pgd0, page);
+                if (pud_none(*pud0)) {
+                        pmd_t *page = pmd_alloc_one(mm, 0);
+                        pud_set(pud0, page);
                 }
-                pgd_cache = ((unsigned long) pgd_val(*pgd0)) << 11UL;
+                pgd_cache = ((unsigned long) pud_val(*pud0)) << 11UL;
         }
         __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                              "membar #Sync"
......
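The hunks in this merge all follow from one change, visible further down where asm-generic/4level-fixup.h is swapped for asm-generic/pgtable-nopud.h: the new pud level is folded onto the pgd. In that scheme pud_offset(pgdp, addr) just returns the pgd entry reinterpreted as a pud, and pud_none()/pud_set() read and write the same word the old pgd_*() macros did, which is why flush_thread() above can switch to the pud variants without changing the actual table layout. A stand-alone toy sketch of that folding (the types and helpers below are illustrative re-creations, not the kernel's definitions):

#include <stdio.h>

/* Toy stand-ins for the kernel types; the real ones are the
 * STRICT_MM_TYPECHECKS typedefs later in this diff plus
 * asm-generic/pgtable-nopud.h. */
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t pgd; } pud_t;    /* folded: a pud is just a pgd entry */

/* With the pud folded, pud_offset() reinterprets the pgd entry in place. */
static pud_t *pud_offset(pgd_t *pgdp, unsigned long addr)
{
        (void)addr;
        return (pud_t *)pgdp;
}

static int pud_none(pud_t pud)
{
        return pud.pgd.pgd == 0;
}

/* sparc64 stores the next table's physical address shifted right by 11. */
static void pud_set(pud_t *pudp, unsigned long pmd_table_pa)
{
        pudp->pgd.pgd = pmd_table_pa >> 11;
}

int main(void)
{
        pgd_t pgd0 = { 0 };
        pud_t *pud0 = pud_offset(&pgd0, 0);

        if (pud_none(*pud0))            /* same word flush_thread() tests */
                pud_set(pud0, 0x12345800UL);

        printf("pgd word is now %#lx\n", pgd0.pgd);
        return 0;
}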
@@ -151,6 +151,7 @@ int prom_callback(long *args)
                 struct task_struct *p;
                 struct mm_struct *mm = NULL;
                 pgd_t *pgdp;
+                pud_t *pudp;
                 pmd_t *pmdp;
                 pte_t *ptep;
@@ -166,7 +167,10 @@ int prom_callback(long *args)
                 pgdp = pgd_offset(mm, va);
                 if (pgd_none(*pgdp))
                         goto done;
-                pmdp = pmd_offset(pgdp, va);
+                pudp = pud_offset(pgdp, va);
+                if (pud_none(*pudp))
+                        goto done;
+                pmdp = pmd_offset(pudp, va);
                 if (pmd_none(*pmdp))
                         goto done;
@@ -208,6 +212,7 @@ int prom_callback(long *args)
                  * vmalloc or prom_inherited mapping.
                  */
                 pgd_t *pgdp;
+                pud_t *pudp;
                 pmd_t *pmdp;
                 pte_t *ptep;
                 int error;
@@ -221,7 +226,10 @@ int prom_callback(long *args)
                 pgdp = pgd_offset_k(va);
                 if (pgd_none(*pgdp))
                         goto done;
-                pmdp = pmd_offset(pgdp, va);
+                pudp = pud_offset(pgdp, va);
+                if (pud_none(*pudp))
+                        goto done;
+                pmdp = pmd_offset(pudp, va);
                 if (pmd_none(*pmdp))
                         goto done;
......
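Every lookup site converted in this merge (prom_callback() above, and get_user_insn(), the signal-frame flushes and sun4u_get_pte() below) now uses the same descend-and-check shape: pgd_offset, bail on pgd_none, pud_offset, bail on pud_none, pmd_offset, bail on pmd_none, then the pte. A minimal stand-alone sketch of that walk over a toy three-level table, with made-up sizes and names:

#include <stdio.h>

/* Toy three-level radix tree: 4 entries per level, made-up shifts. */
#define ENTRIES         4
#define PMD_SHIFT       2
#define PGD_SHIFT       4
#define IDX(addr, shift)        (((addr) >> (shift)) & (ENTRIES - 1))

struct pte { unsigned long val; };
struct pmd { struct pte *ptes[ENTRIES]; };
struct pgd { struct pmd *pmds[ENTRIES]; };

/* Descend one level at a time, giving up at the first hole, the way the
 * converted lookups do with pgd_none()/pud_none()/pmd_none(). */
static struct pte *lookup(struct pgd *pgd, unsigned long addr)
{
        struct pmd *pmd = pgd->pmds[IDX(addr, PGD_SHIFT)];  /* pgd + folded pud */

        if (!pmd)                               /* hole: no pmd table here */
                return NULL;
        return pmd->ptes[IDX(addr, PMD_SHIFT)]; /* pmd_offset; may be NULL */
}

int main(void)
{
        static struct pte page = { 0x1234 };
        static struct pmd pmd;
        static struct pgd pgd;
        unsigned long addr = (2UL << PGD_SHIFT) | (1UL << PMD_SHIFT);

        pgd.pmds[2] = &pmd;
        pmd.ptes[1] = &page;

        struct pte *pte = lookup(&pgd, addr);
        printf("pte %s, val %#lx\n", pte ? "found" : "missing",
               pte ? pte->val : 0UL);
        return 0;
}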
@@ -857,7 +857,8 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                 /* Flush instruction space. */
                 unsigned long address = ((unsigned long)&(sf->insns[0]));
                 pgd_t *pgdp = pgd_offset(current->mm, address);
-                pmd_t *pmdp = pmd_offset(pgdp, address);
+                pud_t *pudp = pud_offset(pgdp, address);
+                pmd_t *pmdp = pmd_offset(pudp, address);
                 pte_t *ptep;
 
                 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -1268,7 +1269,8 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                 /* Flush instruction space. */
                 unsigned long address = ((unsigned long)&(sf->insns[0]));
                 pgd_t *pgdp = pgd_offset(current->mm, address);
-                pmd_t *pmdp = pmd_offset(pgdp, address);
+                pud_t *pudp = pud_offset(pgdp, address);
+                pmd_t *pmdp = pmd_offset(pudp, address);
                 pte_t *ptep;
 
                 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
......
@@ -7,8 +7,22 @@
 #include <asm/asi.h>
 
         .text
-        .align 64
 
+        /* We use these stubs for the uncommon case
+         * of contention on the atomic value. This is
+         * so that we can keep the main fast path 8
+         * instructions long and thus fit into a single
+         * L2 cache line.
+         */
+__atomic_add_membar:
+        ba,pt %xcc, __atomic_add
+         membar #StoreLoad | #StoreStore
+
+__atomic_sub_membar:
+        ba,pt %xcc, __atomic_sub
+         membar #StoreLoad | #StoreStore
+
+        .align 64
         .globl __atomic_add
         .type __atomic_add,#function
 __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
@@ -16,10 +30,10 @@ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
         add %g5, %o0, %g7
         cas [%o1], %g5, %g7
         cmp %g5, %g7
-        bne,pn %icc, __atomic_add
-         membar #StoreLoad | #StoreStore
+        bne,pn %icc, __atomic_add_membar
+         add %g7, %o0, %g7
         retl
-         add %g7, %o0, %o0
+         sra %g7, 0, %o0
         .size __atomic_add, .-__atomic_add
 
         .globl __atomic_sub
@@ -29,10 +43,10 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
         sub %g5, %o0, %g7
         cas [%o1], %g5, %g7
         cmp %g5, %g7
-        bne,pn %icc, __atomic_sub
-         membar #StoreLoad | #StoreStore
+        bne,pn %icc, __atomic_sub_membar
+         sub %g7, %o0, %g7
        retl
-         sub %g7, %o0, %o0
+         sra %g7, 0, %o0
         .size __atomic_sub, .-__atomic_sub
 
         .globl __atomic64_add
......
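The atomic stubs above are a fast-path/slow-path split: the hot __atomic_add/__atomic_sub sequence is kept to eight instructions so it fits in a single L2 cache line (as the new comment says), the branch's delay slot now computes the return value instead of issuing a membar on every pass, the membar runs only in the out-of-line *_membar stubs when the cas loses the race and the loop must retry, and the final sra %g7, 0, %o0 sign-extends the 32-bit result into the 64-bit return register. A rough C11 sketch of the same shape -- compact retry loop, extra fence only on the contended path -- and not a claim about SPARC's exact memory-ordering rules:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative only: a compare-and-swap add loop shaped like the assembly
 * above -- short hot path, memory fence issued only when the CAS failed
 * and we are about to retry. */
static int atomic_add_ret(int increment, _Atomic int *v)
{
        int old, new;

        for (;;) {
                old = atomic_load_explicit(v, memory_order_relaxed);
                new = old + increment;
                if (atomic_compare_exchange_strong_explicit(v, &old, new,
                                                            memory_order_relaxed,
                                                            memory_order_relaxed))
                        return new;             /* fast path: no fence */
                /* contended path: fence, then go around again */
                atomic_thread_fence(memory_order_seq_cst);
        }
}

int main(void)
{
        _Atomic int counter = 40;

        printf("%d\n", atomic_add_ret(2, &counter));    /* prints 42 */
        return 0;
}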
@@ -175,6 +175,7 @@ static void bad_kernel_pc(struct pt_regs *regs)
 static unsigned int get_user_insn(unsigned long tpc)
 {
         pgd_t *pgdp = pgd_offset(current->mm, tpc);
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep, pte;
         unsigned long pa;
@@ -183,7 +184,10 @@ static unsigned int get_user_insn(unsigned long tpc)
         if (pgd_none(*pgdp))
                 goto outret;
-        pmdp = pmd_offset(pgdp, tpc);
+        pudp = pud_offset(pgdp, tpc);
+        if (pud_none(*pudp))
+                goto outret;
+        pmdp = pmd_offset(pudp, tpc);
         if (pmd_none(*pmdp))
                 goto outret;
......
@@ -96,6 +96,27 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
         return 0;
 }
 
+static inline int io_remap_pud_range(pud_t * pud, unsigned long address, unsigned long size,
+        unsigned long offset, pgprot_t prot, int space)
+{
+        unsigned long end;
+
+        address &= ~PUD_MASK;
+        end = address + size;
+        if (end > PUD_SIZE)
+                end = PUD_SIZE;
+        offset -= address;
+        do {
+                pmd_t *pmd = pmd_alloc(current->mm, pud, address);
+                if (!pud)
+                        return -ENOMEM;
+                io_remap_pmd_range(pmd, address, end - address, address + offset, prot, space);
+                address = (address + PUD_SIZE) & PUD_MASK;
+                pud++;
+        } while (address < end);
+        return 0;
+}
+
 int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
 {
         int error = 0;
@@ -111,11 +132,11 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned
         spin_lock(&mm->page_table_lock);
         while (from < end) {
-                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+                pud_t *pud = pud_alloc(current->mm, dir, from);
                 error = -ENOMEM;
-                if (!pmd)
+                if (!pud)
                         break;
-                error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
+                error = io_remap_pud_range(pud, from, end - from, offset + from, prot, space);
                 if (error)
                         break;
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
......
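The new io_remap_pud_range() is a middle layer in the usual range-walking pattern: mask the start down to its offset within the current level's span, clamp the end to that span, bias offset so that address + offset keeps pointing at the absolute I/O target, then hand one lower-level-sized chunk at a time to the next level. A small user-space sketch of that pattern with toy span sizes (the names mirror the macros above but nothing here is kernel code):

#include <stdio.h>

/* Toy spans standing in for PMD_SIZE/PUD_SIZE. */
#define PMD_SIZE        0x100UL
#define PMD_MASK        (~(PMD_SIZE - 1))
#define PUD_SIZE        0x400UL
#define PUD_MASK        (~(PUD_SIZE - 1))

/* Stand-in for the lower level: just report the chunk it was handed. */
static void remap_pmd_chunk(unsigned long address, unsigned long size,
                            unsigned long offset)
{
        printf("  pmd entry %lu maps offsets [%#lx, %#lx) -> io %#lx\n",
               address / PMD_SIZE, address, address + size, address + offset);
}

/* Same shape as io_remap_pud_range(): clamp the request to this level's
 * span, adjust 'offset' so address + offset stays the absolute target,
 * then walk pmd-sized chunks. */
static void remap_pud_range(unsigned long address, unsigned long size,
                            unsigned long offset)
{
        unsigned long end;

        address &= ~PUD_MASK;           /* offset of the start within this pud */
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        offset -= address;
        do {
                unsigned long next = (address + PMD_SIZE) & PMD_MASK;
                unsigned long chunk = (next < end ? next : end) - address;

                remap_pmd_chunk(address, chunk, offset);
                address = next;         /* step to the next pmd entry */
        } while (address < end);
}

int main(void)
{
        /* Map 0x250 bytes of "I/O space" at bus offset 0x9030, starting
         * 0x30 bytes into the pud's span: crosses three pmd entries. */
        remap_pud_range(0x30, 0x250, 0x9000 + 0x30);
        return 0;
}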
@@ -24,30 +24,38 @@
 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte = NULL;
 
         pgd = pgd_offset(mm, addr);
         if (pgd) {
-                pmd = pmd_alloc(mm, pgd, addr);
+                pud = pud_offset(pgd, addr);
+                if (pud) {
+                        pmd = pmd_alloc(mm, pud, addr);
                         if (pmd)
                                 pte = pte_alloc_map(mm, pmd, addr);
+                }
         }
         return pte;
 }
 
 static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte = NULL;
 
         pgd = pgd_offset(mm, addr);
         if (pgd) {
-                pmd = pmd_offset(pgd, addr);
+                pud = pud_offset(pgd, addr);
+                if (pud) {
+                        pmd = pmd_offset(pud, addr);
                         if (pmd)
                                 pte = pte_offset_map(pmd, addr);
+                }
         }
         return pte;
 }
......
@@ -1462,7 +1462,8 @@ void __init paging_init(void)
         memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
 
         /* Now can init the kernel/bad page tables. */
-        pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));
+        pud_set(pud_offset(&swapper_pg_dir[0], 0),
+                swapper_pmd_dir + (shift / sizeof(pgd_t)));
 
         sparc64_vpte_patchme1[0] |=
                 (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
......
@@ -21,11 +21,14 @@ extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
 extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
 
-/* GROSS, defining this makes gcc pass these types as aggregates,
- * and thus on the stack, turn this crap off... -DaveM
+/* Unlike sparc32, sparc64's parameter passing API is more
+ * sane in that structures which as small enough are passed
+ * in registers instead of on the stack. Thus, setting
+ * STRICT_MM_TYPECHECKS does not generate worse code so
+ * let's enable it to get the type checking.
  */
-/* #define STRICT_MM_TYPECHECKS */
+#define STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
 /* These are used to make use of C type-checking.. */
@@ -33,25 +36,19 @@ typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
 typedef struct { unsigned int pmd; } pmd_t;
 typedef struct { unsigned int pgd; } pgd_t;
-typedef struct { unsigned long ctxd; } ctxd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct { unsigned long iopgprot; } iopgprot_t;
 
 #define pte_val(x) ((x).pte)
 #define iopte_val(x) ((x).iopte)
 #define pmd_val(x) ((x).pmd)
 #define pgd_val(x) ((x).pgd)
-#define ctxd_val(x) ((x).ctxd)
 #define pgprot_val(x) ((x).pgprot)
-#define iopgprot_val(x) ((x).iopgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
 #define __iopte(x) ((iopte_t) { (x) } )
 #define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
-#define __ctxd(x) ((ctxd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )
-#define __iopgprot(x) ((iopgprot_t) { (x) } )
 
 #else
 /* .. while these make it easier on the compiler */
@@ -59,25 +56,19 @@ typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
 typedef unsigned int pmd_t;
 typedef unsigned int pgd_t;
-typedef unsigned long ctxd_t;
 typedef unsigned long pgprot_t;
-typedef unsigned long iopgprot_t;
 
 #define pte_val(x) (x)
 #define iopte_val(x) (x)
 #define pmd_val(x) (x)
 #define pgd_val(x) (x)
-#define ctxd_val(x) (x)
 #define pgprot_val(x) (x)
-#define iopgprot_val(x) (x)
 
 #define __pte(x) (x)
 #define __iopte(x) (x)
 #define __pmd(x) (x)
 #define __pgd(x) (x)
-#define __ctxd(x) (x)
 #define __pgprot(x) (x)
-#define __iopgprot(x) (x)
 
 #endif /* (STRICT_MM_TYPECHECKS) */
......
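The hunks above turn STRICT_MM_TYPECHECKS on permanently: each page-table word is wrapped in a one-member struct so the compiler rejects code that mixes, say, a pgd_t with a pte_t, and because the sparc64 ABI passes such small structures in registers the wrappers cost nothing at run time (which is the point of the new comment). A tiny stand-alone illustration of the idea, using toy re-creations of a couple of the macros rather than the real headers:

#include <stdio.h>

/* Wrap the raw words in distinct one-member structs, and provide
 * _val()/__type() accessors for the few places that need the raw value. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pte_val(x)      ((x).pte)
#define pgd_val(x)      ((x).pgd)
#define __pte(x)        ((pte_t) { (x) })
#define __pgd(x)        ((pgd_t) { (x) })

static int pte_present(pte_t pte)
{
        return pte_val(pte) & 1;        /* toy "present" bit */
}

int main(void)
{
        pte_t pte = __pte(0x8001);
        pgd_t pgd = __pgd(0x4000);

        printf("pte present: %d\n", pte_present(pte));
        /* pte_present(pgd) is now a compile-time error instead of a
         * silently accepted unsigned long -- the whole point of the
         * strict typedefs. */
        (void)pgd;
        return 0;
}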
@@ -133,7 +133,7 @@ static __inline__ void free_pgd_slow(pgd_t *pgd)
 #define DCACHE_COLOR(address) 0
 #endif
 
-#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
+#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
 
 static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
......
@@ -12,7 +12,7 @@
  * the SpitFire page tables.
  */
 
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
 
 #include <linux/config.h>
 #include <asm/spitfire.h>
@@ -263,23 +263,23 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 }
 
 #define pmd_set(pmdp, ptep) \
         (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
-#define pgd_set(pgdp, pmdp) \
-        (pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
+#define pud_set(pudp, pmdp) \
+        (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
 #define __pmd_page(pmd) \
         ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
 #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pgd_page(pgd) \
-        ((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))
+#define pud_page(pud) \
+        ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
 #define pte_none(pte) (!pte_val(pte))
 #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (0)
 #define pmd_present(pmd) (pmd_val(pmd) != 0U)
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0U)
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (0)
+#define pud_present(pud) (pud_val(pud) != 0U)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
 
 /* The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -313,8 +313,8 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, address) \
-        ((pmd_t *) pgd_page(*(dir)) + \
+#define pmd_offset(pudp, address) \
+        ((pmd_t *) pud_page(*(pudp)) + \
         (((address) >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))
 
 /* Find an entry in the third-level page table.. */
@@ -384,6 +384,7 @@ static __inline__ unsigned long
 sun4u_get_pte (unsigned long addr)
 {
         pgd_t *pgdp;
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
@@ -392,7 +393,8 @@ sun4u_get_pte (unsigned long addr)
         if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
                 return prom_virt_to_phys(addr, NULL);
         pgdp = pgd_offset_k(addr);
-        pmdp = pmd_offset(pgdp, addr);
+        pudp = pud_offset(pgdp, addr);
+        pmdp = pmd_offset(pudp, addr);
         ptep = pte_offset_kernel(pmdp, addr);
         return pte_val(*ptep) & _PAGE_PADDR;
 }
......
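The pud_set()/pmd_set() macros above store __pa(next_table) >> 11 rather than a full physical address: the pgd/pmd words are only 32 bits wide (see the typedefs earlier in this diff), and as long as the next-level table is at least 2 KB aligned the low 11 bits are always zero, so the shift lets a physical address wider than 32 bits fit in the entry; pud_page()/__pmd_page() undo it with << 11 and __va(). A toy encode/decode pair showing the same trick (it assumes a 64-bit unsigned long and the alignment just described; the real macros go through __pa()/__va() on kernel virtual pointers):

#include <assert.h>
#include <stdio.h>

typedef unsigned int pmd_entry_t;       /* 32-bit directory word */

/* Pack: keep only paddr >> 11; safe because the table is 2 KB aligned. */
static pmd_entry_t pmd_encode(unsigned long pa)
{
        assert((pa & ((1UL << 11) - 1)) == 0);
        return (pmd_entry_t)(pa >> 11UL);
}

/* Unpack: shift back up to recover the table's physical address. */
static unsigned long pmd_decode(pmd_entry_t ent)
{
        return ((unsigned long)ent) << 11UL;
}

int main(void)
{
        /* A physical address wider than 32 bits, 8 KB aligned
         * (assumes 64-bit unsigned long, as on sparc64). */
        unsigned long pa = 0x1234566000UL;
        pmd_entry_t ent = pmd_encode(pa);

        printf("entry %#x -> paddr %#lx\n", ent, pmd_decode(ent));
        assert(pmd_decode(ent) == pa);
        return 0;
}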
@@ -121,6 +121,7 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
 #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
 #define pte_free_tlb(mp,ptepage) pte_free(ptepage)
 #define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
+#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 #define tlb_start_vma(tlb, vma) do { } while (0)
......
@@ -2096,7 +2096,6 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 }
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
-#if (PTRS_PER_PUD > 1)
 /*
  * Allocate page upper directory.
  *
@@ -2125,12 +2124,10 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
                 goto out;
         }
         pgd_populate(mm, pgd, new);
 out:
         return pud_offset(pgd, address);
 }
-#endif
 
-#if (PTRS_PER_PMD > 1)
 /*
  * Allocate page middle directory.
  *
@@ -2159,10 +2156,9 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
                 goto out;
         }
         pud_populate(mm, pud, new);
 out:
         return pmd_offset(pud, address);
 }
-#endif
 #else
 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
......
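The last hunks drop the #if (PTRS_PER_PUD > 1) and #if (PTRS_PER_PMD > 1) guards so __pud_alloc() and __pmd_alloc() are built unconditionally. Only their tails are visible above, but the shape is the familiar allocate-then-recheck pattern: allocate a fresh lower-level table, and if another path populated the entry in the meantime, back out and reuse the existing one; either way return the entry for the requested address. A single-threaded toy of that shape (the locking is only indicated in comments, and nothing here is the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512

typedef struct { void *table; } dir_entry_t;

/* Allocate a lower-level table for 'entry' unless one already exists. */
static void *dir_alloc(dir_entry_t *entry)
{
        void *new = calloc(ENTRIES, sizeof(void *));

        if (!new)
                return NULL;
        /* spin_lock(&mm->page_table_lock) would be re-taken here ... */
        if (entry->table) {             /* someone else populated it: back out */
                free(new);
                goto out;
        }
        entry->table = new;             /* ... the pgd_populate()/pud_populate() step */
out:
        return entry->table;
}

int main(void)
{
        dir_entry_t pgd_entry = { NULL };

        printf("first  alloc: %p\n", dir_alloc(&pgd_entry));
        printf("second alloc: %p\n", dir_alloc(&pgd_entry));   /* same table */
        free(pgd_entry.table);
        return 0;
}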