Commit 08179b47 authored by Linus Torvalds

Merge branch 'parisc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

 - Optimize parisc page table locks by using the existing
   page_table_lock

 - Export argv0-preserve flag in binfmt_misc for usage in qemu-user

 - Fix interrupt table (IVT) checksum so firmware will call crash
   handler (HPMC)

 - Increase IRQ stack to 64 KB on 64-bit kernel

 - Switch to common devmem_is_allowed() implementation

 - Minor fix to get_wchan()

* 'parisc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  binfmt_misc: pass binfmt_misc flags to the interpreter
  parisc: Optimize per-pagetable spinlocks
  parisc: Replace test_ti_thread_flag() with test_tsk_thread_flag()
  parisc: Bump 64-bit IRQ stack size to 64 KB
  parisc: Fix IVT checksum calculation wrt HPMC
  parisc: Use the generic devmem_is_allowed()
  parisc: Drop out of get_wchan() if task is running again
parents 2671fe5e 2347961b
@@ -34,6 +34,7 @@ config PARISC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CPU_DEVICES
 	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_LIB_DEVMEM_IS_ALLOWED
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_MOD_ARCH_SPECIFIC
@@ -310,6 +311,16 @@ config IRQSTACKS
 	  for handling hard and soft interrupts.  This can help avoid
 	  overflowing the process kernel stacks.
 
+config TLB_PTLOCK
+	bool "Use page table locks in TLB fault handler"
+	depends on SMP
+	default n
+	help
+	  Select this option to enable page table locking in the TLB
+	  fault handler. This ensures that page table entries are
+	  updated consistently on SMP machines at the expense of some
+	  loss in performance.
+
 config HOTPLUG_CPU
 	bool
 	default y if SMP
......
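With the hunk above applied, turning the new lock on in an SMP build takes a hypothetical .config fragment like the following (TLB_PTLOCK defaults to n and depends on SMP):

	CONFIG_SMP=y
	CONFIG_TLB_PTLOCK=y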
@@ -179,7 +179,7 @@ static __inline__ void __user *arch_compat_alloc_user_space(long len)
 
 static inline int __is_compat_task(struct task_struct *t)
 {
-	return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
+	return test_tsk_thread_flag(t, TIF_32BIT);
 }
 
 static inline int is_compat_task(void)
......
@@ -321,4 +321,6 @@ extern void iowrite64be(u64 val, void __iomem *addr);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+extern int devmem_is_allowed(unsigned long pfn);
+
 #endif
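For context, GENERIC_LIB_DEVMEM_IS_ALLOWED (selected in the Kconfig hunk above) makes the kernel provide devmem_is_allowed() from lib/devmem_is_allowed.c instead of an arch-private copy. The common version is roughly the following (paraphrased here for reference, not part of this diff):

	/* Paraphrase of the generic helper: /dev/mem may touch a pfn
	 * only if it is neither exclusive iomem nor RAM. */
	int devmem_is_allowed(unsigned long pfn)
	{
		if (iomem_is_exclusive(PFN_PHYS(pfn)))
			return 0;
		if (!page_is_ram(pfn))
			return 1;
		return 0;
	}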
@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/atomic.h>
+#include <linux/spinlock.h>
 #include <asm-generic/mm_hooks.h>
 
 /* on PA-RISC, we actually have enough contexts to justify an allocator
@@ -50,6 +51,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
 		struct mm_struct *next, struct task_struct *tsk)
 {
 	if (prev != next) {
+#ifdef CONFIG_TLB_PTLOCK
+		/* put physical address of page_table_lock in cr28 (tr4)
+		   for TLB faults */
+		spinlock_t *pgd_lock = &next->page_table_lock;
+		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
+#endif
 		mtctl(__pa(next->pgd), 25);
 		load_context(next->context);
 	}
......
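The control register written here is read back by the get_ptl macro in entry.S further down; the TLB miss handlers then take the lock with PA-RISC's ldcw primitive. As a rough C model of what ptl_lock/ptl_unlock0 do (an illustrative sketch using the arch's __ldcw() helper, simplified to ignore the space-id-zero fast path; not code from this diff):

	/* ldcw atomically loads the lock word and stores 0, so a nonzero
	 * return value means the lock was free and now belongs to us. */
	static inline void ptl_lock_model(volatile unsigned int *lock_word)
	{
		while (__ldcw(lock_word) == 0)		/* contended: word is 0 */
			while (*lock_word == 0)		/* spin without atomics */
				cpu_relax();
	}

	static inline void ptl_unlock_model(volatile unsigned int *lock_word,
					    unsigned int spc)
	{
		/* release by storing the (nonzero) space id; the real code
		 * uses an ordered store (stw,ma) so prior accesses complete
		 * before the lock is dropped */
		*lock_word = spc;
	}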
@@ -112,7 +112,7 @@ extern int npmem_ranges;
 #else
 #define BITS_PER_PTE_ENTRY	2
 #define BITS_PER_PMD_ENTRY	2
-#define BITS_PER_PGD_ENTRY	BITS_PER_PMD_ENTRY
+#define BITS_PER_PGD_ENTRY	2
 #endif
 #define PGD_ENTRY_SIZE	(1UL << BITS_PER_PGD_ENTRY)
 #define PMD_ENTRY_SIZE	(1UL << BITS_PER_PMD_ENTRY)
......
@@ -15,47 +15,23 @@
 #define __HAVE_ARCH_PGD_FREE
 #include <asm-generic/pgalloc.h>
 
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd.  This means that we can
- * subtract a constant offset to get to it.  The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+/* Allocate the top level pgd (page directory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
-					       PGD_ALLOC_ORDER);
-	pgd_t *actual_pgd = pgd;
+	pgd_t *pgd;
 
-	if (likely(pgd != NULL)) {
-		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
-		actual_pgd += PTRS_PER_PGD;
-		/* Populate first pmd with allocated memory.  We mark it
-		 * with PxD_FLAG_ATTACHED as a signal to the system that this
-		 * pmd entry may not be cleared. */
-		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
-					   PxD_FLAG_VALID |
-					   PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
-		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
-		 * a signal that this pmd may not be freed */
-		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
-#endif
-	}
-	spin_lock_init(pgd_spinlock(actual_pgd));
-	return actual_pgd;
+	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (unlikely(pgd == NULL))
+		return NULL;
+
+	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	pgd -= PTRS_PER_PGD;
-#endif
-	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+	free_pages((unsigned long)pgd, PGD_ORDER);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	pmd_t *pmd;
+
+	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	if (likely(pmd))
+		memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
+	return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
-		/*
-		 * This is the permanent pmd attached to the pgd;
-		 * cannot free it.
-		 * Increment the counter to compensate for the decrement
-		 * done by generic mm code.
-		 */
-		mm_inc_nr_pmds(mm);
-		return;
-	}
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
 #endif
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	/* preserve the gateway marker if this is the beginning of
-	 * the permanent pmd */
-	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
-				    PxD_FLAG_VALID |
-				    PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
-	else
-#endif
 	set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
 		+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
......
@@ -23,8 +23,6 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-static inline spinlock_t *pgd_spinlock(pgd_t *);
-
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory.  For the return value to be meaningful, ADDR must be >=
@@ -42,12 +40,8 @@ static inline spinlock_t *pgd_spinlock(pgd_t *);
 
 /* This is for the serialization of PxTLB broadcasts. At least on the N class
  * systems, only one PxTLB inter processor broadcast can be active at any one
- * time on the Merced bus.
- * PTE updates are protected by locks in the PMD.
- */
+ * time on the Merced bus. */
 extern spinlock_t pa_tlb_flush_lock;
-extern spinlock_t pa_swapper_pg_lock;
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 extern int pa_serialize_tlb_flushes;
 #else
@@ -86,18 +80,16 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
-#define set_pte(pteptr, pteval)                                 \
-        do{                                                     \
-                *(pteptr) = (pteval);                           \
-        } while(0)
+#define set_pte(pteptr, pteval)			\
+	do {					\
+		*(pteptr) = (pteval);		\
+		barrier();			\
+	} while(0)
 
-#define set_pte_at(mm, addr, ptep, pteval)			\
-	do {							\
-		unsigned long flags;				\
-		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
-		set_pte(ptep, pteval);				\
-		purge_tlb_entries(mm, addr);			\
-		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
+#define set_pte_at(mm, addr, pteptr, pteval)	\
+	do {					\
+		*(pteptr) = (pteval);		\
+		purge_tlb_entries(mm, addr);	\
 	} while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -120,12 +112,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PGD_ORDER	1 /* Number of pages per pgd */
-#define PMD_ORDER	1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
+#define PMD_ORDER	1
+#define PGD_ORDER	0
 #else
-#define PGD_ORDER	1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
+#define PGD_ORDER	1
 #endif
 
 /* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -240,11 +230,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
  * able to effectively address 40/42/44-bits of physical address space
  * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
-#define _PxD_ATTACHED_BIT  30
-#define _PxD_VALID_BIT     29
+#define _PxD_VALID_BIT     30
 
 #define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
-#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
@@ -326,23 +314,10 @@ extern unsigned long *empty_zero_page;
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#if CONFIG_PGTABLE_LEVELS == 3
-/* The first entry of the permanent pmd is not there if it contains
- * the gateway marker */
-#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
-#else
 #define pmd_none(x)	(!pmd_val(x))
-#endif
 #define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#if CONFIG_PGTABLE_LEVELS == 3
-	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the entry pointing to the permanent pmd
-		 * attached to the pgd; cannot clear it */
-		set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
-	else
-#endif
 	set_pmd(pmd,  __pmd(0));
 }
 
@@ -358,12 +333,6 @@ static inline void pmd_clear(pmd_t *pmd) {
 #define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
 #define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
 static inline void pud_clear(pud_t *pud) {
-#if CONFIG_PGTABLE_LEVELS == 3
-	if(pud_flag(*pud) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pud; cannot
-		 * free it */
-		return;
-#endif
 	set_pud(pud, __pud(0));
 }
 #endif
@@ -456,32 +425,18 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
-static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
-{
-	if (unlikely(pgd == swapper_pg_dir))
-		return &pa_swapper_pg_lock;
-	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
-}
-
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
-	unsigned long flags;
 
 	if (!pte_young(*ptep))
 		return 0;
 
-	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
 	pte = *ptep;
 	if (!pte_young(pte)) {
-		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
-	purge_tlb_entries(vma->vm_mm, addr);
-	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
 	return 1;
 }
 
@@ -489,24 +444,16 @@ struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
-	unsigned long flags;
 
-	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
-	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+	set_pte_at(mm, addr, ptep, __pte(0));
 
 	return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	unsigned long flags;
-	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
-	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
......
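Taken together, these pgtable.h changes drop the private pgd spinlock entirely: set_pte_at() is now a plain store plus TLB purge, and consistency against the software TLB miss handlers comes from the existing mm->page_table_lock, whose lock-word address switch_mm_irqs_off() published in cr28 above. A simplified mental model (assumes CONFIG_TLB_PTLOCK; not code from this diff):

	/* CPU A: generic mm code             CPU B: TLB miss handler (entry.S)
	 *
	 * spin_lock(&mm->page_table_lock);   ptl_lock    (ldcw on the word
	 * ... modify page table entries ...              addressed by cr28)
	 * spin_unlock(&mm->page_table_lock);
	 *                                    update_accessed / update_dirty
	 *                                    insert TLB entry
	 *                                    ptl_unlock  (ordered store)
	 */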
@@ -268,7 +268,6 @@ int main(void)
 	DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
 	DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
 	DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
-	DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
 	DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
 	DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
 	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
......
@@ -35,10 +35,9 @@
 	.level 2.0
 #endif
 
-	.import		pa_tlb_lock,data
-	.macro  load_pa_tlb_lock reg
-	mfctl		%cr25,\reg
-	addil		L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
+	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
+	.macro  get_ptl reg
+	mfctl	%cr28,\reg
 	.endm
 
 	/* space_to_prot macro creates a prot id from a space id */
@@ -407,7 +406,9 @@
 # endif
 #endif
 	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+#if CONFIG_PGTABLE_LEVELS < 3
 	copy		%r0,\pte
+#endif
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
@@ -417,38 +418,23 @@
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
 	.endm
 
-	/* Look up PTE in a 3-Level scheme.
-	 *
-	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
-	 * first pmd adjacent to the pgd.  This means that we can
-	 * subtract a constant offset to get to it.  The pmd and pgd
-	 * sizes are arranged so that a single pmd covers 4GB (giving
-	 * a full LP64 process access to 8TB) so our lookups are
-	 * effectively L2 for the first 4GB of the kernel (i.e. for
-	 * all ILP32 processes and all the kernel for machines with
-	 * under 4GB of memory) */
+	/* Look up PTE in a 3-Level scheme. */
 	.macro		L3_ptep pgd,pte,index,va,fault
-#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3
+	copy		%r0,\pte
 	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldw,s		\index(\pgd),\pgd
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	shld		\pgd,PxD_VALUE_SHIFT,\index
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	copy		\index,\pgd
-	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
+	shld		\pgd,PxD_VALUE_SHIFT,\pgd
 #endif
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
-	/* Acquire pa_tlb_lock lock and check page is present. */
-	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
-#ifdef CONFIG_SMP
+	/* Acquire page_table_lock and check page is present. */
+	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
+#ifdef CONFIG_TLB_PTLOCK
 98:	cmpib,COND(=),n	0,\spc,2f
-	load_pa_tlb_lock \tmp
+	get_ptl		\tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
@@ -463,26 +449,26 @@
 3:
 	.endm
 
-	/* Release pa_tlb_lock lock without reloading lock address.
+	/* Release page_table_lock without reloading lock address.
 	   Note that the values in the register spc are limited to
 	   NR_SPACE_IDS (262144).  Thus, the stw instruction always
 	   stores a nonzero value even when register spc is 64 bits.
 	   We use an ordered store to ensure all prior accesses are
 	   performed prior to releasing the lock. */
-	.macro		tlb_unlock0	spc,tmp
-#ifdef CONFIG_SMP
+	.macro		ptl_unlock0	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
 98:	or,COND(=)	%r0,\spc,%r0
 	stw,ma		\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
 
-	/* Release pa_tlb_lock lock. */
-	.macro		tlb_unlock1	spc,tmp
-#ifdef CONFIG_SMP
-98:	load_pa_tlb_lock \tmp
+	/* Release page_table_lock. */
+	.macro		ptl_unlock1	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98:	get_ptl		\tmp
+	ptl_unlock0	\spc,\tmp
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
@@ -1165,14 +1151,14 @@ dtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1191,14 +1177,14 @@ nadtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1219,7 +1205,7 @@ dtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1232,7 +1218,7 @@ dtlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1252,7 +1238,7 @@ nadtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1265,7 +1251,7 @@ nadtlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1285,7 +1271,7 @@ dtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1294,7 +1280,7 @@ dtlb_miss_20:
 
 	idtlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1313,7 +1299,7 @@ nadtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1322,7 +1308,7 @@ nadtlb_miss_20:
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1422,14 +1408,14 @@ itlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1446,14 +1432,14 @@ naitlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1474,7 +1460,7 @@ itlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1487,7 +1473,7 @@ itlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1498,7 +1484,7 @@ naitlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1511,7 +1497,7 @@ naitlb_miss_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1532,7 +1518,7 @@ itlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1541,7 +1527,7 @@ itlb_miss_20:
 
 	iitlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1552,7 +1538,7 @@ naitlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1561,7 +1547,7 @@ naitlb_miss_20:
 
 	iitlbt          pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1584,14 +1570,14 @@ dbit_trap_20w:
 
 	L3_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #else
@@ -1604,7 +1590,7 @@ dbit_trap_11:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1617,7 +1603,7 @@ dbit_trap_11:
 
 	mtsp		t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 
@@ -1628,7 +1614,7 @@ dbit_trap_20:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1637,7 +1623,7 @@ dbit_trap_20:
 
 	idtlbt		pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #endif
......
@@ -289,13 +289,3 @@ os_hpmc_6:
 	b .
 	nop
 	.align 16	/* make function length multiple of 16 bytes */
-.os_hpmc_end:
-
-	__INITRODATA
-	.globl os_hpmc_size
-	.align 4
-	.type   os_hpmc_size, @object
-	.size   os_hpmc_size, 4
-os_hpmc_size:
-	.word .os_hpmc_end-.os_hpmc
@@ -373,7 +373,11 @@ static inline int eirr_to_irq(unsigned long eirr)
 /*
  * IRQ STACK - used for irq handler
  */
+#ifdef CONFIG_64BIT
+#define IRQ_STACK_SIZE      (4096 << 4) /* 64k irq stack size */
+#else
 #define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
+#endif
 
 union irq_stack_union {
 	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
......
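The arithmetic behind the new constant: 4096 << 4 = 65536 bytes, i.e. a 64 KB stack (8192 unsigned-long slots on 64-bit), versus the 4096 << 3 = 32768 bytes (32 KB) kept for 32-bit kernels.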
@@ -260,6 +260,8 @@ get_wchan(struct task_struct *p)
 	do {
 		if (unwind_once(&info) < 0)
 			return 0;
+		if (p->state == TASK_RUNNING)
+			return 0;
 		ip = info.ip;
 		if (!in_sched_functions(ip))
 			return ip;
......
@@ -798,14 +798,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
 void __init initialize_ivt(const void *iva)
 {
-	extern u32 os_hpmc_size;
 	extern const u32 os_hpmc[];
 
 	int i;
 	u32 check = 0;
 	u32 *ivap;
 	u32 *hpmcp;
-	u32 length, instr;
+	u32 instr;
 
 	if (strcmp((const char *)iva, "cows can fly"))
 		panic("IVT invalid");
@@ -836,18 +835,14 @@ void __init initialize_ivt(const void *iva)
 
 	/* Setup IVA and compute checksum for HPMC handler */
 	ivap[6] = (u32)__pa(os_hpmc);
-	length = os_hpmc_size;
-	ivap[7] = length;
 
 	hpmcp = (u32 *)os_hpmc;
 
-	for (i=0; i<length/4; i++)
-	    check += *hpmcp++;
-
 	for (i=0; i<8; i++)
 	    check += ivap[i];
 
 	ivap[5] = -check;
+	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
 }
......
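With the length word gone, the checksum in ivap[5] now balances only the first eight IVT words rather than also folding in the HPMC handler body. Because ivap[5] is still zero when the loop runs, setting it to -check makes the eight words sum to zero; that is the invariant the code establishes (whether firmware verifies exactly this form before branching to the handler is an assumption here):

	/* Invariant after initialize_ivt(): the first 8 IVT words sum to 0. */
	u32 sum = 0;
	int i;

	for (i = 0; i < 8; i++)
		sum += ivap[i];
	/* sum == (old check) + ivap[5] == check - check == 0 */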
@@ -142,24 +142,17 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	__set_huge_pte_at(mm, addr, ptep, entry);
-	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
-	unsigned long flags;
 	pte_t entry;
 
-	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	entry = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, __pte(0));
-	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 
 	return entry;
 }
@@ -168,29 +161,23 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep)
 {
-	unsigned long flags;
 	pte_t old_pte;
 
-	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	old_pte = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep,
 				pte_t pte, int dirty)
 {
-	unsigned long flags;
 	int changed;
 	struct mm_struct *mm = vma->vm_mm;
 
-	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	changed = !pte_same(*ptep, pte);
 	if (changed) {
 		__set_huge_pte_at(mm, addr, ptep, pte);
 	}
-	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 	return changed;
 }
......
@@ -37,11 +37,6 @@ extern int  data_start;
 extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 
 #if CONFIG_PGTABLE_LEVELS == 3
-/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
- * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
- * guarantee that global objects will be laid out in memory in the same order
- * as the order of declaration, so put these in different sections and use
- * the linker script to order them. */
 pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
 #endif
 
@@ -559,6 +554,11 @@ void __init mem_init(void)
 	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
 	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
 			> BITS_PER_LONG);
+#if CONFIG_PGTABLE_LEVELS == 3
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
+#else
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
+#endif
 
 	high_memory = __va((max_pfn << PAGE_SHIFT));
 	set_max_mapnr(max_low_pfn);
......
@@ -186,6 +186,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
 	unsigned char k_rand_bytes[16];
 	int items;
 	elf_addr_t *elf_info;
+	elf_addr_t flags = 0;
 	int ei_index;
 	const struct cred *cred = current_cred();
 	struct vm_area_struct *vma;
@@ -260,7 +261,9 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
 	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
 	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
 	NEW_AUX_ENT(AT_BASE, interp_load_addr);
-	NEW_AUX_ENT(AT_FLAGS, 0);
+	if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
+		flags |= AT_FLAGS_PRESERVE_ARGV0;
+	NEW_AUX_ENT(AT_FLAGS, flags);
 	NEW_AUX_ENT(AT_ENTRY, e_entry);
 	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
 	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
......
@@ -506,6 +506,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 	char __user *u_platform, *u_base_platform, *p;
 	int loop;
 	int nr;	/* reset for each csp adjustment */
+	unsigned long flags = 0;
 
 #ifdef CONFIG_MMU
 	/* In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
@@ -648,7 +649,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 	NEW_AUX_ENT(AT_PHENT,	sizeof(struct elf_phdr));
 	NEW_AUX_ENT(AT_PHNUM,	exec_params->hdr.e_phnum);
 	NEW_AUX_ENT(AT_BASE,	interp_params->elfhdr_addr);
-	NEW_AUX_ENT(AT_FLAGS,	0);
+	if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
+		flags |= AT_FLAGS_PRESERVE_ARGV0;
+	NEW_AUX_ENT(AT_FLAGS,	flags);
 	NEW_AUX_ENT(AT_ENTRY,	exec_params->entry_addr);
 	NEW_AUX_ENT(AT_UID,	(elf_addr_t) from_kuid_munged(cred->user_ns, cred->uid));
 	NEW_AUX_ENT(AT_EUID,	(elf_addr_t) from_kuid_munged(cred->user_ns, cred->euid));
......
@@ -153,7 +153,9 @@ static int load_misc_binary(struct linux_binprm *bprm)
 	if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
 		goto ret;
 
-	if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
+	if (fmt->flags & MISC_FMT_PRESERVE_ARGV0) {
+		bprm->interp_flags |= BINPRM_FLAGS_PRESERVE_ARGV0;
+	} else {
 		retval = remove_arg_zero(bprm);
 		if (retval)
 			goto ret;
......
@@ -73,6 +73,10 @@ struct linux_binprm {
 #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
 
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
+
 /* Function parameter for binfmt->coredump */
 struct coredump_params {
 	const kernel_siginfo_t *siginfo;
......
@@ -18,4 +18,8 @@ struct pt_regs;
 /* sizeof(linux_binprm->buf) */
 #define BINPRM_BUF_SIZE 256
 
+/* preserve argv0 for the interpreter */
+#define AT_FLAGS_PRESERVE_ARGV0_BIT 0
+#define AT_FLAGS_PRESERVE_ARGV0 (1 << AT_FLAGS_PRESERVE_ARGV0_BIT)
+
 #endif /* _UAPI_LINUX_BINFMTS_H */
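On the consuming side, an interpreter started through binfmt_misc can test the new bit in its auxiliary vector. A hypothetical userspace check, e.g. what qemu-user might do (getauxval() is glibc; AT_FLAGS comes from <elf.h>, AT_FLAGS_PRESERVE_ARGV0 from the uapi header in the hunk above):

	#include <elf.h>		/* AT_FLAGS */
	#include <sys/auxv.h>		/* getauxval() */
	#include <linux/binfmts.h>	/* AT_FLAGS_PRESERVE_ARGV0 */

	/* Nonzero if binfmt_misc asked us to keep argv[0] untouched. */
	static int argv0_preserved(void)
	{
		return (getauxval(AT_FLAGS) & AT_FLAGS_PRESERVE_ARGV0) != 0;
	}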