Commit a7e79840 authored by Richard Kuo, committed by Linus Torvalds

Hexagon: Add page table header files, etc.

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Signed-off-by: Linas Vepstas <linas@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b7f3785f
/*
* Fixmap support for Hexagon - enough to support highmem features
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
/*
* A lot of the fixmap info is already in mem-layout.h
*/
#include <asm/mem-layout.h>
/*
* Full fixmap support involves set_fixmap() functions, but
* these may not be needed if all we're after is an area for
* highmem kernel mappings.
*/
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/**
* fix_to_virt -- "index to address" translation.
*
* If anyone tries to use the idx directly without translation,
* we catch the bug with a NULL-dereference kernel oops. Illegal
* ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* This branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message.
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
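/*
 * An illustrative sketch of the arithmetic (hypothetical values, not
 * from this header): assuming PAGE_SHIFT == 12 and FIXADDR_TOP ==
 * 0xfe000000,
 *
 * fix_to_virt(0) == 0xfe000000 - (0 << 12) == 0xfe000000
 * fix_to_virt(1) == 0xfe000000 - (1 << 12) == 0xfdfff000
 *
 * and virt_to_fix() inverts it: virt_to_fix(0xfdfff000) == 1. An
 * out-of-range index survives inlining as a call to the undefined
 * __this_fixmap_does_not_exist(), so the bug surfaces as a link-time
 * error rather than a runtime fault.
 */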
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
(vaddr)), (vaddr)), (vaddr))
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_MMU_H
#define _ASM_MMU_H
#include <asm/vdso.h>
/*
* Architecture-specific state for a mm_struct.
* For the Hexagon Virtual Machine, it can be a copy
* of the pointer to the page table base.
*/
struct mm_context {
unsigned long long generation;
unsigned long ptbase;
struct hexagon_vdso *vdso;
};
typedef struct mm_context mm_context_t;
#endif
/*
* MM context support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>
static inline void destroy_context(struct mm_struct *mm)
{
}
/*
* VM port hides all TLB management, so "lazy TLB" isn't very
* meaningful. Even for ports to architectures with visible TLBs,
* this is almost invariably a null function.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
/*
* Architecture-specific actions, if any, for memory map deactivation.
*/
static inline void deactivate_mm(struct task_struct *tsk,
struct mm_struct *mm)
{
}
/**
* init_new_context - initialize context related info for new mm_struct instance
* @tsk: pointer to a task struct
* @mm: pointer to a new mm struct
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
/* mm->context is set up by pgd_alloc */
return 0;
}
/*
* Switch active mm context
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int l1;
/*
* For the virtual machine, we have to update the system map if it
* has been touched.
*/
if (next->context.generation < prev->context.generation) {
for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
next->pgd[l1] = init_mm.pgd[l1];
next->context.generation = prev->context.generation;
}
__vmnewmap((void *)next->context.ptbase);
}
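/*
 * A minimal sketch of the generation scheme, with made-up numbers:
 * when a kernel mapping is added, pmd_populate_kernel() bumps the
 * global kmap_generation (say 41 -> 42) and stamps it into the mms
 * it updates directly. An mm still carrying 41 is stale; on its next
 * switch_mm() the comparison above fires, the kernel segments
 * MIN_KERNEL_SEG..max_kernel_seg are recopied from init_mm.pgd, and
 * the mm adopts the newer generation before __vmnewmap() installs
 * its page table base.
 */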
/*
* Activate new memory map for task
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
local_irq_save(flags);
switch_mm(prev, next, current_thread_info()->task);
local_irq_restore(flags);
}
/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>
#endif
/*
* Page management definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H
#include <linux/const.h>
/* This is probably not the most graceful way to handle this. */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif
#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif
#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif
/*
* These should be defined in hugetlb.h, but apparently they are not.
* "Huge" for us should be 4MB or 16MB, which are both represented
* in L1 PTE's. Right now, it's set up for 4MB.
*/
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
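/*
 * Worked numbers for the definitions above, assuming 4KB base pages:
 * HPAGE_SHIFT of 22 gives HPAGE_SIZE = 1UL << 22 = 4MB and
 * HUGETLB_PAGE_ORDER = 22 - 12 = 10, i.e. one huge page covers
 * 2^10 = 1024 base pages.
 */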
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
* This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
*/
#include <linux/pfn.h>
/*
* We implement a two-level architecture-specific page table structure.
* Null intermediate page table level (pmd, pud) definitions will come from
* asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
/*
* We need a __pa and a __va routine for kernel space.
* MIPS says they're only used during mem_init.
* Also, check whether we need a PHYS_OFFSET.
*/
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
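/*
 * A minimal sketch of the linear mapping, with a hypothetical
 * PAGE_OFFSET of 0xc0000000: __pa(0xc0041000) == 0x00041000 and
 * __va(0x00041000) == (void *)0xc0041000. The pair is only valid
 * for addresses inside the kernel's direct mapping.
 */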
/* The "page frame" descriptor is defined in linux/mm.h */
struct page;
/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
/* Default vm area behavior is non-executable. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Need to avoid using a define for the line size; may move this to another file. */
static inline void clear_page(void *page)
{
/* This can only be done on pages with L1 WB cache */
asm volatile(
" loop0(1f,%1);\n"
"1: { dczeroa(%0);\n"
" %0 = add(%0,#32); }:endloop0\n"
: "+r" (page)
: "r" (PAGE_SIZE/32)
: "lc0", "sa0", "memory"
);
}
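/*
 * A C-level sketch of the loop above, assuming the 32-byte L1 cache
 * line that the add of #32 implies: dczeroa allocates and zeroes one
 * cache line per iteration, and the hardware loop runs PAGE_SIZE/32
 * times. Roughly:
 *
 * for (i = 0; i < PAGE_SIZE / 32; i++, p += 32)
 * dczeroa(p); // zero-allocate one line
 */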
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
/*
* Under assumption that kernel always "sees" user map...
*/
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/**
 * page_to_phys - convert page to physical address
 * @page: pointer to page entry in mem_map
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
* For the port to the Hexagon Virtual Machine, MAYBE we check for attempts
* to reference reserved HVM space, but in any case, the VM will be
* protected.
*/
#define kern_addr_valid(addr) (1)
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>
#endif /* !__ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */
#endif
/*
* Page table support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
#include <asm/mem-layout.h>
#include <asm/atomic.h>
#define check_pgt_cache() do {} while (0)
extern unsigned long long kmap_generation;
/*
* Page table creation interface
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
/*
* There may be better ways to do this, but to ensure
* that new address spaces always contain the kernel
* base mapping, and to ensure that the user area is
* initially marked invalid, initialize the new map
* with a copy of the kernel's persistent map.
*/
memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
mm->context.generation = kmap_generation;
/* Physical version is what is passed to virtual machine on switch */
mm->context.ptbase = __pa(pgd);
return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
if (pte)
pgtable_page_ctor(pte);
return pte;
}
/* _kernel variant gets to use a different allocator */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
}
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte)
{
/*
* Conveniently, zero in 3 LSB means indirect 4K page table.
* Not so convenient when you're trying to vary the page size.
*/
set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
HEXAGON_L1_PTE_SIZE));
}
/*
* Other architectures seem to have ways of making all processes
* share the same pmd's for their kernel mappings, but the v0.3
* Hexagon VM spec has a "monolithic" L1 table for user and kernel
* segments. We track "generations" of the kernel map to minimize
* overhead, and update the "slave" copies of the kernel mappings
* as part of switch_mm. However, we still need to update the
* kernel map of the active thread that is calling pmd_populate_kernel...
*/
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
extern spinlock_t kmap_gen_lock;
pmd_t *ppmd;
int pmdindex;
spin_lock(&kmap_gen_lock);
kmap_generation++;
mm->context.generation = kmap_generation;
current->active_mm->context.generation = kmap_generation;
spin_unlock(&kmap_gen_lock);
set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
/*
* Now the "slave" copy of the current thread.
* This is pointer arithmetic, not byte addresses!
*/
pmdindex = (pgd_t *)pmd - mm->pgd;
ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
if (pmdindex > max_kernel_seg)
max_kernel_seg = pmdindex;
}
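/*
 * A worked example of the index arithmetic above, with made-up
 * numbers: if pmd points at entry 0x3c0 of mm->pgd, the subtraction
 * yields pmdindex == 0x3c0 in units of table entries, not bytes.
 * The same slot in current->active_mm->pgd then receives the same
 * physical-address-plus-size encoding, and max_kernel_seg is raised
 * if this is the highest kernel segment seen so far.
 */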
#define __pte_free_tlb(tlb, pte, addr) \
do { \
pgtable_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
void __init pgtable_cache_init(void)
{
}