Commit e27fc7e4 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Patch sun4v code sequences properly on module load.
  sparc: Kill custom io_remap_pfn_range().
  sparc: Stash orig_i0 into %g6 instead of %g2
  sparc: Fix handling of orig_i0 wrt. debugging when restarting syscalls.
  sparc: sigutil: Include <linux/errno.h>
parents b6844523 0b64120c
arch/sparc/include/asm/pgtable_32.h
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
 #define kern_addr_valid(addr) \
 	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma,
-			      unsigned long from, unsigned long pfn,
-			      unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 #define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)		(pfn & 0x0fffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+			   unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+				     unsigned long from, unsigned long pfn,
+				     unsigned long size, pgprot_t prot)
+{
+	unsigned long long offset, space, phys_base;
+
+	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
+	space = GET_IOSPACE(pfn);
+	phys_base = offset | (space << 32ULL);
+
+	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
 ({									  \
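Note for callers: the pfn argument to io_remap_pfn_range() is not a plain page frame number; its top four bits select an I/O space, which GET_IOSPACE()/GET_PFN() split back apart as above. A minimal sketch of a driver mmap() handler built on that convention (set_iospace(), demo_mmap() and the sample offset are hypothetical; only GET_IOSPACE()/GET_PFN() and io_remap_pfn_range() come from the header):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical helper: pack a 4-bit iospace into the top bits of a pfn,
 * the inverse of the GET_IOSPACE()/GET_PFN() macros above. */
static unsigned long set_iospace(unsigned long pfn, unsigned long space)
{
	return pfn | (space << (BITS_PER_LONG - 4));
}

/* Sketch: map iospace 1 at bus offset 0x100000 into a userspace VMA. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = set_iospace(0x100000UL >> PAGE_SHIFT, 1UL);

	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}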
arch/sparc/include/asm/pgtable_64.h
@@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
 
 extern int page_in_phys_avail(unsigned long paddr);
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-			      unsigned long pfn,
-			      unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 #define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)		(pfn & 0x0fffffffffffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+			   unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+				     unsigned long from, unsigned long pfn,
+				     unsigned long size, pgprot_t prot)
+{
+	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+	int space = GET_IOSPACE(pfn);
+	unsigned long phys_base;
+
+	phys_base = offset | (((unsigned long) space) << 32UL);
+
+	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
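The 64-bit variant is the same delegation with wider types: unsigned long is already 64 bits wide, so no long long arithmetic is needed, and GET_PFN() keeps the low 60 bits of the pfn rather than 28. In both headers the net effect is that the old hand-rolled page-table walkers (the arch/sparc/mm/generic_*.c files deleted at the bottom of this diff) are replaced by thin inlines that rebuild the physical base address and hand off to the generic remap_pfn_range().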
arch/sparc/kernel/entry.h
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
+
+#include <asm/trap_block.h>
+
 struct popc_3insn_patch_entry {
 	unsigned int	addr;
 	unsigned int	insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
 	__popc_6insn_patch_end;
 
 extern void __init per_cpu_patch(void);
+extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+				    struct sun4v_1insn_patch_entry *);
+extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+				    struct sun4v_2insn_patch_entry *);
 extern void __init sun4v_patch(void);
 extern void __init boot_cpu_id_too_large(int cpu);
 extern unsigned int dcache_parity_tl1_occurred;
arch/sparc/kernel/module.c
@@ -17,6 +17,8 @@
 #include <asm/processor.h>
 #include <asm/spitfire.h>
 
+#include "entry.h"
+
 #ifdef CONFIG_SPARC64
 #include <linux/jump_label.h>
 
@@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_SPARC64
+static void do_patch_sections(const Elf_Ehdr *hdr,
+			      const Elf_Shdr *sechdrs)
+{
+	const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
+	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+		if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
+			sun4v_1insn = s;
+		if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
+			sun4v_2insn = s;
+	}
+
+	if (sun4v_1insn && tlb_type == hypervisor) {
+		void *p = (void *) sun4v_1insn->sh_addr;
+		sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
+	}
+	if (sun4v_2insn && tlb_type == hypervisor) {
+		void *p = (void *) sun4v_2insn->sh_addr;
+		sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
+	}
+}
+
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
@@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 	/* make jump label nops */
 	jump_label_apply_nops(me);
 
+	do_patch_sections(hdr, sechdrs);
+
 	/* Cheetah's I-cache is fully coherent. */
 	if (tlb_type == spitfire) {
 		unsigned long va;
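For do_patch_sections() to find anything, module objects must carry .sun4v_1insn_patch/.sun4v_2insn_patch sections, and those are emitted at the patch sites themselves. Roughly, the idiom looks like the following inline-asm sketch (the nop opcodes are purely illustrative; the two words written into the section correspond to the addr and insn members of struct sun4v_1insn_patch_entry):

/* Illustrative sun4v patch site: execute the default instruction on
 * pre-sun4v systems, and let sun4v_patch_1insn_range() overwrite it
 * on hypervisor with the replacement recorded in the patch section. */
__asm__("661:	nop\n"					/* default instruction */
	".section	.sun4v_1insn_patch, \"ax\"\n"
	".word	661b\n"					/* entry->addr */
	"nop\n"						/* entry->insn: sun4v replacement */
	".previous\n");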
arch/sparc/kernel/setup_64.c
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
 	}
 }
 
-void __init sun4v_patch(void)
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
+			     struct sun4v_1insn_patch_entry *end)
 {
-	extern void sun4v_hvapi_init(void);
-	struct sun4v_1insn_patch_entry *p1;
-	struct sun4v_2insn_patch_entry *p2;
-
-	if (tlb_type != hypervisor)
-		return;
-
-	p1 = &__sun4v_1insn_patch;
-	while (p1 < &__sun4v_1insn_patch_end) {
-		unsigned long addr = p1->addr;
+	while (start < end) {
+		unsigned long addr = start->addr;
 
-		*(unsigned int *) (addr +  0) = p1->insn;
+		*(unsigned int *) (addr +  0) = start->insn;
 		wmb();
 		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
 
-		p1++;
+		start++;
 	}
+}
 
-	p2 = &__sun4v_2insn_patch;
-	while (p2 < &__sun4v_2insn_patch_end) {
-		unsigned long addr = p2->addr;
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+			     struct sun4v_2insn_patch_entry *end)
+{
+	while (start < end) {
+		unsigned long addr = start->addr;
 
-		*(unsigned int *) (addr +  0) = p2->insns[0];
+		*(unsigned int *) (addr +  0) = start->insns[0];
 		wmb();
 		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
 
-		*(unsigned int *) (addr +  4) = p2->insns[1];
+		*(unsigned int *) (addr +  4) = start->insns[1];
 		wmb();
 		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));
 
-		p2++;
+		start++;
 	}
+}
+
+void __init sun4v_patch(void)
+{
+	extern void sun4v_hvapi_init(void);
+
+	if (tlb_type != hypervisor)
+		return;
+
+	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
+				&__sun4v_1insn_patch_end);
+
+	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
+				&__sun4v_2insn_patch_end);
 
 	sun4v_hvapi_init();
 }
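The store/wmb()/flush cadence above is the load-bearing detail: on sparc64 a store to kernel text is not automatically visible to instruction fetch, so each patched word is ordered with wmb() and then pushed at the I-cache with a flush instruction before it can be executed. Isolated as a sketch (patch_one_insn() is a hypothetical name; the body mirrors one iteration of the loops above):

static void patch_one_insn(unsigned long addr, unsigned int insn)
{
	*(unsigned int *) addr = insn;		/* store the new opcode */
	wmb();					/* order the store before the flush */
	__asm__ __volatile__("flush	%0"	/* synchronize I-fetch for this word */
			     : : "r" (addr));
}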
arch/sparc/kernel/signal32.c
@@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
-		 int restart_syscall, unsigned long orig_i0)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs)
 {
 	struct k_sigaction ka;
+	unsigned long orig_i0;
+	int restart_syscall;
 	siginfo_t info;
 	int signr;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-	/* If the debugger messes with the program counter, it clears
-	 * the "in syscall" bit, directing us to not perform a syscall
-	 * restart.
-	 */
-	if (restart_syscall && !pt_regs_is_syscall(regs))
-		restart_syscall = 0;
+	restart_syscall = 0;
+	orig_i0 = 0;
+	if (pt_regs_is_syscall(regs) &&
+	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+		restart_syscall = 1;
+		orig_i0 = regs->u_regs[UREG_G6];
+	}
 
 	if (signr > 0) {
 		if (restart_syscall)
arch/sparc/kernel/signal_32.c
@@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 	siginfo_t info;
 	int signr;
 
+	/* It's a lot of work and synchronization to add a new ptrace
+	 * register for GDB to save and restore in order to get
+	 * orig_i0 correct for syscall restarts when debugging.
+	 *
+	 * Although it should be the case that most of the global
+	 * registers are volatile across a system call, glibc already
+	 * depends upon the fact that we preserve them.  So we can't
+	 * just use any global register to save away the orig_i0 value.
+	 *
+	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+	 * preserved across a system call trap by various pieces of
+	 * code in glibc.
+	 *
+	 * %g7 is used as the "thread register".  %g6 is not used in
+	 * any fixed manner.  %g6 is used as a scratch register and
+	 * a compiler temporary, but its value is never used across
+	 * a system call.  Therefore %g6 is usable for orig_i0 storage.
+	 */
 	if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
-		restart_syscall = 1;
-	else
-		restart_syscall = 0;
+		regs->u_regs[UREG_G6] = orig_i0;
 
 	if (test_thread_flag(TIF_RESTORE_SIGMASK))
 		oldset = &current->saved_sigmask;
@@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 	 * the software "in syscall" bit, directing us to not perform
 	 * a syscall restart.
 	 */
-	if (restart_syscall && !pt_regs_is_syscall(regs))
-		restart_syscall = 0;
+	restart_syscall = 0;
+	if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
+		restart_syscall = 1;
+		orig_i0 = regs->u_regs[UREG_G6];
+	}
 
 	if (signr > 0) {
 		if (restart_syscall)
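The practical upshot: orig_i0 now rides through get_signal_to_deliver() in %g6, i.e. in state that ptrace already saves and restores along with the other globals. If a debugger rewrites the registers at a signal stop, the value read back from UREG_G6 afterwards reflects the debugger's changes, which is exactly what the old restart_syscall plumbing could not guarantee.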
arch/sparc/kernel/signal_64.c
@@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 	siginfo_t info;
 	int signr;
 
+	/* It's a lot of work and synchronization to add a new ptrace
+	 * register for GDB to save and restore in order to get
+	 * orig_i0 correct for syscall restarts when debugging.
+	 *
+	 * Although it should be the case that most of the global
+	 * registers are volatile across a system call, glibc already
+	 * depends upon the fact that we preserve them.  So we can't
+	 * just use any global register to save away the orig_i0 value.
+	 *
+	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+	 * preserved across a system call trap by various pieces of
+	 * code in glibc.
+	 *
+	 * %g7 is used as the "thread register".  %g6 is not used in
+	 * any fixed manner.  %g6 is used as a scratch register and
+	 * a compiler temporary, but its value is never used across
+	 * a system call.  Therefore %g6 is usable for orig_i0 storage.
+	 */
 	if (pt_regs_is_syscall(regs) &&
-	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
-		restart_syscall = 1;
-	} else
-		restart_syscall = 0;
+	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
+		regs->u_regs[UREG_G6] = orig_i0;
 
 	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
@@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 
 #ifdef CONFIG_COMPAT
 	if (test_thread_flag(TIF_32BIT)) {
-		extern void do_signal32(sigset_t *, struct pt_regs *,
-					int restart_syscall,
-					unsigned long orig_i0);
-		do_signal32(oldset, regs, restart_syscall, orig_i0);
+		extern void do_signal32(sigset_t *, struct pt_regs *);
+		do_signal32(oldset, regs);
 		return;
 	}
 #endif
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-	/* If the debugger messes with the program counter, it clears
-	 * the software "in syscall" bit, directing us to not perform
-	 * a syscall restart.
-	 */
-	if (restart_syscall && !pt_regs_is_syscall(regs))
-		restart_syscall = 0;
+	restart_syscall = 0;
+	if (pt_regs_is_syscall(regs) &&
+	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+		restart_syscall = 1;
+		orig_i0 = regs->u_regs[UREG_G6];
+	}
 
 	if (signr > 0) {
 		if (restart_syscall)
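For reference, here is roughly what happens with the recovered value. A simplified sketch of the restart path, modeled on the syscall_restart() helpers these files already call (the ERESTARTSYS/ERESTARTNOINTR case analysis is elided, so treat this as illustrative only):

static void restart_sketch(unsigned long orig_i0, struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = orig_i0;	/* undo the error-code clobber of %i0 */
	regs->tpc -= 4;				/* back up to the trap instruction */
	regs->tnpc -= 4;			/* keep the delay-slot pc consistent */
}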
arch/sparc/kernel/sigutil_64.c
@@ -2,6 +2,7 @@
 #include <linux/types.h>
 #include <linux/thread_info.h>
 #include <linux/uaccess.h>
+#include <linux/errno.h>
 
 #include <asm/sigcontext.h>
 #include <asm/fpumacro.h>
arch/sparc/mm/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += loadmmu.o
-obj-y                   += generic_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
arch/sparc/mm/generic_32.c
deleted file mode 100644
/*
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space.
*
* They use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
*/
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
address += PAGE_SIZE;
offset += PAGE_SIZE;
pte++;
} while (address < end);
}
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
offset -= address;
do {
pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
if (!pte)
return -ENOMEM;
io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
return 0;
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
int error = 0;
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
struct mm_struct *mm = vma->vm_mm;
int space = GET_IOSPACE(pfn);
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
/* See comment in mm/memory.c remap_pfn_range */
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
vma->vm_pgoff = (offset >> PAGE_SHIFT) |
((unsigned long)space << 28UL);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
while (from < end) {
pmd_t *pmd = pmd_alloc(mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
flush_tlb_range(vma, beg, end);
return error;
}
EXPORT_SYMBOL(io_remap_pfn_range);
arch/sparc/mm/generic_64.c
deleted file mode 100644
/*
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space.
*
* They use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
*/
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
unsigned long address,
unsigned long size,
unsigned long offset, pgprot_t prot,
int space)
{
unsigned long end;
/* clear hack bit that was used as a write_combine side-effect flag */
offset &= ~0x1UL;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t entry;
unsigned long curend = address + PAGE_SIZE;
entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
if (!(address & 0xffff)) {
if (PAGE_SIZE < (4 * 1024 * 1024) &&
!(address & 0x3fffff) &&
!(offset & 0x3ffffe) &&
end >= address + 0x400000) {
entry = mk_pte_io(offset, prot, space,
4 * 1024 * 1024);
curend = address + 0x400000;
offset += 0x400000;
} else if (PAGE_SIZE < (512 * 1024) &&
!(address & 0x7ffff) &&
!(offset & 0x7fffe) &&
end >= address + 0x80000) {
entry = mk_pte_io(offset, prot, space,
512 * 1024 * 1024);
curend = address + 0x80000;
offset += 0x80000;
} else if (PAGE_SIZE < (64 * 1024) &&
!(offset & 0xfffe) &&
end >= address + 0x10000) {
entry = mk_pte_io(offset, prot, space,
64 * 1024);
curend = address + 0x10000;
offset += 0x10000;
} else
offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
if (pte_write(entry))
entry = pte_mkdirty(entry);
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, address, pte, entry);
address += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
pte++;
} while (address < curend);
} while (address < end);
}
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
offset -= address;
do {
pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
if (!pte)
return -ENOMEM;
io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
pte_unmap(pte);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
return 0;
}
static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PUD_MASK;
end = address + size;
if (end > PUD_SIZE)
end = PUD_SIZE;
offset -= address;
do {
pmd_t *pmd = pmd_alloc(mm, pud, address);
if (!pud)
return -ENOMEM;
io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
address = (address + PUD_SIZE) & PUD_MASK;
pud++;
} while (address < end);
return 0;
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
int error = 0;
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
struct mm_struct *mm = vma->vm_mm;
int space = GET_IOSPACE(pfn);
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
unsigned long phys_base;
phys_base = offset | (((unsigned long) space) << 32UL);
/* See comment in mm/memory.c remap_pfn_range */
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
vma->vm_pgoff = phys_base >> PAGE_SHIFT;
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
while (from < end) {
pud_t *pud = pud_alloc(mm, dir, from);
error = -ENOMEM;
if (!pud)
break;
error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
flush_tlb_range(vma, beg, end);
return error;
}
EXPORT_SYMBOL(io_remap_pfn_range);