Commit 9edef286 authored by Paul Mundt

sh: uncached mapping helpers.

This adds some helper routines for uncached mapping support. This
simplifies some of the cases where we need to check the uncached mapping
boundaries, in addition to giving us a centralized location for building
more complex manipulations on top of.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 51becfd9
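
For illustration, the following stand-alone user-space sketch models the scheme this commit introduces. PAGE_OFFSET, MEMORY_SIZE, UNCACHED_SIZE and every address below are made-up stand-ins for the real SH memory layout, and the functions merely mirror the kernel code added further down; compile it as an ordinary C program to see the cached/uncached round trip.

    /* Hypothetical user-space model of the uncached mapping helpers. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_OFFSET	0x80000000UL	/* illustrative, not real SH */
    #define MEMORY_SIZE	0x04000000UL	/* pretend 64MB of lowmem */
    #define UNCACHED_SIZE	0x20000000UL	/* SZ_512M, as in the patch */

    static unsigned long memory_end = PAGE_OFFSET + MEMORY_SIZE;
    static unsigned long uncached_start, uncached_end;

    /* mirrors the new arch/sh/mm/uncached.c below */
    static int virt_addr_uncached(unsigned long kaddr)
    {
    	return (kaddr >= uncached_start) && (kaddr < uncached_end);
    }

    static void uncached_init(void)
    {
    	uncached_start = memory_end;
    	uncached_end = uncached_start + UNCACHED_SIZE;
    }

    /* mirrors the UNCAC_ADDR()/CAC_ADDR() macros from asm/page.h */
    #define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
    #define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)

    int main(void)
    {
    	unsigned long cached = PAGE_OFFSET + 0x1000;

    	uncached_init();
    	assert(!virt_addr_uncached(cached));
    	assert(virt_addr_uncached(UNCAC_ADDR(cached)));
    	assert(CAC_ADDR(UNCAC_ADDR(cached)) == cached);
    	printf("cached %#lx <-> uncached %#lx\n", cached, UNCAC_ADDR(cached));
    	return 0;
    }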
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -50,13 +50,22 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
+#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long uncached_start, uncached_end;
+extern int virt_addr_uncached(unsigned long kaddr);
+extern void uncached_init(void);
+#else
+#define virt_addr_uncached(kaddr)	(0)
+#define uncached_init()			do { } while (0)
+#endif
+
 static inline unsigned long
 pages_do_alias(unsigned long addr1, unsigned long addr2)
 {
 	return (addr1 ^ addr2) & shm_align_mask;
 }
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
@@ -135,6 +144,14 @@ typedef struct page *pgtable_t;
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
 #endif
 
+#ifdef CONFIG_UNCACHED_MAPPING
+#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
+#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
+#else
+#define UNCAC_ADDR(addr)	((addr))
+#define CAC_ADDR(addr)		((addr))
+#endif
+
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
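
Both macros are plain offset arithmetic between the two alias windows, so they invert each other. With illustrative values PAGE_OFFSET = 0x80000000 and uncached_start = 0x84000000 (not real SH settings):

    UNCAC_ADDR(0x80001000) = 0x80001000 - 0x80000000 + 0x84000000 = 0x84001000
    CAC_ADDR(0x84001000)   = 0x84001000 - 0x84000000 + 0x80000000 = 0x80001000

Hence CAC_ADDR(UNCAC_ADDR(x)) == x for any cached lowmem address x; the two addresses name the same physical memory and differ only in cacheability.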
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-#ifdef CONFIG_UNCACHED_MAPPING
-	/*
-	 * If PC points in to the uncached mapping, fix it up and hand
-	 * back the cached equivalent.
-	 */
-	if ((pc >= (memory_start + cached_to_uncached)) &&
-	    (pc <  (memory_start + cached_to_uncached + uncached_size)))
-		pc -= cached_to_uncached;
-#endif
+	if (virt_addr_uncached(pc))
+		return CAC_ADDR(pc);
+
 	return pc;
 }
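
The rewritten profile_pc() also shows why the surrounding #ifdef can go away entirely: when CONFIG_UNCACHED_MAPPING is disabled, the stub macros from page.h collapse the check at compile time. A minimal sketch of the pattern (fixup_pc is a hypothetical name, not part of this commit):

    static inline unsigned long fixup_pc(unsigned long pc)
    {
    	if (virt_addr_uncached(pc))	/* expands to (0) when disabled */
    		return CAC_ADDR(pc);	/* identity macro when disabled */
    	return pc;
    }

The compiler sees if (0) and discards the branch, so no references to uncached_start leak into configurations that never define it.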
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -152,6 +152,7 @@ ENTRY(_stext)
 	mov	#0, r10
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Uncached mapping
 	 */
@@ -171,6 +172,7 @@ ENTRY(_stext)
 	add	r4, r1
 	add	r4, r3
 	add	#1, r10
+#endif
 
 	/*
 	 * Iterate over all of the available sizes from largest to
@@ -216,6 +218,7 @@ ENTRY(_stext)
 	__PMB_ITER_BY_SIZE(64)
 	__PMB_ITER_BY_SIZE(16)
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Now that we can access it, update cached_to_uncached and
 	 * uncached_size.
@@ -228,6 +231,7 @@ ENTRY(_stext)
 	shll16	r7
 	shll8	r7
 	mov.l	r7, @r0
+#endif
 
 	/*
 	 * Clear the remaining PMB entries.
@@ -306,7 +310,9 @@ ENTRY(stack_start)
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
 .LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
 .LMMUCR:		.long	MMUCR
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
 .Lcached_to_uncached:	.long	cached_to_uncached
 .Luncached_size:	.long	uncached_size
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
 #endif
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
+obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -26,21 +26,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_UNCACHED_MAPPING
-/*
- * This is the offset of the uncached section from its cached alias.
- *
- * Legacy platforms handle trivial transitions between cached and
- * uncached segments by making use of the 1:1 mapping relationship in
- * 512MB lowmem, others via a special uncached mapping.
- *
- * Default value only valid in 29 bit mode, in 32bit mode this will be
- * updated by the early PMB initialization code.
- */
-unsigned long cached_to_uncached = 0x20000000;
-unsigned long uncached_size = SZ_512M;
-#endif
-
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -260,7 +245,7 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	/* Initialize the vDSO */
+	uncached_init();
 	vsyscall_init();
 
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -303,9 +288,7 @@ void __init mem_init(void)
 		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
 
 #ifdef CONFIG_UNCACHED_MAPPING
-		(unsigned long)memory_start + cached_to_uncached,
-		(unsigned long)memory_start + cached_to_uncached + uncached_size,
-		uncached_size >> 20,
+		uncached_start, uncached_end, uncached_size >> 20,
 #endif
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
--- /dev/null
+++ b/arch/sh/mm/uncached.c
@@ -0,0 +1,28 @@
+#include <linux/init.h>
+#include <asm/sizes.h>
+#include <asm/page.h>
+
+/*
+ * This is the offset of the uncached section from its cached alias.
+ *
+ * Legacy platforms handle trivial transitions between cached and
+ * uncached segments by making use of the 1:1 mapping relationship in
+ * 512MB lowmem, others via a special uncached mapping.
+ *
+ * Default value only valid in 29 bit mode, in 32bit mode this will be
+ * updated by the early PMB initialization code.
+ */
+unsigned long cached_to_uncached = SZ_512M;
+unsigned long uncached_size = SZ_512M;
+unsigned long uncached_start, uncached_end;
+
+int virt_addr_uncached(unsigned long kaddr)
+{
+	return (kaddr >= uncached_start) && (kaddr < uncached_end);
+}
+
+void __init uncached_init(void)
+{
+	uncached_start = memory_end;
+	uncached_end = uncached_start + uncached_size;
+}
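
Note that uncached_init() only anchors the alias window directly above memory_end; the PMB entry that actually maps that window is still programmed in head_32.S, which is why cached_to_uncached and uncached_size move into this file rather than disappearing. As a hedged sketch of how a consumer might use the resulting helpers (the flag variable and function name are illustrative, not part of this commit):

    /* Write a lowmem variable through its uncached alias so the store
     * bypasses the cache.  Assumes &flag lies inside the cached,
     * identity-mapped window that UNCAC_ADDR() covers. */
    static unsigned long flag;

    static void set_flag_uncached(unsigned long val)
    {
    	unsigned long *p;

    	p = (unsigned long *)UNCAC_ADDR((unsigned long)&flag);
    	*p = val;
    }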