Commit 28b4af72 authored by Vineet Gupta

ARC: mm: PAE40: switch to using phys_addr_t for physical addresses

That way a single flip of phys_addr_t to 64-bit ensures that all places
dealing with physical addresses get correct data.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 29e33226
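For context on the one-line rationale above: phys_addr_t is typed in include/linux/types.h off a single Kconfig switch, so the PAE40 work only has to get the 64-bit variant selected. A sketch of the generic definition for illustration (the Kconfig wiring for ARC PAE40 is a separate change, not part of this diff):

#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;	/* PAE40: 40-bit physical addresses need 64-bit storage */
#else
typedef u32 phys_addr_t;	/* physical address space fits in 32 bit */
#endif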
@@ -31,10 +31,10 @@
 
 void flush_cache_all(void);
 
-void flush_icache_range(unsigned long start, unsigned long end);
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
...
@@ -25,7 +25,7 @@ static int l2_line_sz;
 int ioc_exists;
 volatile int slc_enable = 1, ioc_enable = 1;
 
-void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
 void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
@@ -216,7 +216,7 @@ void read_decode_cache_bcr(void)
  */
 
 static inline
-void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 			  unsigned long sz, const int op)
 {
 	unsigned int aux_cmd;
@@ -254,7 +254,7 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
 }
 
 static inline
-void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 			  unsigned long sz, const int op)
 {
 	unsigned int aux_cmd, aux_tag;
@@ -308,7 +308,7 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
  * specified in PTAG (similar to MMU v3)
  */
 static inline
-void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
 			  unsigned long sz, const int cacheop)
 {
 	unsigned int aux_cmd;
@@ -412,7 +412,7 @@ static inline void __dc_entire_op(const int op)
 /*
  * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 				unsigned long sz, const int op)
 {
 	unsigned long flags;
@@ -445,7 +445,7 @@ static inline void __ic_entire_inv(void)
 }
 
 static inline void
-__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
 			  unsigned long sz)
 {
 	unsigned long flags;
@@ -462,7 +462,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 #else
 
 struct ic_inv_args {
-	unsigned long paddr, vaddr;
+	phys_addr_t paddr, vaddr;
 	int sz;
 };
@@ -473,7 +473,7 @@ static void __ic_line_inv_vaddr_helper(void *info)
 	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
 }
 
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	struct ic_inv_args ic_inv = {
@@ -494,7 +494,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
 
-noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
 	/*
@@ -584,7 +584,7 @@ void flush_dcache_page(struct page *page)
 	} else if (page_mapped(page)) {
 
 		/* kernel reading from page with U-mapping */
-		unsigned long paddr = (unsigned long)page_address(page);
+		phys_addr_t paddr = (unsigned long)page_address(page);
 		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
 
 		if (addr_not_cache_congruent(paddr, vaddr))
@@ -732,14 +732,14 @@ EXPORT_SYMBOL(flush_icache_range);
  * builtin kernel page will not have any virtual mappings.
  * kprobe on loadable module will be kernel vaddr.
  */
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
 {
 	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
 	__ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
 {
 	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
@@ -748,7 +748,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
  * wrapper to clearout kernel or userspace mappings of a page
  * For kernel mappings @vaddr == @paddr
  */
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
 {
 	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
...
@@ -499,7 +499,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 /*
  * Routine to create a TLB entry
  */
-void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
 {
 	unsigned long flags;
 	unsigned int asid_or_sasid, rwx;
@@ -535,9 +535,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
 	local_irq_save(flags);
 
-	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
+	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
 
-	address &= PAGE_MASK;
+	vaddr &= PAGE_MASK;
 
 	/* update this PTE credentials */
 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -547,7 +547,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	/* ASID for this task */
 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
 	/*
 	 * ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -583,7 +583,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
 	struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 	create_tlb(vma, vaddr, ptep);
...
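As an aside, a minimal userspace sketch (hypothetical values, not part of this commit) of the truncation the conversion guards against: ARC's unsigned long is 32 bit, so a PAE40 physical address above 4 GB silently loses its top bits unless carried in a 64-bit phys_addr_t.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel typedef: with PAE40 enabled, phys_addr_t is 64 bit */
typedef uint64_t phys_addr_t;

int main(void)
{
	phys_addr_t paddr = 0x1080000000ULL;	/* a 40-bit physical address */
	uint32_t narrow = (uint32_t)paddr;	/* what a 32-bit 'unsigned long' keeps */

	/* prints "full: 0x1080000000  truncated: 0x80000000" */
	printf("full: %#llx  truncated: %#x\n",
	       (unsigned long long)paddr, narrow);
	return 0;
}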