Commit 336e2136 authored by Vineet Gupta

ARC: mm: preps ahead of HIGHMEM support

Before we plug in highmem support, some of the code needs to be ready for it:
 - copy_user_highpage() needs to use the kmap_atomic API
 - mk_pte() can't assume page_address()
 - do_page_fault() can't assume VMALLOC_END is the end of the kernel vaddr space
   (a rough sketch of this point follows the commit metadata below)
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent d4084645
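
To illustrate the last bullet of the commit message: with HIGHMEM, pkmap and fixmap windows appear in kernel virtual space beyond the vmalloc area, so kernel vaddr space no longer ends at VMALLOC_END, and the fault path below switches to a lower-bound check. A minimal sketch of that idea; the helper name is_kernel_vaddr() is purely hypothetical and not part of the patch:

#include <linux/mm.h>	/* pulls in the arch headers that define VMALLOC_START */
#include <linux/types.h>

/* Hypothetical sketch, not from the patch: what counts as a kernel
 * virtual address for the fault path after this change.
 */
static inline bool is_kernel_vaddr(unsigned long addr)
{
	/* vmalloc, and later pkmap/fixmap, all sit at or above VMALLOC_START */
	return addr >= VMALLOC_START;
}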
arch/arc/include/asm/pgtable.h
@@ -270,13 +270,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 	(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
 							PAGE_SHIFT)))
 
-#define mk_pte(page, pgprot)						\
-({									\
-	pte_t pte;							\
-	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
-	pte;								\
-})
-
+#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -360,7 +354,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
 #endif
 
-extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep);
...
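
For context on the mk_pte() change above: the old macro built the PTE from __pa(page_address(page)), which only works for pages with a permanent kernel mapping (lowmem). The new form needs only the page frame number, which every page has regardless of highmem. A rough illustration of the expansion, using the pfn_pte()/page_to_pfn() definitions visible in the same hunk (the expansion is for illustration only, not part of the patch):

/*
 * mk_pte(page, prot)
 *   -> pfn_pte(page_to_pfn(page), prot)
 *   -> __pte((page_to_pfn(page) << PAGE_SHIFT) | pgprot_val(prot))
 *
 * No page_address() involved, so it is safe for a highmem page that has
 * no kernel mapping at the time the PTE is constructed.
 */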
arch/arc/mm/cache.c
@@ -806,8 +806,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long u_vaddr, struct vm_area_struct *vma)
 {
-	unsigned long kfrom = (unsigned long)page_address(from);
-	unsigned long kto = (unsigned long)page_address(to);
+	void *kfrom = kmap_atomic(from);
+	void *kto = kmap_atomic(to);
 	int clean_src_k_mappings = 0;
 
 	/*
@@ -817,13 +817,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 	 *
 	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
 	 * equally valid for SRC page as well
+	 *
+	 * For !VIPT cache, all of this gets compiled out as
+	 * addr_not_cache_congruent() is 0
 	 */
 	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-		__flush_dcache_page(kfrom, u_vaddr);
+		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
 		clean_src_k_mappings = 1;
 	}
 
-	copy_page((void *)kto, (void *)kfrom);
+	copy_page(kto, kfrom);
 
 	/*
 	 * Mark DST page K-mapping as dirty for a later finalization by
@@ -840,11 +843,14 @@ void copy_user_highpage(struct page *to, struct page *from,
 	 * sync the kernel mapping back to physical page
 	 */
 	if (clean_src_k_mappings) {
-		__flush_dcache_page(kfrom, kfrom);
+		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
 		set_bit(PG_dc_clean, &from->flags);
 	} else {
 		clear_bit(PG_dc_clean, &from->flags);
 	}
 
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
...
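
The cache.c hunks above move copy_user_highpage() from page_address() to kmap_atomic(), which also works for highmem pages by setting up a temporary kernel mapping. A minimal, self-contained sketch of the general pattern, assuming a hypothetical helper name copy_one_page() (this is not the ARC routine, just the kmap_atomic pairing it relies on):

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/mm.h>		/* struct page, copy_page() */

/* Hypothetical helper: copy a page that may live in highmem */
static void copy_one_page(struct page *dst, struct page *src)
{
	void *kto = kmap_atomic(dst);
	void *kfrom = kmap_atomic(src);

	copy_page(kto, kfrom);

	/* atomic kmaps are released in reverse (stack) order */
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

The patch follows the same pairing: kfrom and kto are mapped at the top of the function and unmapped in reverse order at the end, after the cache maintenance that needs their kernel addresses.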
arch/arc/mm/fault.c
@@ -18,7 +18,14 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(address);
+	if (address >= VMALLOC_START) {
+		ret = handle_kernel_vaddr_fault(address);
 		if (unlikely(ret))
 			goto bad_area_nosemaphore;
 		else
...
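
The comment added in fault.c above describes what the renamed handler does: copy the relevant entry from the master (swapper) page tables into the faulting task's page tables, so both then share the same second-level table. A simplified sketch of that idea with generic page-table accessors; the name sync_kernel_pmd() is hypothetical and the real handler in arch/arc/mm/fault.c differs in detail:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical sketch of the swapper->task pgdir sync, not the ARC handler */
static int sync_kernel_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* task's top-level slot */
	pgd_t *pgd_k = pgd_offset_k(address);	/* swapper's top-level slot */
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (!pgd_present(*pgd_k))
		return 1;			/* kernel has no mapping either */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return 1;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return 1;

	/* share the 2nd level table/page between task and swapper pgdir */
	set_pmd(pmd, *pmd_k);
	return 0;
}

With the VMALLOC_START-only check in do_page_fault(), this kind of sync now covers vmalloc, pkmap and fixmap addresses alike.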