Commit 09195817 authored by Roman Zippel, committed by Russell King

[PATCH] replace mk_pte_phys() with pfn_pte()

This patch replaces mk_pte_phys() with pfn_pte(), creating the counterpart
to pte_pfn().
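
For call sites the conversion is mechanical: a physical address becomes a page
frame number by shifting right by PAGE_SHIFT, and loops that stepped phys_addr
by PAGE_SIZE now increment the pfn instead. A minimal sketch of the pattern
(generic form for illustration only; the real pfn_pte() is defined per
architecture, as in the include/asm-i386 hunks below):

    /* Illustrative generic definition, mirroring the 2-level i386 one below. */
    #define pfn_pte(pfn, prot)  __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

    /* Old call site:  set_pte(pte, mk_pte_phys(phys_addr, prot));
       New call site, walking by pfn instead of by physical address: */
    unsigned long pfn = phys_addr >> PAGE_SHIFT;
    set_pte(pte, pfn_pte(pfn, prot));
    pfn++;      /* was: phys_addr += PAGE_SIZE; */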
parent 5528f050
@@ -421,6 +421,7 @@ irongate_remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
                  unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -428,17 +429,17 @@ irongate_remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("irongate_remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte,
-                        mk_pte_phys(phys_addr,
+                set_pte(pte, pfn_pte(pfn,
                             __pgprot(_PAGE_VALID | _PAGE_ASM |
                                      _PAGE_KRE | _PAGE_KWE | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -250,12 +250,12 @@ callback_init(void * kernel_end)
         /* Set up the third level PTEs and update the virtual
            addresses of the CRB entries.  */
         for (i = 0; i < crb->map_entries; ++i) {
-                unsigned long paddr = crb->map[i].pa;
+                unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
                 crb->map[i].va = vaddr;
                 for (j = 0; j < crb->map[i].count; ++j) {
                         set_pte(pte_offset_kernel(pmd, vaddr),
-                                mk_pte_phys(paddr, PAGE_KERNEL));
-                        paddr += PAGE_SIZE;
+                                pfn_pte(pfn, PAGE_KERNEL));
+                        pfn++;
                         vaddr += PAGE_SIZE;
                 }
         }
......
@@ -138,7 +138,7 @@ void __init memtable_init(struct meminfo *mi)
         page_nr = max_low_pfn;
 
         pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
-        pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
+        pte[0] = pfn_pte((PAGE_OFFSET + 491520) >> PAGE_SHIFT, PAGE_READONLY);
         pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);
 
         for (i = 1; i < PTRS_PER_PGD; i++)
......
@@ -51,7 +51,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, pgprot));
+                set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
                 address += PAGE_SIZE;
                 phys_addr += PAGE_SIZE;
                 pte++;
......
@@ -43,7 +43,7 @@ static pte_t *minicache_pte;
  */
 unsigned long map_page_minicache(unsigned long virt)
 {
-        set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
+        set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
         flush_tlb_kernel_page(minicache_address);
 
         return minicache_address;
......
@@ -198,7 +198,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
         }
         ptep = pte_offset_kernel(pmdp, virt);
 
-        set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
+        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(prot)));
 }
 
 /*
......
@@ -17,6 +17,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -24,16 +25,17 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE |
+                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | __READABLE |
                                           __WRITEABLE | _PAGE_GLOBAL |
                                           _PAGE_KERNEL | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -550,7 +550,7 @@ static void acpi_create_identity_pmd (void)
 
         /* fill page with low mapping */
         for (i = 0; i < PTRS_PER_PTE; i++)
-                set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
+                set_pte(ptep + i, pfn_pte(i, PAGE_SHARED));
 
         pgd = pgd_offset(current->active_mm, 0);
         pmd = pmd_alloc(current->mm,pgd, 0);
......
@@ -122,7 +122,7 @@ static inline void set_pte_phys (unsigned long vaddr,
         }
         pte = pte_offset_kernel(pmd, vaddr);
         /* <phys,flags> stored as-is, to permit clearing entries */
-        set_pte(pte, mk_pte_phys(phys, flags));
+        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
 
         /*
          * It's enough to flush this one mapping.
@@ -239,7 +239,7 @@ static void __init pagetable_init (void)
                                 vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                 if (end && (vaddr >= end))
                                         break;
-                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
+                                *pte = pfn_pte(__pa(vaddr) >> PAGE_SHIFT, PAGE_KERNEL);
                         }
                         set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
                         if (pte_base != pte_offset_kernel(pmd, 0))
@@ -375,7 +375,7 @@ void __init test_wp_bit(void)
         pmd = pmd_offset(pgd, vaddr);
         pte = pte_offset_kernel(pmd, vaddr);
         old_pte = *pte;
-        *pte = mk_pte_phys(0, PAGE_READONLY);
+        *pte = pfn_pte(0, PAGE_READONLY);
         local_flush_tlb();
 
         boot_cpu_data.wp_works_ok = do_test_wp_bit(vaddr);
......
@@ -20,6 +20,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -27,15 +28,16 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                         _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -268,7 +268,7 @@ efi_map_pal_code (void)
                  */
                 ia64_clear_ic(flags);
                 ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-                         pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
+                         pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
                 local_irq_restore(flags);
                 ia64_srlz_i();
         }
......
@@ -291,7 +291,7 @@ ia64_mmu_init (void *my_cpu_data)
         ia64_srlz_d();
 
         ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-                 pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL)), PAGE_SHIFT);
+                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), PAGE_SHIFT);
 
         __restore_flags(flags);
         ia64_srlz_i();
......
@@ -89,7 +89,7 @@ sgi_mcatest(void)
                 printk("zzzspec: probe %ld, 0x%lx\n", res, val);
                 ia64_clear_ic(flags);
                 ia64_itc(0x2, 0xe00000ff00000000UL,
-                         pte_val(mk_pte_phys(0xff00000000UL,
+                         pte_val(pfn_pte(0xff00000000UL >> PAGE_SHIFT,
                                  __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), _PAGE_SIZE_256M);
                 local_irq_restore(flags);
                 ia64_srlz_i ();
......
@@ -78,7 +78,7 @@ extern void momenco_ocelot_irq_setup(void);
 static char reset_reason;
 
-#define ENTRYLO(x) ((pte_val(mk_pte_phys((x), PAGE_KERNEL_UNCACHED)) >> 6)|1)
+#define ENTRYLO(x) ((pte_val(pfn_pte((x) >> PAGE_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6)|1)
 
 static void __init setup_l3cache(unsigned long size);
......
@@ -18,6 +18,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
         pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
                                    | __WRITEABLE | flags);
@@ -27,14 +28,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, pgprot));
+                set_pte(pte, pfn_pte(pfn, pgprot));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -237,7 +237,7 @@ map_page(unsigned long va, unsigned long pa, int flags)
         pg = pte_alloc_kernel(&init_mm, pd, va);
         if (pg != 0) {
                 err = 0;
-                set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
+                set_pte(pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
                 if (mem_init_done)
                         flush_HPTE(0, va, pmd_val(*pd));
         }
......
@@ -240,7 +240,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
                 ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
 
                 pa = absolute_to_phys(pa);
-                set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
+                set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
                 spin_unlock(&ioremap_mm.page_table_lock);
         } else {
                 /* If the mm subsystem is not fully up, we cannot create a
......
@@ -118,9 +118,8 @@ void __init paging_init(void)
         pte_t pte;
         int i;
         unsigned long tmp;
-        unsigned long address=0;
+        unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-        unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE);
         static const int ssm_mask = 0x04000000L;
 
         /* unmap whole virtual address space */
@@ -136,7 +135,7 @@ void __init paging_init(void)
         pg_dir = swapper_pg_dir;
 
-        while (address < end_mem) {
+        while (pfn < max_low_pfn) {
                 /*
                  * pg_table is physical at this point
                  */
@@ -149,11 +148,11 @@ void __init paging_init(void)
                 pg_dir++;
 
                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-                        pte = mk_pte_phys(address, PAGE_KERNEL);
-                        if (address >= end_mem)
+                        pte = pfn_pte(pfn, PAGE_KERNEL);
+                        if (pfn >= max_low_pfn)
                                 pte_clear(&pte);
                         set_pte(pg_table, pte);
-                        address += PAGE_SIZE;
+                        pfn++;
                 }
         }
......
@@ -21,6 +21,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -28,15 +29,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr,
-                                         __pgprot(_PAGE_PRESENT | flags)));
+                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -116,10 +116,9 @@ void __init paging_init(void)
         pte_t * pt_dir;
         pte_t pte;
         int i,j,k;
-        unsigned long address=0;
+        unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
-        unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE);
         static const int ssm_mask = 0x04000000L;
         unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
@@ -147,7 +146,7 @@ void __init paging_init(void)
         for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
-                if (address >= end_mem) {
+                if (pfn >= max_low_pfn) {
                         pgd_clear(pg_dir);
                         continue;
                 }
@@ -156,7 +155,7 @@ void __init paging_init(void)
                 pgd_populate(&init_mm, pg_dir, pm_dir);
 
                 for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-                        if (address >= end_mem) {
+                        if (pfn >= max_low_pfn) {
                                 pmd_clear(pm_dir);
                                 continue;
                         }
@@ -165,13 +164,13 @@ void __init paging_init(void)
                 pmd_populate(&init_mm, pm_dir, pt_dir);
 
                 for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-                        pte = mk_pte_phys(address, PAGE_KERNEL);
-                        if (address >= end_mem) {
+                        pte = pfn_pte(pfn, PAGE_KERNEL);
+                        if (pfn >= max_low_pfn) {
                                 pte_clear(&pte);
                                 continue;
                         }
                         set_pte(pt_dir, pte);
-                        address += PAGE_SIZE;
+                        pfn++;
                 }
         }
 }
......
@@ -21,6 +21,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -28,15 +29,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr,
-                                         __pgprot(_PAGE_PRESENT | flags)));
+                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -398,7 +398,7 @@ void clear_user_page(void *to, unsigned long address)
                 pte_t entry;
                 unsigned long flags;
 
-                entry = mk_pte_phys(phys_addr, pgprot);
+                entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
                 down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
                 set_pte(pte, entry);
                 save_and_cli(flags);
@@ -437,7 +437,7 @@ void copy_user_page(void *to, void *from, unsigned long address)
                 pte_t entry;
                 unsigned long flags;
 
-                entry = mk_pte_phys(phys_addr, pgprot);
+                entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
                 down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
                 set_pte(pte, entry);
                 save_and_cli(flags);
......
@@ -17,6 +17,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
                                   unsigned long size, unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
         pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                    _PAGE_DIRTY | _PAGE_ACCESSED |
                                    _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
@@ -27,14 +28,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, pgprot));
+                set_pte(pte, pfn_pte(pfn, pgprot));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -2043,7 +2043,7 @@ void __init ld_mmu_srmmu(void)
         BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
 
         BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(pfn_pte, srmmu_pfn_pte, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
......
@@ -2526,7 +2526,7 @@ void __init ld_mmu_sun4c(void)
         BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
 
         BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(pfn_pte, sun4c_pfn_pte, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
 
         BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
......
@@ -125,7 +125,7 @@ static void set_pte_phys(unsigned long vaddr,
         pte = pte_offset_kernel(pmd, vaddr);
         if (pte_val(*pte))
                 pte_ERROR(*pte);
-        set_pte(pte, mk_pte_phys(phys, prot));
+        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
         /*
          * It's enough to flush this one mapping.
......
@@ -20,6 +20,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 unsigned long phys_addr, unsigned long flags)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -27,15 +28,16 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
                 end = PMD_SIZE;
         if (address >= end)
                 BUG();
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 if (!pte_none(*pte)) {
                         printk("remap_area_pte: page already exists\n");
                         BUG();
                 }
-                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                 _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......
@@ -59,6 +59,6 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 #define pte_page(x)             pfn_to_page(pte_pfn(x))
 #define pte_none(x)             (!(x).pte_low)
 #define pte_pfn(x)              ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
-#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define pfn_pte(pfn, prot)      __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #endif /* _I386_PGTABLE_2LEVEL_H */
@@ -90,7 +90,7 @@ static inline int pte_same(pte_t a, pte_t b)
 #define pte_none(x)     (!(x).pte_low && !(x).pte_high)
 #define pte_pfn(x)      (((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
 
-static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
         pte_t pte;
......
@@ -229,10 +229,7 @@ static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, &ptep-
  * and a page entry and page directory to the page they refer to.
  */
 
-#define mk_pte(page, pgprot)    __mk_pte((page) - mem_map, (pgprot))
-
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot)   __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
......
@@ -866,21 +866,22 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
                                       unsigned long phys_addr, pgprot_t prot)
 {
         unsigned long end;
+        unsigned long pfn;
 
         address &= ~PMD_MASK;
         end = address + size;
         if (end > PMD_SIZE)
                 end = PMD_SIZE;
+        pfn = phys_addr >> PAGE_SHIFT;
         do {
                 struct page *page;
                 pte_t oldpage = ptep_get_and_clear(pte);
-                unsigned long pfn = phys_addr >> PAGE_SHIFT;
 
                 if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
-                        set_pte(pte, mk_pte_phys(phys_addr, prot));
+                        set_pte(pte, pfn_pte(pfn, prot));
                 forget_pte(oldpage);
                 address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
+                pfn++;
                 pte++;
         } while (address && (address < end));
 }
......