Commit 991f6b0a authored by Linus Torvalds

Merge master.kernel.org:/home/mingo/BK/linux-2.5/

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 58b31cb7 cc3100f3
@@ -68,7 +68,7 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
 	unsigned long pages, i, j;
 	pgd_t *pgd;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte, pte_entry;
 	DRM_DEBUG( "%s\n", __FUNCTION__ );
@@ -144,18 +144,17 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
 			goto failed;
 		preempt_disable();
-		pte = pte_offset_map( pmd, i );
-		if ( !pte_present( *pte ) ) {
-			pte_unmap(pte);
-			preempt_enable();
-			goto failed;
-		}
+		pte = pte_offset_map(pmd, i);
+		pte_entry = *pte;
+		pte_unmap(pte);
+		preempt_enable();
-		entry->pagelist[j] = pte_page( *pte );
+		if (!pte_present(pte_entry))
+			goto failed;
+		entry->pagelist[j] = pte_page(pte_entry);
-		SetPageReserved( entry->pagelist[j] );
+		SetPageReserved(entry->pagelist[j]);
 	}
 	request.handle = entry->handle;
......
@@ -154,7 +154,7 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 	unsigned long i;
 	pgd_t *pgd;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte, entry;
 	struct page *page;
 	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
@@ -166,20 +166,22 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 	 * they need to be virtually contiguous in kernel space.
 	 */
 	pgd = pgd_offset_k( i );
-	if( !pgd_present( *pgd ) ) return NOPAGE_OOM;
+	if (!pgd_present(*pgd))
+		goto oom;
 	pmd = pmd_offset( pgd, i );
-	if( !pmd_present( *pmd ) ) return NOPAGE_OOM;
+	if (!pmd_present(*pmd))
+		goto oom;
 	preempt_disable();
-	pte = pte_offset_map( pmd, i );
-	if( !pte_present( *pte ) ) {
-		pte_unmap(pte);
-		preempt_enable();
-		return NOPAGE_OOM;
-	}
+	pte = pte_offset_map(pmd, i);
+	entry = *pte;
+	pte_unmap(pte);
+	preempt_enable();
-	page = pte_page(*pte);
+	if (!pte_present(entry))
+		goto oom;
+	page = pte_page(entry);
 	get_page(page);
 	DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -188,6 +190,8 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 #else
 	return page;
 #endif
+oom:
+	return NOPAGE_OOM;
 }
 /* Special close routine which deletes map information if we are the last
......
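Both DRM hunks above switch to the same access pattern: with highmem page tables, the pointer returned by pte_offset_map() is only valid until pte_unmap(), so the entry is copied out while the table is still mapped and every pte_present()/pte_page() call afterwards goes through the copy. A minimal sketch of that pattern follows; the function name is hypothetical and the header list is indicative, not code taken from this commit:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical helper illustrating the snapshot-then-unmap pattern. */
static struct page *example_follow_kernel_page(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pmd_t *pmd;
	pte_t *pte, entry;

	if (!pgd_present(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);
	if (!pmd_present(*pmd))
		return NULL;

	preempt_disable();
	pte = pte_offset_map(pmd, addr);
	entry = *pte;			/* snapshot while the pte table is mapped */
	pte_unmap(pte);
	preempt_enable();

	if (!pte_present(entry))
		return NULL;
	return pte_page(entry);		/* uses the copy, never *pte after unmap */
}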
@@ -177,7 +177,7 @@ static inline unsigned long uvirt_to_bus(unsigned long adr)
 {
 	unsigned long kva, ret;
-	kva = page_address(vmalloc_to_page(pgd_offset(current->mm, adr), adr));
+	kva = page_address(vmalloc_to_page(adr));
 	ret = virt_to_bus((void *)kva);
 	MDEBUG(printk("uv2b(%lx-->%lx)", adr, ret));
 	return ret;
@@ -188,7 +188,7 @@ static inline unsigned long kvirt_to_bus(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = virt_to_bus((void *)kva);
 	MDEBUG(printk("kv2b(%lx-->%lx)", adr, ret));
 	return ret;
@@ -203,7 +203,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
 	return ret;
......
@@ -145,7 +145,7 @@ static inline unsigned long uvirt_to_bus(unsigned long adr)
 {
 	unsigned long kva, ret;
-	kva = page_address(vmalloc_to_page(pgd_offset(current->mm, adr), adr));
+	kva = page_address(vmalloc_to_page(adr));
 	ret = virt_to_bus((void *)kva);
 	MDEBUG(printk("uv2b(%lx-->%lx)", adr, ret));
 	return ret;
@@ -156,7 +156,7 @@ static inline unsigned long kvirt_to_bus(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = virt_to_bus((void *)kva);
 	MDEBUG(printk("kv2b(%lx-->%lx)", adr, ret));
 	return ret;
@@ -171,7 +171,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
 	return ret;
......
@@ -189,7 +189,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	return ret;
 }
......
@@ -126,7 +126,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr) {
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	MDEBUG(printk("kv2pa(%lx-->%lx)\n", adr, ret));
 	return ret;
......
@@ -383,7 +383,7 @@ kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	return ret;
 }
......
@@ -187,7 +187,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	return ret;
 }
......
@@ -92,7 +92,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	return ret;
 }
......
@@ -71,7 +71,7 @@ unsigned long usbvideo_kvirt_to_pa(unsigned long adr)
 	unsigned long va, kva, ret;
 	va = VMALLOC_VMADDR(adr);
-	kva = page_address(vmalloc_to_page(pgd_offset_k(va), va));
+	kva = page_address(vmalloc_to_page(va));
 	ret = __pa(kva);
 	MDEBUG(printk("kv2pa(%lx-->%lx)", adr, ret));
 	return ret;
......
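Every media-driver hunk above is the same one-line change: the caller no longer passes a pgd pointer, because vmalloc_to_page() (re-declared in the header hunk further down and reworked in the mm/memory.c hunk) now takes only the virtual address and starts its own walk from pgd_offset_k(). A sketch of the helper shape these drivers converge on; the function name is hypothetical and the header list is indicative:

#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Hypothetical example; each driver above keeps its own private copy. */
static inline unsigned long example_kvirt_to_pa(unsigned long adr)
{
	unsigned long va, kva;

	va = VMALLOC_VMADDR(adr);			/* normalise the vmalloc address */
	kva = page_address(vmalloc_to_page(va));	/* struct page -> kernel virtual */
	return __pa(kva);				/* kernel virtual -> physical */
}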
@@ -342,10 +342,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address) \
 	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + __pte_offset(address))
-#define pte_offset_map2(dir, address) \
+#define pte_offset_map_nested(dir, address) \
 	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + __pte_offset(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap2(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 /*
  * The i386 doesn't have any external MMU info: the kernel page
......
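The rename above is purely cosmetic: pte_offset_map_nested()/pte_unmap_nested() are the old *2 variants, kept on the KM_PTE1 atomic-kmap slot so that a second pte table can be mapped while one is already mapped through pte_offset_map()/pte_unmap() on KM_PTE0. A minimal sketch of the pairing, with a hypothetical function name and purely illustrative body (the real callers are the mm/memory.c and mm/mremap.c hunks below):

#include <asm/pgtable.h>

/* Hypothetical illustration: map two pte tables at once using the two
 * distinct atomic-kmap slots, then drop both mappings. */
static void example_two_pte_tables(pmd_t *dst_pmd, pmd_t *src_pmd,
				   unsigned long address)
{
	pte_t *dst_pte, *src_pte;

	dst_pte = pte_offset_map(dst_pmd, address);		/* KM_PTE0 */
	src_pte = pte_offset_map_nested(src_pmd, address);	/* KM_PTE1 */

	set_pte(dst_pte, *src_pte);	/* copy one entry, purely illustrative */

	pte_unmap_nested(src_pte);	/* releases KM_PTE1 */
	pte_unmap(dst_pte);		/* releases KM_PTE0 */
}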
@@ -514,7 +514,7 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
 extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-extern struct page * vmalloc_to_page(pgd_t *pgd, unsigned long adr);
+extern struct page * vmalloc_to_page(unsigned long adr);
 #endif /* __KERNEL__ */
......
@@ -261,7 +261,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
 		if (!dst_pte)
 			goto nomem;
 		spin_lock(&src->page_table_lock);
-		src_pte = pte_offset_map2(src_pmd, address);
+		src_pte = pte_offset_map_nested(src_pmd, address);
 		do {
 			pte_t pte = *src_pte;
 			struct page *ptepage;
@@ -295,14 +295,14 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
 cont_copy_pte_range:		set_pte(dst_pte, pte);
 cont_copy_pte_range_noset:	address += PAGE_SIZE;
 			if (address >= end) {
-				pte_unmap2(src_pte);
+				pte_unmap_nested(src_pte);
 				pte_unmap(dst_pte);
 				goto out_unlock;
 			}
 			src_pte++;
 			dst_pte++;
 		} while ((unsigned long)src_pte & PTE_TABLE_MASK);
-		pte_unmap2(src_pte-1);
+		pte_unmap_nested(src_pte-1);
 		pte_unmap(dst_pte-1);
 		spin_unlock(&src->page_table_lock);
@@ -1475,9 +1475,10 @@ int make_pages_present(unsigned long addr, unsigned long end)
 /*
  * Map a vmalloc()-space virtual address to the physical page.
  */
-struct page * vmalloc_to_page(pgd_t *pgd, unsigned long addr)
+struct page * vmalloc_to_page(unsigned long addr)
 {
 	struct page *page = NULL;
+	pgd_t *pgd = pgd_offset_k(addr);
 	pmd_t *pmd;
 	pte_t *ptep, pte;
......
@@ -17,7 +17,7 @@
 extern int vm_enough_memory(long pages);
-static inline pte_t *get_one_pte_map2(struct mm_struct *mm, unsigned long addr)
+static inline pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t * pgd;
 	pmd_t * pmd;
@@ -41,9 +41,9 @@ static inline pte_t *get_one_pte_map2(struct mm_struct *mm, unsigned long addr)
 		goto end;
 	}
-	pte = pte_offset_map2(pmd, addr);
+	pte = pte_offset_map_nested(pmd, addr);
 	if (pte_none(*pte)) {
-		pte_unmap2(pte);
+		pte_unmap_nested(pte);
 		pte = NULL;
 	}
 end:
@@ -84,11 +84,11 @@ static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned
 	pte_t *src, *dst;
 	spin_lock(&mm->page_table_lock);
-	src = get_one_pte_map2(mm, old_addr);
+	src = get_one_pte_map_nested(mm, old_addr);
 	if (src) {
 		dst = alloc_one_pte_map(mm, new_addr);
 		error = copy_one_pte(mm, src, dst);
-		pte_unmap2(src);
+		pte_unmap_nested(src);
 		pte_unmap(dst);
 	}
 	spin_unlock(&mm->page_table_lock);
......