Commit 0501bce1 authored by Linus Torvalds

Merge http://fbdev.bkbits.net:8080/fbdev-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 2c799718 09195817
......@@ -421,6 +421,7 @@ irongate_remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -428,17 +429,17 @@ irongate_remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("irongate_remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte,
mk_pte_phys(phys_addr,
__pgprot(_PAGE_VALID | _PAGE_ASM |
_PAGE_KRE | _PAGE_KWE | flags)));
set_pte(pte, pfn_pte(pfn,
__pgprot(_PAGE_VALID | _PAGE_ASM |
_PAGE_KRE | _PAGE_KWE | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
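The mechanical pattern in this and every remap_area_pte()-style hunk below: instead of carrying a physical address through the loop and letting mk_pte_phys() shift it on every iteration, the caller computes the page frame number once and steps it alongside the virtual address. A minimal userspace model of the arithmetic this relies on (the PAGE_SHIFT value is illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long phys_addr = 0x12345000UL;
	unsigned long pfn = phys_addr >> PAGE_SHIFT;	/* shift hoisted out of the loop */
	int i;

	for (i = 0; i < 8; i++) {
		/* invariant behind the conversion: advancing the physical
		   address by one page equals advancing the frame number by one */
		assert(phys_addr >> PAGE_SHIFT == pfn);
		phys_addr += PAGE_SIZE;
		pfn++;
	}
	printf("pfn stepping matches physical-address stepping\n");
	return 0;
}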
......@@ -250,12 +250,12 @@ callback_init(void * kernel_end)
/* Set up the third level PTEs and update the virtual
addresses of the CRB entries. */
for (i = 0; i < crb->map_entries; ++i) {
unsigned long paddr = crb->map[i].pa;
unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
crb->map[i].va = vaddr;
for (j = 0; j < crb->map[i].count; ++j) {
set_pte(pte_offset_kernel(pmd, vaddr),
mk_pte_phys(paddr, PAGE_KERNEL));
paddr += PAGE_SIZE;
pfn_pte(pfn, PAGE_KERNEL));
pfn++;
vaddr += PAGE_SIZE;
}
}
......
......@@ -138,7 +138,7 @@ void __init memtable_init(struct meminfo *mi)
page_nr = max_low_pfn;
pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
pte[0] = pfn_pte((PAGE_OFFSET + 491520) >> PAGE_SHIFT, PAGE_READONLY);
pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);
for (i = 1; i < PTRS_PER_PGD; i++)
......
......@@ -150,8 +150,8 @@ static void __free_small_page(unsigned long spage, struct order *order)
unsigned long flags;
struct page *page;
page = virt_to_page(spage);
if (VALID_PAGE(page)) {
if (virt_addr_valid(spage)) {
page = virt_to_page(spage);
/*
* The container-page must be marked Reserved
......
......@@ -240,9 +240,13 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
struct page *page = pte_page(pte);
unsigned long pfn = pte_pfn(pte);
struct page *page;
if (VALID_PAGE(page) && page->mapping) {
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (page->mapping) {
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page);
......
......@@ -51,7 +51,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, pgprot));
set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pte++;
......
......@@ -43,7 +43,7 @@ static pte_t *minicache_pte;
*/
unsigned long map_page_minicache(unsigned long virt)
{
set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
flush_tlb_kernel_page(minicache_address);
return minicache_address;
......
......@@ -198,7 +198,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
}
ptep = pte_offset_kernel(pmdp, virt);
set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(prot)));
}
/*
......
......@@ -17,6 +17,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -24,16 +25,17 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE |
__WRITEABLE | _PAGE_GLOBAL |
_PAGE_KERNEL | flags)));
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | __READABLE |
__WRITEABLE | _PAGE_GLOBAL |
_PAGE_KERNEL | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -550,7 +550,7 @@ static void acpi_create_identity_pmd (void)
/* fill page with low mapping */
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
set_pte(ptep + i, pfn_pte(i, PAGE_SHARED));
pgd = pgd_offset(current->active_mm, 0);
pmd = pmd_alloc(current->mm,pgd, 0);
......
......@@ -122,7 +122,7 @@ static inline void set_pte_phys (unsigned long vaddr,
}
pte = pte_offset_kernel(pmd, vaddr);
/* <phys,flags> stored as-is, to permit clearing entries */
set_pte(pte, mk_pte_phys(phys, flags));
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
/*
* It's enough to flush this one mapping.
......@@ -239,7 +239,7 @@ static void __init pagetable_init (void)
vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
if (end && (vaddr >= end))
break;
*pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
*pte = pfn_pte(__pa(vaddr) >> PAGE_SHIFT, PAGE_KERNEL);
}
set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
if (pte_base != pte_offset_kernel(pmd, 0))
......@@ -375,7 +375,7 @@ void __init test_wp_bit(void)
pmd = pmd_offset(pgd, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
old_pte = *pte;
*pte = mk_pte_phys(0, PAGE_READONLY);
*pte = pfn_pte(0, PAGE_READONLY);
local_flush_tlb();
boot_cpu_data.wp_works_ok = do_test_wp_bit(vaddr);
......
......@@ -20,6 +20,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -27,15 +28,16 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -268,7 +268,7 @@ efi_map_pal_code (void)
*/
ia64_clear_ic(flags);
ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
local_irq_restore(flags);
ia64_srlz_i();
}
......
......@@ -109,6 +109,7 @@ free_initmem (void)
void
free_initrd_mem (unsigned long start, unsigned long end)
{
struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
......@@ -147,11 +148,12 @@ free_initrd_mem (unsigned long start, unsigned long end)
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
if (!VALID_PAGE(virt_to_page(start)))
if (!virt_addr_valid(start))
continue;
clear_bit(PG_reserved, &virt_to_page(start)->flags);
set_page_count(virt_to_page(start), 1);
free_page(start);
page = virt_to_page(start);
clear_bit(PG_reserved, &page->flags);
set_page_count(page, 1);
__free_page(page);
++totalram_pages;
}
}
......@@ -289,7 +291,7 @@ ia64_mmu_init (void *my_cpu_data)
ia64_srlz_d();
ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL)), PAGE_SHIFT);
pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), PAGE_SHIFT);
__restore_flags(flags);
ia64_srlz_i();
......
......@@ -89,7 +89,7 @@ sgi_mcatest(void)
printk("zzzspec: probe %ld, 0x%lx\n", res, val);
ia64_clear_ic(flags);
ia64_itc(0x2, 0xe00000ff00000000UL,
pte_val(mk_pte_phys(0xff00000000UL,
pte_val(pfn_pte(0xff00000000UL >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), _PAGE_SIZE_256M);
local_irq_restore(flags);
ia64_srlz_i ();
......
......@@ -78,7 +78,7 @@ extern void momenco_ocelot_irq_setup(void);
static char reset_reason;
#define ENTRYLO(x) ((pte_val(mk_pte_phys((x), PAGE_KERNEL_UNCACHED)) >> 6)|1)
#define ENTRYLO(x) ((pte_val(pfn_pte((x) >> PAGE_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6)|1)
static void __init setup_l3cache(unsigned long size);
......
......@@ -18,6 +18,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
| __WRITEABLE | flags);
......@@ -27,14 +28,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, pgprot));
set_pte(pte, pfn_pte(pfn, pgprot));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -116,8 +116,12 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
......
......@@ -115,8 +115,12 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
......
......@@ -237,7 +237,7 @@ map_page(unsigned long va, unsigned long pa, int flags)
pg = pte_alloc_kernel(&init_mm, pd, va);
if (pg != 0) {
err = 0;
set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
set_pte(pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
}
......
......@@ -240,7 +240,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
pa = absolute_to_phys(pa);
set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
spin_unlock(&ioremap_mm.page_table_lock);
} else {
/* If the mm subsystem is not fully up, we cannot create a
......
......@@ -118,9 +118,8 @@ void __init paging_init(void)
pte_t pte;
int i;
unsigned long tmp;
unsigned long address=0;
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE);
static const int ssm_mask = 0x04000000L;
/* unmap whole virtual address space */
......@@ -136,7 +135,7 @@ void __init paging_init(void)
pg_dir = swapper_pg_dir;
while (address < end_mem) {
while (pfn < max_low_pfn) {
/*
* pg_table is physical at this point
*/
......@@ -149,11 +148,11 @@ void __init paging_init(void)
pg_dir++;
for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
pte = mk_pte_phys(address, PAGE_KERNEL);
if (address >= end_mem)
pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn)
pte_clear(&pte);
set_pte(pg_table, pte);
address += PAGE_SIZE;
pfn++;
}
}
......
......@@ -21,6 +21,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -28,15 +29,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr,
__pgprot(_PAGE_PRESENT | flags)));
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -116,10 +116,9 @@ void __init paging_init(void)
pte_t * pt_dir;
pte_t pte;
int i,j,k;
unsigned long address=0;
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
_KERN_REGION_TABLE;
unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE);
static const int ssm_mask = 0x04000000L;
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
......@@ -147,7 +146,7 @@ void __init paging_init(void)
for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
if (address >= end_mem) {
if (pfn >= max_low_pfn) {
pgd_clear(pg_dir);
continue;
}
......@@ -156,7 +155,7 @@ void __init paging_init(void)
pgd_populate(&init_mm, pg_dir, pm_dir);
for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
if (address >= end_mem) {
if (pfn >= max_low_pfn) {
pmd_clear(pm_dir);
continue;
}
......@@ -165,13 +164,13 @@ void __init paging_init(void)
pmd_populate(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
pte = mk_pte_phys(address, PAGE_KERNEL);
if (address >= end_mem) {
pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&pte);
continue;
}
set_pte(pt_dir, pte);
address += PAGE_SIZE;
pfn++;
}
}
}
......
......@@ -21,6 +21,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -28,15 +29,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr,
__pgprot(_PAGE_PRESENT | flags)));
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -398,7 +398,7 @@ void clear_user_page(void *to, unsigned long address)
pte_t entry;
unsigned long flags;
entry = mk_pte_phys(phys_addr, pgprot);
entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
save_and_cli(flags);
......@@ -437,7 +437,7 @@ void copy_user_page(void *to, void *from, unsigned long address)
pte_t entry;
unsigned long flags;
entry = mk_pte_phys(phys_addr, pgprot);
entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
save_and_cli(flags);
......
......@@ -290,6 +290,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
unsigned long vpn;
#if defined(__SH4__)
struct page *page;
unsigned long pfn;
unsigned long ptea;
#endif
......@@ -298,11 +299,14 @@ void update_mmu_cache(struct vm_area_struct * vma,
return;
#if defined(__SH4__)
page = pte_page(pte);
if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
}
}
#endif
......
......@@ -17,6 +17,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
unsigned long size, unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
_PAGE_DIRTY | _PAGE_ACCESSED |
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
......@@ -27,14 +28,15 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, pgprot));
set_pte(pte, pfn_pte(pfn, pgprot));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -19,8 +19,12 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
......
......@@ -2043,7 +2043,7 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pfn_pte, srmmu_pfn_pte, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
......
......@@ -1327,7 +1327,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
if (!VALID_PAGE(virt_to_page(page))) {
if (!virt_addr_valid(page)) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
......@@ -2106,7 +2106,7 @@ static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
static int sun4c_pmd_bad(pmd_t pmd)
{
return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
(!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
(!virt_addr_valid(pmd_val(pmd))));
}
static int sun4c_pmd_present(pmd_t pmd)
......@@ -2526,7 +2526,7 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pfn_pte, sun4c_pfn_pte, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
......
......@@ -1312,10 +1312,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
}
if (recoverable) {
struct page *page = virt_to_page(__va(afar));
if (VALID_PAGE(page))
get_page(page);
if (pfn_valid(afar >> PAGE_SHIFT))
get_page(pfn_to_page(afar >> PAGE_SHIFT));
else
recoverable = 0;
......
......@@ -20,8 +20,12 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
......
......@@ -187,11 +187,13 @@ extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long addre
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page = pte_page(pte);
struct page *page;
unsigned long pfn;
unsigned long pg_flags;
if (VALID_PAGE(page) &&
page->mapping &&
pfn = pte_pfn(pte);
if (pfn_valid(pfn) &&
(page = pfn_to_page(pfn), page->mapping) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
......@@ -260,10 +262,14 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
continue;
if (pte_present(pte) && pte_dirty(pte)) {
struct page *page = pte_page(pte);
struct page *page;
unsigned long pgaddr, uaddr;
unsigned long pfn = pte_pfn(pte);
if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
if (PageReserved(page) || !page->mapping)
continue;
pgaddr = (unsigned long) page_address(page);
uaddr = address + offset;
......
......@@ -125,7 +125,7 @@ static void set_pte_phys(unsigned long vaddr,
pte = pte_offset_kernel(pmd, vaddr);
if (pte_val(*pte))
pte_ERROR(*pte);
set_pte(pte, mk_pte_phys(phys, prot));
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
/*
* It's enough to flush this one mapping.
......
......@@ -20,6 +20,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
......@@ -27,15 +28,16 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
......
......@@ -751,6 +751,37 @@ CONFIG_IDEDMA_ONLYDISK
Generally say N here.
CONFIG_BLK_DEV_IDE_TCQ
Support for tagged command queueing on ATA disk drives. This enables
the IDE layer to have multiple in-flight requests on hardware that
supports it. For now this includes the IBM Deskstar series drives,
such as the 22GXP, 75GXP, 40GV, 60GXP, and 120GXP (i.e. any Deskstar made
in the last couple of years), and at least some of the Western
Digital drives in the Expert series (by nature of really being IBM
drives).
If you have such a drive, say Y here.
CONFIG_BLK_DEV_IDE_TCQ_DEPTH
Maximum queue depth to enable per drive. Any value between 1
and 32 is valid, with 32 being the maximum that the hardware supports.
You probably just want the default of 32 here. If you enter an invalid
number, the default value will be used.
CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
Enable tagged command queueing unconditionally on drives that report
support for it. Regardless of the value chosen here, tagging can be
controlled at run time:
echo "using_tcq:32" > /proc/ide/hdX/settings
where any value between 1 and 32 selects the queue depth and enables
TCQ, and 0 disables it. hdparm versions 4.7 and above also support
TCQ manipulation.
Generally say Y here.
CONFIG_BLK_DEV_IT8172
Say Y here to support the on-board IDE controller on the Integrated
Technology Express, Inc. ITE8172 SBC. Vendor page at
......
......@@ -47,6 +47,11 @@ if [ "$CONFIG_BLK_DEV_IDE" != "n" ]; then
dep_bool ' Use PCI DMA by default when available' CONFIG_IDEDMA_PCI_AUTO $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' Enable DMA only for disks ' CONFIG_IDEDMA_ONLYDISK $CONFIG_IDEDMA_PCI_AUTO
define_bool CONFIG_BLK_DEV_IDEDMA $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' ATA tagged command queueing (EXPERIMENTAL)' CONFIG_BLK_DEV_IDE_TCQ $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_EXPERIMENTAL
dep_bool ' TCQ on by default' CONFIG_BLK_DEV_IDE_TCQ_DEFAULT $CONFIG_BLK_DEV_IDE_TCQ
if [ "$CONFIG_BLK_DEV_IDE_TCQ" != "n" ]; then
int ' Default queue depth' CONFIG_BLK_DEV_IDE_TCQ_DEPTH 32
fi
dep_bool ' Good-Bad DMA Model-Firmware (EXPERIMENTAL)' CONFIG_IDEDMA_NEW_DRIVE_LISTINGS $CONFIG_EXPERIMENTAL
dep_bool ' AEC62XX chipset support' CONFIG_BLK_DEV_AEC62XX $CONFIG_BLK_DEV_IDEDMA_PCI
dep_mbool ' AEC62XX Tuning support' CONFIG_AEC62XX_TUNING $CONFIG_BLK_DEV_AEC62XX
......
......@@ -44,6 +44,7 @@ ide-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
ide-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
ide-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
ide-obj-$(CONFIG_BLK_DEV_IDEDMA_PCI) += ide-dma.o
ide-obj-$(CONFIG_BLK_DEV_IDE_TCQ) += tcq.o
ide-obj-$(CONFIG_BLK_DEV_IDEPCI) += ide-pci.o
ide-obj-$(CONFIG_BLK_DEV_ISAPNP) += ide-pnp.o
ide-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += ide-pmac.o
......
......@@ -249,14 +249,14 @@ static int config_chipset_for_dma (ide_drive_t *drive, byte ultra)
ide_dma_off_quietly);
}
static int config_drive_xfer_rate (ide_drive_t *drive)
static int config_drive_xfer_rate(struct ata_device *drive, struct request *rq)
{
struct hd_driveid *id = drive->id;
ide_dma_action_t dma_func = ide_dma_on;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (ide_dmaproc(ide_dma_bad_drive, drive)) {
if (ide_dmaproc(ide_dma_bad_drive, drive, rq)) {
dma_func = ide_dma_off;
goto fast_ata_pio;
}
......@@ -278,7 +278,7 @@ static int config_drive_xfer_rate (ide_drive_t *drive)
if (dma_func != ide_dma_on)
goto no_dma_set;
}
} else if (ide_dmaproc(ide_dma_good_drive, drive)) {
} else if (ide_dmaproc(ide_dma_good_drive, drive, rq)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
......@@ -301,7 +301,7 @@ static int config_drive_xfer_rate (ide_drive_t *drive)
dma_func = ide_dma_off;
#endif /* CONFIG_HPT34X_AUTODMA */
return drive->channel->dmaproc(dma_func, drive);
return drive->channel->udma(dma_func, drive, rq);
}
/*
......@@ -312,7 +312,7 @@ static int config_drive_xfer_rate (ide_drive_t *drive)
* by HighPoint|Triones Technologies, Inc.
*/
int hpt34x_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
int hpt34x_dmaproc (ide_dma_action_t func, struct ata_device *drive, struct request *rq)
{
struct ata_channel *hwif = drive->channel;
unsigned long dma_base = hwif->dma_base;
......@@ -321,7 +321,7 @@ int hpt34x_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
switch (func) {
case ide_dma_check:
return config_drive_xfer_rate(drive);
return config_drive_xfer_rate(drive, rq);
case ide_dma_read:
reading = 1 << 3;
case ide_dma_write:
......@@ -347,7 +347,7 @@ int hpt34x_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
default:
break;
}
return ide_dmaproc(func, drive); /* use standard DMA stuff */
return ide_dmaproc(func, drive, rq); /* use standard DMA stuff */
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
......@@ -423,7 +423,7 @@ void __init ide_init_hpt34x(struct ata_channel *hwif)
else
hwif->autodma = 0;
hwif->dmaproc = &hpt34x_dmaproc;
hwif->udma = &hpt34x_dmaproc;
hwif->highmem = 1;
} else {
hwif->drives[0].autotune = 1;
......
......@@ -346,8 +346,6 @@ static int n_hpt_devs;
static unsigned int pci_rev_check_hpt3xx(struct pci_dev *dev);
static unsigned int pci_rev2_check_hpt3xx(struct pci_dev *dev);
byte hpt366_proc = 0;
byte hpt363_shared_irq;
byte hpt363_shared_pin;
extern char *ide_xfer_verbose (byte xfer_rate);
#if defined(DISPLAY_HPT366_TIMINGS) && defined(CONFIG_PROC_FS)
......
/*
* Copyright (C) 1994-1998 Linus Torvalds and authors:
/***** vi:set ts=8 sts=8 sw=8:************************************************
*
* Copyright (C) 1994-1998,2002 Linus Torvalds and authors:
*
* Mark Lord <mlord@pobox.com>
* Gadi Oxman <gadio@netvision.net.il>
* Andre Hedrick <andre@linux-ide.org>
* Jens Axboe <axboe@suse.de>
* Marcin Dalecki <dalecki@evision.ag>
* Mark Lord <mlord@pobox.com>
* Gadi Oxman <gadio@netvision.net.il>
* Andre Hedrick <andre@linux-ide.org>
* Jens Axboe <axboe@suse.de>
* Marcin Dalecki <martin@dalecki.de>
*
* This is the ATA disk device driver, as evolved from hd.c and ide.c.
*/
......@@ -98,6 +99,8 @@ static u8 get_command(ide_drive_t *drive, int cmd)
if (lba48bit) {
if (cmd == READ) {
if (drive->using_tcq)
return WIN_READDMA_QUEUED_EXT;
if (drive->using_dma)
return WIN_READDMA_EXT;
else if (drive->mult_count)
......@@ -105,6 +108,8 @@ static u8 get_command(ide_drive_t *drive, int cmd)
else
return WIN_READ_EXT;
} else if (cmd == WRITE) {
if (drive->using_tcq)
return WIN_WRITEDMA_QUEUED_EXT;
if (drive->using_dma)
return WIN_WRITEDMA_EXT;
else if (drive->mult_count)
......@@ -114,6 +119,8 @@ static u8 get_command(ide_drive_t *drive, int cmd)
}
} else {
if (cmd == READ) {
if (drive->using_tcq)
return WIN_READDMA_QUEUED;
if (drive->using_dma)
return WIN_READDMA;
else if (drive->mult_count)
......@@ -121,6 +128,8 @@ static u8 get_command(ide_drive_t *drive, int cmd)
else
return WIN_READ;
} else if (cmd == WRITE) {
if (drive->using_tcq)
return WIN_WRITEDMA_QUEUED;
if (drive->using_dma)
return WIN_WRITEDMA;
else if (drive->mult_count)
......@@ -148,7 +157,11 @@ static ide_startstop_t chs_do_request(struct ata_device *drive, struct request *
memset(&args, 0, sizeof(args));
args.taskfile.sector_count = sectors;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = sect;
args.taskfile.low_cylinder = cyl;
......@@ -184,7 +197,12 @@ static ide_startstop_t lba28_do_request(struct ata_device *drive, struct request
memset(&args, 0, sizeof(args));
args.taskfile.sector_count = sectors;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = block;
args.taskfile.low_cylinder = (block >>= 8);
......@@ -226,8 +244,14 @@ static ide_startstop_t lba48_do_request(struct ata_device *drive, struct request
memset(&args, 0, sizeof(args));
args.taskfile.sector_count = sectors;
args.hobfile.sector_count = sectors >> 8;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.hobfile.feature = sectors >> 8;
args.taskfile.sector_count = rq->tag << 3;
} else {
args.taskfile.sector_count = sectors;
args.hobfile.sector_count = sectors >> 8;
}
args.taskfile.sector_number = block; /* low lba */
args.taskfile.low_cylinder = (block >>= 8); /* mid lba */
......@@ -285,6 +309,30 @@ static ide_startstop_t idedisk_do_request(struct ata_device *drive, struct reque
return promise_rw_disk(drive, rq, block);
}
/*
* start a tagged operation
*/
if (drive->using_tcq) {
unsigned long flags;
int ret;
spin_lock_irqsave(&ide_lock, flags);
ret = blk_queue_start_tag(&drive->queue, rq);
if (ata_pending_commands(drive) > drive->max_depth)
drive->max_depth = ata_pending_commands(drive);
if (ata_pending_commands(drive) > drive->max_last_depth)
drive->max_last_depth = ata_pending_commands(drive);
spin_unlock_irqrestore(&ide_lock, flags);
if (ret) {
BUG_ON(!ata_pending_commands(drive));
return ide_started;
}
}
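/*
 * A non-zero return from blk_queue_start_tag() means no free tag was
 * available: the request stays queued and ide_started tells the caller
 * to wait for a completion to release a tag.  The BUG_ON() catches the
 * would-be deadlock of waiting for a tag with no commands outstanding.
 */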
/* 48-bit LBA */
if ((drive->id->cfs_enable_2 & 0x0400) && (drive->addressing))
return lba48_do_request(drive, rq, block);
......@@ -542,11 +590,61 @@ static int proc_idedisk_read_smart_values
PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}
#ifdef CONFIG_BLK_DEV_IDE_TCQ
static int proc_idedisk_read_tcq
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
ide_drive_t *drive = (ide_drive_t *) data;
char *out = page;
int len, cmds, i;
unsigned long flags;
if (!blk_queue_tagged(&drive->queue)) {
len = sprintf(out, "not configured\n");
PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
spin_lock_irqsave(&ide_lock, flags);
len = sprintf(out, "TCQ currently on:\t%s\n", drive->using_tcq ? "yes" : "no");
len += sprintf(out+len, "Max queue depth:\t%d\n",drive->queue_depth);
len += sprintf(out+len, "Max achieved depth:\t%d\n",drive->max_depth);
len += sprintf(out+len, "Max depth since last:\t%d\n",drive->max_last_depth);
len += sprintf(out+len, "Current depth:\t\t%d\n", ata_pending_commands(drive));
len += sprintf(out+len, "Active tags:\t\t[ ");
for (i = 0, cmds = 0; i < drive->queue_depth; i++) {
struct request *rq = blk_queue_tag_request(&drive->queue, i);
if (!rq)
continue;
len += sprintf(out+len, "%d, ", i);
cmds++;
}
len += sprintf(out+len, "]\n");
len += sprintf(out+len, "Queue:\t\t\treleased [ %lu ] - started [ %lu ]\n", drive->immed_rel, drive->immed_comp);
if (ata_pending_commands(drive) != cmds)
len += sprintf(out+len, "pending request and queue count mismatch (counted: %d)\n", cmds);
len += sprintf(out+len, "DMA status:\t\t%srunning\n", test_bit(IDE_DMA, &HWGROUP(drive)->flags) ? "" : "not ");
drive->max_last_depth = 0;
spin_unlock_irqrestore(&ide_lock, flags);
PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
#endif
static ide_proc_entry_t idedisk_proc[] = {
{ "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
{ "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
{ "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_smart_values, NULL },
{ "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL },
#ifdef CONFIG_BLK_DEV_IDE_TCQ
{ "tcq", S_IFREG|S_IRUSR, proc_idedisk_read_tcq, NULL },
#endif
{ NULL, 0, NULL, NULL }
};
......@@ -633,6 +731,32 @@ static int set_acoustic(ide_drive_t *drive, int arg)
return 0;
}
#ifdef CONFIG_BLK_DEV_IDE_TCQ
static int set_using_tcq(ide_drive_t *drive, int arg)
{
if (!drive->driver)
return -EPERM;
if (!drive->channel->udma)
return -EPERM;
if (arg == drive->queue_depth && drive->using_tcq)
return 0;
/*
* set depth, but check also id for max supported depth
*/
drive->queue_depth = arg ? arg : 1;
if (drive->id) {
if (drive->queue_depth > drive->id->queue_depth + 1)
drive->queue_depth = drive->id->queue_depth + 1;
}
if (drive->channel->udma(arg ? ide_dma_queued_on : ide_dma_queued_off, drive, NULL))
return -EIO;
return 0;
}
#endif
static int probe_lba_addressing (ide_drive_t *drive, int arg)
{
drive->addressing = 0;
......@@ -664,6 +788,9 @@ static void idedisk_add_settings(ide_drive_t *drive)
ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
ide_add_setting(drive, "using_tcq", SETTING_RW, HDIO_GET_QDMA, HDIO_SET_QDMA, TYPE_BYTE, 0, IDE_MAX_TAG, 1, 1, &drive->using_tcq, set_using_tcq);
#endif
}
static int idedisk_suspend(struct device *dev, u32 state, u32 level)
......
......@@ -522,6 +522,32 @@ static void ide_toggle_bounce(ide_drive_t *drive, int on)
blk_queue_bounce_limit(&drive->queue, addr);
}
int ide_start_dma(ide_dma_action_t func, struct ata_device *drive)
{
struct ata_channel *hwif = drive->channel;
unsigned long dma_base = hwif->dma_base;
unsigned int reading = 0;
if (rq_data_dir(HWGROUP(drive)->rq) == READ)
reading = 1 << 3;
/* active tuning based on IO direction */
if (hwif->rwproc)
hwif->rwproc(drive, func);
/*
* try PIO instead of DMA
*/
if (!ide_build_dmatable(drive, func))
return 1;
outl(hwif->dmatable_dma, dma_base + 4); /* PRD table */
outb(reading, dma_base); /* specify r/w */
outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
drive->waiting_for_dma = 1;
return 0;
}
/*
* This initiates/aborts DMA read/write operations on a drive.
*
......@@ -543,7 +569,7 @@ int ide_dmaproc(ide_dma_action_t func, struct ata_device *drive, struct request
struct ata_channel *hwif = drive->channel;
unsigned long dma_base = hwif->dma_base;
byte unit = (drive->select.b.unit & 0x01);
unsigned int count, reading = 0, set_high = 1;
unsigned int reading = 0, set_high = 1;
byte dma_stat;
switch (func) {
......@@ -552,27 +578,27 @@ int ide_dmaproc(ide_dma_action_t func, struct ata_device *drive, struct request
case ide_dma_off_quietly:
set_high = 0;
outb(inb(dma_base+2) & ~(1<<(5+unit)), dma_base+2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
hwif->udma(ide_dma_queued_off, drive, rq);
#endif
case ide_dma_on:
ide_toggle_bounce(drive, set_high);
drive->using_dma = (func == ide_dma_on);
if (drive->using_dma)
if (drive->using_dma) {
outb(inb(dma_base+2)|(1<<(5+unit)), dma_base+2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
hwif->udma(ide_dma_queued_on, drive, rq);
#endif
}
return 0;
case ide_dma_check:
return config_drive_for_dma (drive);
case ide_dma_read:
reading = 1 << 3;
case ide_dma_write:
/* active tuning based on IO direction */
if (hwif->rwproc)
hwif->rwproc(drive, func);
if (!(count = ide_build_dmatable(drive, func)))
return 1; /* try PIO instead of DMA */
outl(hwif->dmatable_dma, dma_base + 4); /* PRD table */
outb(reading, dma_base); /* specify r/w */
outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
drive->waiting_for_dma = 1;
if (ide_start_dma(func, drive))
return 1;
if (drive->type != ATA_DISK)
return 0;
......@@ -587,6 +613,14 @@ int ide_dmaproc(ide_dma_action_t func, struct ata_device *drive, struct request
OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
}
return drive->channel->udma(ide_dma_begin, drive, NULL);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
case ide_dma_queued_on:
case ide_dma_queued_off:
case ide_dma_read_queued:
case ide_dma_write_queued:
case ide_dma_queued_start:
return ide_tcq_dmaproc(func, drive, rq);
#endif
case ide_dma_begin:
/* Note that this is done *after* the cmd has
* been issued to the drive, as per the BM-IDE spec.
......
/*
* Copyright (c) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (c) 1995-1998 Mark Lord
/**** vi:set ts=8 sts=8 sw=8:************************************************
*
* Copyright (C) 2002 Marcin Dalecki <martin@dalecki.de>
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 1995-1998 Mark Lord
*
* May be copied or modified under the terms of the GNU General Public License
*/
......@@ -81,17 +83,10 @@ extern void ide_init_hpt34x(struct ata_channel *);
#endif
#ifdef CONFIG_BLK_DEV_HPT366
extern byte hpt363_shared_irq;
extern byte hpt363_shared_pin;
extern unsigned int pci_init_hpt366(struct pci_dev *);
extern unsigned int ata66_hpt366(struct ata_channel *);
extern void ide_init_hpt366(struct ata_channel *);
extern void ide_dmacapable_hpt366(struct ata_channel *, unsigned long);
#else
/* FIXME: those have to be killed */
static byte hpt363_shared_irq;
static byte hpt363_shared_pin;
#endif
#ifdef CONFIG_BLK_DEV_NS87415
......@@ -177,7 +172,7 @@ typedef struct ide_pci_enablebit_s {
#define ATA_F_PHACK 0x40 /* apply PROMISE hacks */
#define ATA_F_HPTHACK 0x80 /* apply HPT366 hacks */
typedef struct ide_pci_device_s {
struct ata_pci_device {
unsigned short vendor;
unsigned short device;
unsigned int (*init_chipset)(struct pci_dev *dev);
......@@ -188,9 +183,9 @@ typedef struct ide_pci_device_s {
unsigned int bootable;
unsigned int extra;
unsigned int flags;
} ide_pci_device_t;
};
static ide_pci_device_t pci_chipsets[] __initdata = {
static struct ata_pci_device pci_chipsets[] __initdata = {
#ifdef CONFIG_BLK_DEV_PIIX
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_1, pci_init_piix, ata66_piix, ide_init_piix, ide_dmacapable_piix, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0, 0 },
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1, pci_init_piix, ata66_piix, ide_init_piix, ide_dmacapable_piix, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0, 0 },
......@@ -319,7 +314,7 @@ static ide_pci_device_t pci_chipsets[] __initdata = {
* settings of split-mirror pci-config space, place chipset into init-mode,
* and/or preserve an interrupt if the card is not native ide support.
*/
static unsigned int __init trust_pci_irq(ide_pci_device_t *d, struct pci_dev *dev)
static unsigned int __init trust_pci_irq(struct ata_pci_device *d, struct pci_dev *dev)
{
if (d->flags & ATA_F_IRQ)
return dev->irq;
......@@ -484,7 +479,7 @@ static unsigned long __init get_dma_base(struct ata_channel *hwif, int extra, co
* Setup DMA transfers on a channel.
*/
static void __init setup_channel_dma(struct ata_channel *hwif, struct pci_dev *dev,
ide_pci_device_t *d,
struct ata_pci_device *d,
int port,
u8 class_rev,
int pciirq,
......@@ -534,7 +529,7 @@ static void __init setup_channel_dma(struct ata_channel *hwif, struct pci_dev *d
* This gets called once for the master and for the slave interface.
*/
static int __init setup_host_channel(struct pci_dev *dev,
ide_pci_device_t *d,
struct ata_pci_device *d,
int port,
u8 class_rev,
int pciirq,
......@@ -648,17 +643,16 @@ static int __init setup_host_channel(struct pci_dev *dev,
}
/*
* Looks at the primary/secondary channels on a PCI IDE device and, if they
* are enabled, prepares the IDE driver for use with them. This generic code
* works for most PCI chipsets.
* Looks at the primary/secondary channels on a PCI IDE device and, if they are
* enabled, prepares the IDE driver for use with them. This generic code works
* for most PCI chipsets.
*
* One thing that is not standardized is the location of the primary/secondary
* interface "enable/disable" bits. For chipsets that we "know" about, this
* information is in the ide_pci_device_t struct; for all other chipsets, we
* just assume both interfaces are enabled.
* information is in the struct ata_pci_device struct; for all other chipsets,
* we just assume both interfaces are enabled.
*/
static void __init setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
static void __init setup_pci_device(struct pci_dev *dev, struct ata_pci_device *d)
{
int autodma = 0;
int pciirq = 0;
......@@ -775,10 +769,11 @@ static void __init setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
setup_host_channel(dev, d, ATA_SECONDARY, class_rev, pciirq, autodma, &pcicmd);
}
static void __init pdc20270_device_order_fixup (struct pci_dev *dev, ide_pci_device_t *d)
static void __init pdc20270_device_order_fixup (struct pci_dev *dev, struct ata_pci_device *d)
{
struct pci_dev *dev2 = NULL, *findev;
ide_pci_device_t *d2;
struct pci_dev *dev2 = NULL;
struct pci_dev *findev;
struct ata_pci_device *d2;
if (dev->bus->self &&
dev->bus->self->vendor == PCI_VENDOR_ID_DEC &&
......@@ -814,10 +809,10 @@ static void __init pdc20270_device_order_fixup (struct pci_dev *dev, ide_pci_dev
setup_pci_device(dev2, d2);
}
static void __init hpt366_device_order_fixup (struct pci_dev *dev, ide_pci_device_t *d)
static void __init hpt366_device_order_fixup (struct pci_dev *dev, struct ata_pci_device *d)
{
struct pci_dev *dev2 = NULL, *findev;
ide_pci_device_t *d2;
struct ata_pci_device *d2;
unsigned char pin1 = 0, pin2 = 0;
unsigned int class_rev;
......@@ -843,9 +838,7 @@ static void __init hpt366_device_order_fixup (struct pci_dev *dev, ide_pci_devic
(PCI_FUNC(findev->devfn) & 1)) {
dev2 = findev;
pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
hpt363_shared_pin = (pin1 != pin2) ? 1 : 0;
hpt363_shared_irq = (dev->irq == dev2->irq) ? 1 : 0;
if (hpt363_shared_pin && hpt363_shared_irq) {
if ((pin1 != pin2) && (dev->irq == dev2->irq)) {
d->bootable = ON_BOARD;
printk("%s: onboard version of chipset, pin1=%d pin2=%d\n", dev->name, pin1, pin2);
}
......@@ -869,7 +862,7 @@ static void __init scan_pcidev(struct pci_dev *dev)
{
unsigned short vendor;
unsigned short device;
ide_pci_device_t *d;
struct ata_pci_device *d;
vendor = dev->vendor;
device = dev->device;
......@@ -881,7 +874,7 @@ static void __init scan_pcidev(struct pci_dev *dev)
++d;
if (d->init_channel == ATA_PCI_IGNORE)
printk("%s: has been ignored by PCI bus scan\n", dev->name);
printk(KERN_INFO "ATA: %s: ignored by PCI bus scan\n", dev->name);
else if ((d->vendor == PCI_VENDOR_ID_OPTI && d->device == PCI_DEVICE_ID_OPTI_82C558) && !(PCI_FUNC(dev->devfn) & 1))
return;
else if ((d->vendor == PCI_VENDOR_ID_CONTAQ && d->device == PCI_DEVICE_ID_CONTAQ_82C693) && (!(PCI_FUNC(dev->devfn) & 1) || !((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)))
......@@ -896,10 +889,10 @@ static void __init scan_pcidev(struct pci_dev *dev)
pdc20270_device_order_fixup(dev, d);
else if (!(d->vendor == 0 && d->device == 0) || (dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
if (d->vendor == 0 && d->device == 0)
printk("%s: unknown IDE controller on PCI slot %s, vendor=%04x, device=%04x\n",
dev->name, dev->slot_name, vendor, device);
printk(KERN_INFO "ATA: unknown ATA interface %s (%04x:%04x) on PCI slot %s\n",
dev->name, vendor, device, dev->slot_name);
else
printk("%s: IDE controller on PCI slot %s\n", dev->name, dev->slot_name);
printk(KERN_INFO "ATA: interface %s on PCI slot %s\n", dev->name, dev->slot_name);
setup_pci_device(dev, d);
}
}
......
......@@ -456,11 +456,39 @@ ide_startstop_t ata_taskfile(ide_drive_t *drive,
if (args->prehandler != NULL)
return args->prehandler(drive, rq);
} else {
/* for dma commands we down set the handler */
if (drive->using_dma &&
!(drive->channel->udma(((args->taskfile.command == WIN_WRITEDMA)
|| (args->taskfile.command == WIN_WRITEDMA_EXT))
? ide_dma_write : ide_dma_read, drive, rq)));
ide_dma_action_t dma_act;
int tcq = 0;
if (!drive->using_dma)
return ide_started;
/* for dma commands we don't set the handler */
if (args->taskfile.command == WIN_WRITEDMA || args->taskfile.command == WIN_WRITEDMA_EXT)
dma_act = ide_dma_write;
else if (args->taskfile.command == WIN_READDMA || args->taskfile.command == WIN_READDMA_EXT)
dma_act = ide_dma_read;
else if (args->taskfile.command == WIN_WRITEDMA_QUEUED || args->taskfile.command == WIN_WRITEDMA_QUEUED_EXT) {
tcq = 1;
dma_act = ide_dma_write_queued;
} else if (args->taskfile.command == WIN_READDMA_QUEUED || args->taskfile.command == WIN_READDMA_QUEUED_EXT) {
tcq = 1;
dma_act = ide_dma_read_queued;
} else {
printk("ata_taskfile: unknown command %x\n", args->taskfile.command);
return ide_stopped;
}
/*
* FIXME: this is a gross hack, need to unify tcq dma proc and
* regular dma proc -- basically split stuff that needs to act
* on a request from things like ide_dma_check etc.
*/
if (tcq)
return drive->channel->udma(dma_act, drive, rq);
else {
if (drive->channel->udma(dma_act, drive, rq))
return ide_stopped;
}
}
return ide_started;
......@@ -523,7 +551,7 @@ ide_startstop_t task_no_data_intr(struct ata_device *drive, struct request *rq)
ide__sti(); /* local CPU only */
if (!OK_STAT(stat = GET_STAT(), READY_STAT, BAD_STAT)) {
/* Keep quite for NOP becouse they are expected to fail. */
/* Keep quiet for NOP because it is expected to fail. */
if (args && args->taskfile.command != WIN_NOP)
return ide_error(drive, "task_no_data_intr", stat);
}
......
......@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <asm/system.h>
#include <asm/io.h>
......@@ -40,7 +41,7 @@
#include "scsi.h"
#include "hosts.h"
#include<linux/stat.h>
#include <linux/stat.h>
#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
......
......@@ -41,7 +41,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <asm/uaccess.h>
......
......@@ -374,8 +374,8 @@ static int __init root_nfs_getport(int program, int version, int proto)
{
struct sockaddr_in sin;
printk(KERN_NOTICE "Looking up port of RPC %d/%d on %s\n",
program, version, in_ntoa(servaddr));
printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n",
program, version, NIPQUAD(servaddr));
set_sockaddr(&sin, servaddr, 0);
return rpc_getport_external(&sin, program, version, proto);
}
......
......@@ -416,6 +416,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
do {
pte_t page = *pte;
struct page *ptpage;
unsigned long pfn;
address += PAGE_SIZE;
pte++;
......@@ -424,8 +425,11 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
++*total;
if (!pte_present(page))
continue;
ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
pfn = pte_pfn(page);
if (!pfn_valid(pfn))
continue;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
continue;
++*pages;
if (pte_dirty(page))
......
......@@ -657,12 +657,12 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
}
if (uspi->s_fsize < 512) {
printk("ufs_read_super: fragment size %u is too small\n"
printk("ufs_read_super: fragment size %u is too small\n",
uspi->s_fsize);
goto failed;
}
if (uspi->s_fsize > 4096) {
printk("ufs_read_super: fragment size %u is too large\n"
printk("ufs_read_super: fragment size %u is too large\n",
uspi->s_fsize);
goto failed;
}
......@@ -672,12 +672,12 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
}
if (uspi->s_bsize < 4096) {
printk("ufs_read_super: block size %u is too small\n"
printk("ufs_read_super: block size %u is too small\n",
uspi->s_fsize);
goto failed;
}
if (uspi->s_bsize / uspi->s_fsize > 8) {
printk("ufs_read_super: too many fragments per block (%u)\n"
printk("ufs_read_super: too many fragments per block (%u)\n",
uspi->s_bsize / uspi->s_fsize);
goto failed;
}
......
......@@ -102,7 +102,7 @@ unsigned long get_wchan(struct task_struct *p);
unsigned long eip = 0; \
unsigned long regs = (unsigned long)user_regs(tsk); \
if (regs > PAGE_SIZE && \
VALID_PAGE(virt_to_page(regs))) \
virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
eip; })
......
......@@ -131,8 +131,12 @@ static __inline__ int get_order(unsigned long size)
#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
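These flat mem_map macros are what let the VALID_PAGE(page) tests elsewhere in the patch become pfn_valid(pfn) checks taken before the struct page pointer is ever formed. A small userspace model of the round trip (array size illustrative):

#include <assert.h>

struct page { unsigned long flags; };

#define MAX_MAPNR 1024
static struct page mem_map[MAX_MAPNR];
static unsigned long max_mapnr = MAX_MAPNR;

#define pfn_to_page(pfn)  (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)    ((pfn) < max_mapnr)

int main(void)
{
	unsigned long pfn = 42;

	/* new idiom: validate the frame number first, and only then form
	   the struct page pointer; VALID_PAGE() did it the other way
	   round, deriving a possibly wild pointer before checking it */
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		assert(page_to_pfn(page) == pfn);
	}
	return 0;
}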
......@@ -56,8 +56,9 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
}
#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
#define pte_page(x) (mem_map+((unsigned long)(((x).pte_low >> PAGE_SHIFT))))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* _I386_PGTABLE_2LEVEL_H */
......@@ -86,10 +86,11 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
#define pte_page(x) (mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low && !(x).pte_high)
#define pte_pfn(x) (((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
pte_t pte;
......
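In the three-level (PAE) case above, the frame number no longer fits below bit 32, so pte_pfn() and pfn_pte() split it across pte_low and pte_high. A userspace round-trip check of that packing (protection-bit value illustrative):

#include <assert.h>

#define PAGE_SHIFT 12

typedef struct { unsigned int pte_low, pte_high; } pte_t;

/* mirrors the PAE packing: pfn bits that fit above the page offset go
   in pte_low, the remainder spills into pte_high */
static pte_t pfn_pte(unsigned long long pfn, unsigned int prot)
{
	pte_t pte;

	pte.pte_low  = (unsigned int)(pfn << PAGE_SHIFT) | prot;
	pte.pte_high = (unsigned int)(pfn >> (32 - PAGE_SHIFT));
	return pte;
}

static unsigned long long pte_pfn(pte_t x)
{
	return (x.pte_low >> PAGE_SHIFT) |
	       ((unsigned long long)x.pte_high << (32 - PAGE_SHIFT));
}

int main(void)
{
	/* a frame above the 4GB line: needs pte_high to survive the trip */
	unsigned long long pfn = 0x123456ULL;

	assert(pte_pfn(pfn_pte(pfn, 0x063)) == pfn);
	return 0;
}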
......@@ -229,10 +229,7 @@ static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, &ptep-
* and a page entry and page directory to the page they refer to.
*/
#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
......
......@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *p);
({ \
unsigned long eip = 0; \
if ((tsk)->thread.esp0 > PAGE_SIZE && \
(VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
(virt_addr_valid((tsk)->thread.esp0))) \
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
......
......@@ -105,10 +105,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
pte_clear(ptep);
if (!pte_not_present(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page)&&
(!page->mapping || !(page->mapping->i_mmap_shared)))
__clear_bit(PG_mapped, &page->flags);
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!page->mapping || !page->mapping->i_mmap_shared)
__clear_bit(PG_mapped, &page->flags);
}
}
return pte;
}
......
......@@ -140,7 +140,6 @@ typedef unsigned char byte; /* used everywhere */
*/
#define PRD_BYTES 8
#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
#define PRD_SEGMENTS 32
/*
* Some more useful definitions
......@@ -298,6 +297,7 @@ struct ata_device {
u8 tune_req; /* requested drive tuning setting */
byte using_dma; /* disk is using dma for read/write */
byte using_tcq; /* disk is using queueing */
byte retry_pio; /* retrying dma capable host in pio */
byte state; /* retry state */
byte dsc_overlap; /* flag: DSC overlap */
......@@ -360,9 +360,17 @@ struct ata_device {
byte dn; /* now wide spread use */
byte wcache; /* status of write cache */
byte acoustic; /* acoustic management */
byte queue_depth; /* max queue depth */
unsigned int failures; /* current failure count */
unsigned int max_failures; /* maximum allowed failure count */
struct device device; /* global device tree handle */
/*
* tcq statistics
*/
unsigned long immed_rel;
unsigned long immed_comp;
int max_last_depth;
int max_depth;
} ide_drive_t;
/*
......@@ -381,7 +389,10 @@ typedef enum { ide_dma_read, ide_dma_write, ide_dma_begin,
ide_dma_off, ide_dma_off_quietly, ide_dma_test_irq,
ide_dma_bad_drive, ide_dma_good_drive,
ide_dma_verbose, ide_dma_retune,
ide_dma_lostirq, ide_dma_timeout
ide_dma_lostirq, ide_dma_timeout,
ide_dma_read_queued, ide_dma_write_queued,
ide_dma_queued_start, ide_dma_queued_on,
ide_dma_queued_off,
} ide_dma_action_t;
enum {
......@@ -400,7 +411,7 @@ struct ata_channel {
#ifdef CONFIG_BLK_DEV_IDEPCI
struct pci_dev *pci_dev; /* for pci chipsets */
#endif
ide_drive_t drives[MAX_DRIVES]; /* drive info */
struct ata_device drives[MAX_DRIVES]; /* drive info */
struct gendisk *gd; /* gendisk structure */
/*
......@@ -409,32 +420,32 @@ struct ata_channel {
* A value of 255 indicates that the function should choose the optimal
* mode itself.
*/
void (*tuneproc) (ide_drive_t *, byte pio);
int (*speedproc) (ide_drive_t *, byte pio);
void (*tuneproc) (struct ata_device *, byte pio);
int (*speedproc) (struct ata_device *, byte pio);
/* tweaks hardware to select drive */
void (*selectproc) (ide_drive_t *);
void (*selectproc) (struct ata_device *);
/* routine to reset controller after a disk reset */
void (*resetproc) (ide_drive_t *);
void (*resetproc) (struct ata_device *);
/* special interrupt handling for shared pci interrupts */
void (*intrproc) (ide_drive_t *);
void (*intrproc) (struct ata_device *);
/* special host masking for drive selection */
void (*maskproc) (ide_drive_t *, int);
void (*maskproc) (struct ata_device *, int);
/* adjust timing based upon rq->cmd direction */
void (*rwproc) (ide_drive_t *, ide_dma_action_t);
void (*rwproc) (struct ata_device *, ide_dma_action_t);
/* check host's drive quirk list */
int (*quirkproc) (ide_drive_t *);
int (*quirkproc) (struct ata_device *);
/* CPU-polled transfer routines */
void (*ata_read)(ide_drive_t *, void *, unsigned int);
void (*ata_write)(ide_drive_t *, void *, unsigned int);
void (*atapi_read)(ide_drive_t *, void *, unsigned int);
void (*atapi_write)(ide_drive_t *, void *, unsigned int);
void (*ata_read)(struct ata_device *, void *, unsigned int);
void (*ata_write)(struct ata_device *, void *, unsigned int);
void (*atapi_read)(struct ata_device *, void *, unsigned int);
void (*atapi_write)(struct ata_device *, void *, unsigned int);
int (*udma)(ide_dma_action_t, struct ata_device *, struct request *); /* dma read/write/abort routine */
unsigned int *dmatable_cpu; /* dma physical region descriptor table (cpu view) */
......@@ -462,6 +473,7 @@ struct ata_channel {
unsigned highmem : 1; /* can do full 32-bit dma */
unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
unsigned no_unmask : 1; /* disallow setting unmask bit */
unsigned auto_poll : 1; /* supports nop auto-poll */
byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
byte unmask; /* flag: okay to unmask other irqs */
byte slow; /* flag: slow data port */
......@@ -470,7 +482,7 @@ struct ata_channel {
unsigned long last_time; /* time when previous rq was done */
#endif
byte straight8; /* Alan's straight 8 check */
int (*busproc)(ide_drive_t *, int); /* driver soft-power interface */
int (*busproc)(struct ata_device *, int); /* driver soft-power interface */
byte bus_state; /* power state of the IDE bus */
};
......@@ -501,6 +513,29 @@ struct ata_taskfile;
#define IDE_SLEEP 1
#define IDE_DMA 2 /* DMA in progress */
#define IDE_MAX_TAG 32
#ifdef CONFIG_BLK_DEV_IDE_TCQ
static inline int ata_pending_commands(struct ata_device *drive)
{
if (drive->using_tcq)
return blk_queue_tag_depth(&drive->queue);
return 0;
}
static inline int ata_can_queue(struct ata_device *drive)
{
if (drive->using_tcq)
return blk_queue_tag_queue(&drive->queue);
return 1;
}
#else
#define ata_pending_commands(drive) (0)
#define ata_can_queue(drive) (1)
#endif
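The two inline helpers above are the whole TCQ gating interface: ata_pending_commands() reports how many tagged commands are in flight, ata_can_queue() says whether another may be started, and both collapse to constants when CONFIG_BLK_DEV_IDE_TCQ is off. A minimal sketch of a caller (mychip_start_rq() is an invented name, not from this patch):

/* Illustrative submission path built on the helpers above. */
static int mychip_start_rq(struct ata_device *drive, struct request *rq)
{
	if (!ata_can_queue(drive))
		return 1;	/* tag depth exhausted; caller requeues */

	/* ... program the taskfile and kick off DMA here ... */
	return 0;
}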
typedef struct hwgroup_s {
ide_startstop_t (*handler)(struct ata_device *, struct request *); /* irq handler, if active */
unsigned long flags; /* BUSY, SLEEPING */
......@@ -746,14 +781,14 @@ struct ata_taskfile {
ide_startstop_t (*handler)(struct ata_device *, struct request *);
};
extern void ata_read(ide_drive_t *drive, void *buffer, unsigned int wcount);
extern void ata_write(ide_drive_t *drive, void *buffer, unsigned int wcount);
extern void ata_read(struct ata_device *, void *, unsigned int);
extern void ata_write(struct ata_device *, void *, unsigned int);
extern void atapi_read(ide_drive_t *drive, void *buffer, unsigned int bytecount);
extern void atapi_write(ide_drive_t *drive, void *buffer, unsigned int bytecount);
extern void atapi_read(struct ata_device *, void *, unsigned int);
extern void atapi_write(struct ata_device *, void *, unsigned int);
extern ide_startstop_t ata_taskfile(ide_drive_t *drive,
struct ata_taskfile *args, struct request *rq);
extern ide_startstop_t ata_taskfile(struct ata_device *,
struct ata_taskfile *, struct request *);
/*
* Special Flagged Register Validation Caller
......@@ -838,9 +873,9 @@ extern int idefloppy_init (void);
extern int idescsi_init (void);
#endif
ide_drive_t *ide_scan_devices (byte media, const char *name, struct ata_operations *driver, int n);
extern int ide_register_subdriver(ide_drive_t *drive, struct ata_operations *driver);
extern int ide_unregister_subdriver(ide_drive_t *drive);
extern struct ata_device *ide_scan_devices(byte, const char *, struct ata_operations *, int);
extern int ide_register_subdriver(struct ata_device *, struct ata_operations *);
extern int ide_unregister_subdriver(struct ata_device *drive);
#ifdef CONFIG_BLK_DEV_IDEPCI
# define ON_BOARD 1
......@@ -854,21 +889,22 @@ extern int ide_unregister_subdriver(ide_drive_t *drive);
void __init ide_scan_pcibus(int scan_direction);
#endif
#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func);
void ide_destroy_dmatable (ide_drive_t *drive);
extern int ide_build_dmatable(struct ata_device *, ide_dma_action_t);
extern void ide_destroy_dmatable(struct ata_device *);
extern ide_startstop_t ide_dma_intr(struct ata_device *, struct request *);
int check_drive_lists (ide_drive_t *drive, int good_bad);
int ide_dmaproc (ide_dma_action_t func, struct ata_device *drive, struct request *);
extern void ide_release_dma(struct ata_channel *hwif);
extern void ide_setup_dma(struct ata_channel *hwif,
unsigned long dmabase, unsigned int num_ports) __init;
extern int check_drive_lists(struct ata_device *, int good_bad);
extern int ide_dmaproc(ide_dma_action_t func, struct ata_device *, struct request *);
extern ide_startstop_t ide_tcq_dmaproc(ide_dma_action_t, struct ata_device *, struct request *);
extern void ide_release_dma(struct ata_channel *);
extern void ide_setup_dma(struct ata_channel *, unsigned long, unsigned int) __init;
extern int ide_start_dma(ide_dma_action_t, struct ata_device *);
#endif
extern spinlock_t ide_lock;
#define DRIVE_LOCK(drive) ((drive)->queue.queue_lock)
extern int drive_is_ready(ide_drive_t *drive);
extern int drive_is_ready(struct ata_device *drive);
extern void revalidate_drives(void);
#endif
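Note the pattern across this header: every exported prototype drops the ide_drive_t typedef in favour of the bare struct tag. A transitional alias of the following form would let unconverted drivers keep compiling; whether the tree still carries it is an assumption, not something shown in these hunks.

/* Transitional alias (assumed, for illustration only). */
typedef struct ata_device ide_drive_t;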
......@@ -374,7 +374,7 @@ static void __init change_floppy(char *fmt, ...)
va_start(args, fmt);
vsprintf(buf, fmt, args);
va_end(args);
fd = open("/dev/root", O_RDWR, 0);
fd = open("/dev/root", O_RDWR | O_NDELAY, 0);
if (fd >= 0) {
sys_ioctl(fd, FDEJECT, 0);
close(fd);
......
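The one-flag change in change_floppy() is load-bearing: a plain open of a floppy device blocks or fails when no readable medium is present, while O_NDELAY (O_NONBLOCK) lets the open succeed so the FDEJECT ioctl can be issued regardless of media state. The same idiom in ordinary userspace C, as a sketch:

#include <fcntl.h>
#include <linux/fd.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void eject_floppy(const char *dev)
{
	/* Nonblocking open succeeds even with no medium loaded. */
	int fd = open(dev, O_RDWR | O_NONBLOCK);

	if (fd >= 0) {
		ioctl(fd, FDEJECT, 0);	/* eject regardless of media state */
		close(fd);
	}
}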
......@@ -76,8 +76,12 @@ mem_map_t * mem_map;
*/
void __free_pte(pte_t pte)
{
struct page *page = pte_page(pte);
if ((!VALID_PAGE(page)) || PageReserved(page))
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (PageReserved(page))
return;
if (pte_dirty(pte))
set_page_dirty(page);
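This hunk is the canonical shape of the whole mem_map conversion: instead of computing a struct page from the pte and validating it after the fact with VALID_PAGE(), extract the page frame number, validate it with pfn_valid(), and only then map it to a struct page. As a helper (pte_to_valid_page() is an invented name summarizing the idiom, not a function this patch adds):

/* Sketch of the new idiom used throughout this patch. */
static struct page *pte_to_valid_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!pfn_valid(pfn))	/* replaces VALID_PAGE(pte_page(pte)) */
		return NULL;
	return pfn_to_page(pfn);
}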
......@@ -269,6 +273,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
do {
pte_t pte = *src_pte;
struct page *ptepage;
unsigned long pfn;
/* copy_one_pte */
......@@ -278,9 +283,11 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
swap_duplicate(pte_to_swp_entry(pte));
goto cont_copy_pte_range;
}
ptepage = pte_page(pte);
if ((!VALID_PAGE(ptepage)) ||
PageReserved(ptepage))
pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
goto cont_copy_pte_range;
ptepage = pfn_to_page(pfn);
if (PageReserved(ptepage))
goto cont_copy_pte_range;
/* If it's a COW mapping, write protect it both in the parent and the child */
......@@ -356,9 +363,13 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
if (pte_none(pte))
continue;
if (pte_present(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && !PageReserved(page))
freed ++;
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!PageReserved(page))
freed++;
}
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
} else {
......@@ -451,6 +462,7 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
unsigned long pfn;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || pgd_bad(*pgd))
......@@ -472,8 +484,11 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
preempt_enable();
if (pte_present(pte)) {
if (!write ||
(pte_write(pte) && pte_dirty(pte)))
return pte_page(pte);
(pte_write(pte) && pte_dirty(pte))) {
pfn = pte_pfn(pte);
if (pfn_valid(pfn))
return pfn_to_page(pfn);
}
}
out:
......@@ -488,8 +503,6 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
static inline struct page * get_page_map(struct page *page)
{
if (!VALID_PAGE(page))
return 0;
return page;
}
......@@ -853,22 +866,22 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
unsigned long phys_addr, pgprot_t prot)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
pfn = phys_addr >> PAGE_SHIFT;
do {
struct page *page;
pte_t oldpage;
oldpage = ptep_get_and_clear(pte);
pte_t oldpage = ptep_get_and_clear(pte);
page = virt_to_page(__va(phys_addr));
if ((!VALID_PAGE(page)) || PageReserved(page))
set_pte(pte, mk_pte_phys(phys_addr, prot));
if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
set_pte(pte, pfn_pte(pfn, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
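remap_pte_range() now steps a frame number rather than a byte address; the two forms build identical ptes, since mk_pte_phys(phys, prot) was always equivalent to pfn_pte(phys >> PAGE_SHIFT, prot). The loop-variable change in miniature:

/* Old and new forms build the same pte; the patch standardizes on pfn. */
unsigned long pfn = phys_addr >> PAGE_SHIFT;

set_pte(pte, pfn_pte(pfn, prot));		/* new, frame-number based */
/* set_pte(pte, mk_pte_phys(phys_addr, prot));	   old equivalent        */

pfn++;	/* one frame per iteration, instead of phys_addr += PAGE_SIZE */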
......@@ -977,10 +990,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
{
struct page *old_page, *new_page;
unsigned long pfn = pte_pfn(pte);
old_page = pte_page(pte);
if (!VALID_PAGE(old_page))
if (!pfn_valid(pfn))
goto bad_wp_page;
old_page = pfn_to_page(pfn);
if (!TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
......
......@@ -26,10 +26,14 @@ static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
pte_t pte = *ptep;
if (pte_present(pte) && pte_dirty(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
flush_tlb_page(vma, address);
set_page_dirty(page);
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
flush_tlb_page(vma, address);
set_page_dirty(page);
}
}
}
return 0;
......
......@@ -101,8 +101,6 @@ static void __free_pages_ok (struct page *page, unsigned int order)
BUG();
if (page->mapping)
BUG();
if (!VALID_PAGE(page))
BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
......@@ -295,8 +293,6 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
BUG();
if (page->mapping)
BUG();
if (!VALID_PAGE(page))
BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
......@@ -477,8 +473,10 @@ void __free_pages(struct page *page, unsigned int order)
void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0)
if (addr != 0) {
BUG_ON(!virt_addr_valid(addr));
__free_pages(virt_to_page(addr), order);
}
}
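With the VALID_PAGE() BUG checks dropped from __free_pages_ok() and balance_classzone(), the sanity check moves to the address-based entry point: free_pages() now BUGs on any pointer outside the kernel direct mapping before translating it. Well-behaved callers see no difference:

/* Usage sketch: allocate and free four contiguous pages. */
unsigned long buf = __get_free_pages(GFP_KERNEL, 2);

if (buf) {
	/* ... use the buffer ... */
	free_pages(buf, 2);	/* BUG_ON fires only for bogus addresses */
}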
/*
......
......@@ -1415,15 +1415,16 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
#if DEBUG
# define CHECK_NR(pg) \
do { \
if (!VALID_PAGE(pg)) { \
if (!virt_addr_valid(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
} while (0)
# define CHECK_PAGE(page) \
# define CHECK_PAGE(addr) \
do { \
CHECK_NR(page); \
struct page *page = virt_to_page(addr); \
CHECK_NR(addr); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
......@@ -1439,7 +1440,7 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
......@@ -1519,7 +1520,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
......@@ -1601,7 +1602,7 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
......@@ -1626,7 +1627,7 @@ void kfree (const void *objp)
if (!objp)
return;
local_irq_save(flags);
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
......
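CHECK_PAGE() now takes the object address itself rather than a precomputed struct page, doing the virt_to_page() translation internally; that is why every call site shrinks from CHECK_PAGE(virt_to_page(objp)) to CHECK_PAGE(objp). Roughly, the new macro expands to:

/* Approximate expansion of CHECK_PAGE(objp) after this patch. */
do {
	struct page *page = virt_to_page(objp);

	if (!virt_addr_valid(objp)) {		/* out-of-range pointer */
		printk(KERN_ERR "kfree: out of range ptr %lxh.\n",
		       (unsigned long)objp);
		BUG();
	}
	if (!PageSlab(page)) {			/* not a slab-backed page */
		printk(KERN_ERR "kfree: bad ptr %lxh.\n",
		       (unsigned long)objp);
		BUG();
	}
} while (0);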
......@@ -45,8 +45,12 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
if (pte_none(page))
continue;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
struct page *ptpage;
unsigned long pfn = pte_pfn(page);
if (!pfn_valid(pfn))
continue;
ptpage = pfn_to_page(pfn);
if (!PageReserved(ptpage))
__free_page(ptpage);
continue;
}
......
......@@ -216,9 +216,10 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
do {
if (pte_present(*pte)) {
struct page *page = pte_page(*pte);
unsigned long pfn = pte_pfn(*pte);
struct page *page = pfn_to_page(pfn);
if (VALID_PAGE(page) && !PageReserved(page)) {
if (pfn_valid(pfn) && !PageReserved(page)) {
count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
if (!count) {
address += PAGE_SIZE;
......