Commit f5539ad0 authored by Geert Uytterhoeven's avatar Geert Uytterhoeven Committed by Linus Torvalds

[PATCH] M68k virt/phys fallback removal

Remove m68k fallback function for virtual/physical address conversions
(should there be any driver relying on this, that driver is seriously b0rken)
(from Roman Zippel)
parent 6ade2cd2
......@@ -45,7 +45,6 @@ EXPORT_SYMBOL(mm_end_of_chunk);
#else
EXPORT_SYMBOL(m68k_memoffset);
#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
EXPORT_SYMBOL(mm_vtop_fallback);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(kernel_set_cachemode);
......
......@@ -133,34 +133,6 @@ int free_pointer_table (pmd_t *ptable)
return 0;
}
/*
 * Check whether a 680x0 transparent-translation (TT) register covers
 * the given virtual address for supervisor data accesses.
 *
 * regval: raw contents of a TT register (tt0/tt1 on the '030,
 *         dtt0/dtt1 on the '040/'060).
 * vaddr:  virtual address to test.
 *
 * Returns nonzero if the register is enabled, its attributes match a
 * supervisor data access, and the masked high address byte matches.
 */
static unsigned long transp_transl_matches(unsigned long regval,
					   unsigned long vaddr)
{
	unsigned long base, mask;

	/* Bit 15 enables the translation; a disabled register never matches. */
	if (!(regval & 0x8000))
		return 0;

	if (CPU_IS_030) {
		/* '030: match the function-code field against SUPER_DATA. */
		base = (regval >> 4) & 7;
		mask = ~(regval & 7);
		if (((SUPER_DATA ^ base) & mask) != 0)
			return 0;
	} else {
		/* '040/'060: a user-only mapping cannot satisfy us. */
		if (!(regval & 0x6000))
			return 0;
	}

	/* Compare the top address byte under the register's address mask. */
	base = regval & 0xff000000;
	mask = ~(regval << 8) & 0xff000000;
	return (((unsigned long)vaddr ^ base) & mask) == 0;
}
#if DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif
......@@ -190,134 +162,9 @@ unsigned long mm_vtop(unsigned long vaddr)
if (voff == 0)
return m68k_memory[i-1].addr + m68k_memory[i-1].size;
return mm_vtop_fallback(vaddr);
}
#endif
/* Separate function to make the common case faster (needs to save less
registers) */
/*
 * mm_vtop_fallback() - slow-path virtual-to-physical translation.
 *
 * Reached when @vaddr did not fall inside any known memory chunk.
 * First tests the CPU's transparent-translation registers; if none
 * matches, asks the MMU itself for the translation (PLPAR on '060,
 * PTESTR on '030/'040).
 *
 * Returns the physical address, or (unsigned long)-1 on failure.
 */
unsigned long mm_vtop_fallback(unsigned long vaddr)
{
/* not in one of the memory chunks; test for applying transparent
* translation */
if (CPU_IS_030) {
unsigned long ttreg;
/* Read tt0, then tt1; PMOVE on the '030 stores via a memory operand. */
asm volatile( ".chip 68030\n\t"
"pmove %/tt0,%0@\n\t"
".chip 68k"
: : "a" (&ttreg) );
if (transp_transl_matches( ttreg, vaddr ))
return (unsigned long)vaddr;
asm volatile( ".chip 68030\n\t"
"pmove %/tt1,%0@\n\t"
".chip 68k"
: : "a" (&ttreg) );
if (transp_transl_matches( ttreg, vaddr ))
return (unsigned long)vaddr;
}
else if (CPU_IS_040_OR_060) {
unsigned long ttreg;
/* '040/'060 expose the data TT registers through MOVEC instead. */
asm volatile( ".chip 68040\n\t"
"movec %%dtt0,%0\n\t"
".chip 68k"
: "=d" (ttreg) );
if (transp_transl_matches( ttreg, vaddr ))
return (unsigned long)vaddr;
asm volatile( ".chip 68040\n\t"
"movec %%dtt1,%0\n\t"
".chip 68k"
: "=d" (ttreg) );
if (transp_transl_matches( ttreg, vaddr ))
return (unsigned long)vaddr;
}
/* no match, too, so get the actual physical address from the MMU. */
if (CPU_IS_060) {
mm_segment_t fs = get_fs();
unsigned long paddr;
/* Probe as supervisor data; restored below. */
set_fs (MAKE_MM_SEG(SUPER_DATA));
/* The PLPAR instruction causes an access error if the translation
* is not possible. To catch this we use the same exception mechanism
* as for user space accesses in <asm/uaccess.h>. */
asm volatile (".chip 68060\n"
"1: plpar (%0)\n"
".chip 68k\n"
"2:\n"
".section .fixup,\"ax\"\n"
" .even\n"
"3: lea -1,%0\n"    /* fixup: report failure as -1 */
" jra 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 1b,3b\n"
".previous"
: "=a" (paddr)
: "0" (vaddr));
set_fs (fs);
return paddr;
} else if (CPU_IS_040) {
unsigned long mmusr;
mm_segment_t fs = get_fs();
/* Probe as supervisor data, then read the result from MMUSR. */
set_fs (MAKE_MM_SEG(SUPER_DATA));
asm volatile (".chip 68040\n\t"
"ptestr (%1)\n\t"
"movec %%mmusr, %0\n\t"
".chip 68k"
: "=r" (mmusr)
: "a" (vaddr));
set_fs (fs);
if (mmusr & MMU_T_040) {
return (unsigned long)vaddr; /* Transparent translation */
}
/* Resident: combine the page frame with the in-page offset. */
if (mmusr & MMU_R_040)
return (mmusr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
printk("VTOP040: bad virtual address %lx (%lx)", vaddr, mmusr);
return -1;
} else {
/* '030 path: PTESTR leaves the last descriptor address in an
* address register and the status in the PSR image. */
volatile unsigned short temp;
unsigned short mmusr;
unsigned long *descaddr;
asm volatile ("ptestr #5,%2@,#7,%0\n\t"
"pmove %/psr,%1@"
: "=a&" (descaddr)
: "a" (&temp), "a" (vaddr));
mmusr = temp;
if (mmusr & (MMU_I|MMU_B|MMU_L))
printk("VTOP030: bad virtual address %lx (%x)\n", vaddr, mmusr);
/* The descriptor address is physical; map it so we can read it. */
descaddr = phys_to_virt((unsigned long)descaddr);
/* Number of table levels walked determines the descriptor's
* page-size field, i.e. how many vaddr bits are the offset. */
switch (mmusr & MMU_NUM) {
case 1:
return (*descaddr & 0xfe000000) | ((unsigned long)vaddr & 0x01ffffff);
case 2:
return (*descaddr & 0xfffc0000) | ((unsigned long)vaddr & 0x0003ffff);
case 3:
return (*descaddr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
default:
printk("VTOP: bad levels (%u) for virtual address %lx\n",
mmusr & MMU_NUM, vaddr);
}
}
printk("VTOP: bad virtual address %lx\n", vaddr);
return -1;
}
#endif
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
......@@ -342,29 +189,6 @@ unsigned long mm_ptov (unsigned long paddr)
printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
paddr, __builtin_return_address(0));
}
#endif
/*
* assume that the kernel virtual address is the same as the
* physical address.
*
* This should be reasonable in most situations:
* 1) They shouldn't be dereferencing the virtual address
* unless they are sure that it is valid from kernel space.
* 2) The only usage I see so far is converting a page table
* reference to some non-FASTMEM address space when freeing
* mmaped "/dev/mem" pages. These addresses are just passed
* to "free_page", which ignores addresses that aren't in
* the memory list anyway.
*
*/
#ifdef CONFIG_AMIGA
/*
* if on an amiga and address is in first 16M, move it
* to the ZTWO_VADDR range
*/
if (MACH_IS_AMIGA && paddr < 16*1024*1024)
return ZTWO_VADDR(paddr);
#endif
return -1;
}
......
......@@ -20,7 +20,6 @@
*/
#ifndef CONFIG_SUN3
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_vtop_fallback (unsigned long) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#else
extern inline unsigned long mm_vtop(unsigned long vaddr)
......@@ -35,39 +34,22 @@ extern inline unsigned long mm_ptov(unsigned long paddr)
#endif
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
extern inline unsigned long virt_to_phys(volatile void *vaddr)
static inline unsigned long virt_to_phys(void *vaddr)
{
unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
if (voff < m68k_memory[0].size)
return voff + m68k_memory[0].addr;
return mm_vtop_fallback((unsigned long)vaddr);
return (unsigned long)vaddr - PAGE_OFFSET + m68k_memory[0].addr;
}
extern inline void * phys_to_virt(unsigned long paddr)
static inline void * phys_to_virt(unsigned long paddr)
{
unsigned long poff = paddr - m68k_memory[0].addr;
if (poff < m68k_memory[0].size)
return (void *)(poff + PAGE_OFFSET);
#ifdef CONFIG_AMIGA
/*
* if on an amiga and address is in first 16M, move it
* to the ZTWO_VADDR range
*/
if (MACH_IS_AMIGA && paddr < 16*1024*1024)
return (void *)ZTWO_VADDR(paddr);
#endif
return (void *)paddr;
return (void *)(paddr - m68k_memory[0].addr + PAGE_OFFSET);
}
#else
extern inline unsigned long virt_to_phys(volatile void * address)
static inline unsigned long virt_to_phys(void *address)
{
return mm_vtop((unsigned long)address);
}
extern inline void * phys_to_virt(unsigned long address)
static inline void *phys_to_virt(unsigned long address)
{
return (void *) mm_ptov(address);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment