Commit afb7359a authored by Jes Sorensen, committed by Linus Torvalds

[PATCH] ia64 specific /dev/mem handlers

Convert the /dev/mem read/write paths to translate physical addresses
through the new xlate_dev_mem_ptr()/xlate_dev_kmem_ptr() hooks.  This is
needed on ia64 so that a page which has been converted to an uncached
mapping is not also accessed through a cached mapping afterwards, which
can lead to memory corruption.  Introduces a PG_uncached page flag for
marking such pages.
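
In outline, the new read path copies at most one page per iteration and
translates every chunk's address before touching it; a condensed sketch of
the loop that the hunks below add (not the complete function, the zero-page
special case is omitted):

	while (count > 0) {
		char *ptr;

		sz = -p & (PAGE_SIZE - 1);	/* bytes up to the next page boundary */
		if (sz == 0)
			sz = PAGE_SIZE;		/* p is already page aligned */
		sz = min_t(unsigned long, sz, count);

		ptr = xlate_dev_mem_ptr(p);	/* __va(p), or the uncached alias on ia64 */
		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}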

Also folds do_write_mem() into write_mem(), its only remaining user
(write_kmem() now goes through a new do_write_kmem() helper).

Introduce __ARCH_HAS_NO_PAGE_ZERO_MAPPED so that architectures which do
not have page zero mapped (sparc and m68k) can indicate that it needs
special handling.
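
Schematically, the feature macro replaces the open-coded architecture test
in the generic driver (both fragments appear in the hunks below):

	/* In the affected architecture's <asm/io.h> (sparc, m68k): */
	#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED	1

	/* In drivers/char/mem.c, replacing
	 * "#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))": */
	#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* skip over page 0 instead of copying it */
	#endif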
Signed-off-by: Jes Sorensen <jes@wildopensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 93903452
@@ -111,39 +111,6 @@ static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
}
#endif
static ssize_t do_write_mem(void *p, unsigned long realp,
const char __user * buf, size_t count, loff_t *ppos)
{
ssize_t written;
unsigned long copied;
written = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
/* we don't have page 0 mapped on sparc and m68k.. */
if (realp < PAGE_SIZE) {
unsigned long sz = PAGE_SIZE-realp;
if (sz > count) sz = count;
/* Hmm. Do something? */
buf+=sz;
p+=sz;
count-=sz;
written+=sz;
}
#endif
copied = copy_from_user(p, buf, count);
if (copied) {
ssize_t ret = written + (count - copied);
if (ret)
return ret;
return -EFAULT;
}
written += count;
*ppos += written;
return written;
}
/*
* This function reads the *physical* memory. The f_pos points directly to the
* memory location.
@@ -152,15 +119,16 @@ static ssize_t read_mem(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
ssize_t read;
ssize_t read, sz;
char *ptr;
if (!valid_phys_addr_range(p, &count))
return -EFAULT;
read = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
unsigned long sz = PAGE_SIZE-p;
sz = PAGE_SIZE - p;
if (sz > count)
sz = count;
if (sz > 0) {
@@ -173,9 +141,33 @@ static ssize_t read_mem(struct file * file, char __user * buf,
}
}
#endif
if (copy_to_user(buf, __va(p), count))
while (count > 0) {
/*
* Handle first page in case it's not aligned
*/
if (-p & (PAGE_SIZE - 1))
sz = -p & (PAGE_SIZE - 1);
else
sz = PAGE_SIZE;
sz = min_t(unsigned long, sz, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
ptr = xlate_dev_mem_ptr(p);
if (copy_to_user(buf, ptr, sz))
return -EFAULT;
read += count;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
*ppos += read;
return read;
}
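
The "-p & (PAGE_SIZE - 1)" expression that recurs in these loops computes the
number of bytes left before the next page boundary. A small illustrative
helper, hypothetical and named chunk_size() purely for exposition, spells out
the arithmetic:

	/*
	 * With PAGE_SIZE == 0x1000, p == 0x1ff0 and count == 100 this returns
	 * 0x10 (16 bytes), which brings p up to the 0x2000 boundary; from then
	 * on p stays aligned and whole pages are copied until count runs out.
	 */
	static size_t chunk_size(unsigned long p, size_t count)
	{
		size_t sz = -p & (PAGE_SIZE - 1);	/* 0 if p is page aligned */

		if (sz == 0)
			sz = PAGE_SIZE;			/* aligned: take a full page */
		return sz < count ? sz : count;		/* never exceed the request */
	}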
@@ -184,10 +176,64 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
ssize_t written, sz;
unsigned long copied;
void *ptr;
if (!valid_phys_addr_range(p, &count))
return -EFAULT;
return do_write_mem(__va(p), p, buf, count, ppos);
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
unsigned long sz = PAGE_SIZE - p;
if (sz > count)
sz = count;
/* Hmm. Do something? */
buf += sz;
p += sz;
count -= sz;
written += sz;
}
#endif
while (count > 0) {
/*
* Handle first page in case it's not aligned
*/
if (-p & (PAGE_SIZE - 1))
sz = -p & (PAGE_SIZE - 1);
else
sz = PAGE_SIZE;
sz = min_t(unsigned long, sz, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
ptr = xlate_dev_mem_ptr(p);
copied = copy_from_user(ptr, buf, sz);
if (copied) {
ssize_t ret;
ret = written + (sz - copied);
if (ret)
return ret;
return -EFAULT;
}
buf += sz;
p += sz;
count -= sz;
written += sz;
}
*ppos += written;
return written;
}
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
@@ -221,16 +267,17 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
ssize_t read = 0;
ssize_t virtr = 0;
ssize_t read, virtr, sz;
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
read = 0;
virtr = 0;
if (p < (unsigned long) high_memory) {
read = count;
if (count > (unsigned long) high_memory - p)
read = (unsigned long) high_memory - p;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE && read > 0) {
size_t tmp = PAGE_SIZE - p;
@@ -243,11 +290,31 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
count -= tmp;
}
#endif
if (copy_to_user(buf, (char *)p, read))
while (read > 0) {
/*
* Handle first page in case it's not aligned
*/
if (-p & (PAGE_SIZE - 1))
sz = -p & (PAGE_SIZE - 1);
else
sz = PAGE_SIZE;
sz = min_t(unsigned long, sz, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
kbuf = xlate_dev_kmem_ptr((char *)p);
if (copy_to_user(buf, kbuf, sz))
return -EFAULT;
p += read;
buf += read;
count -= read;
buf += sz;
p += sz;
read -= sz;
count -= sz;
}
}
if (count > 0) {
@@ -277,6 +344,70 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
return virtr + read;
}
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
size_t count, loff_t *ppos)
{
ssize_t written, sz;
unsigned long copied;
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (realp < PAGE_SIZE) {
unsigned long sz = PAGE_SIZE - realp;
if (sz > count)
sz = count;
/* Hmm. Do something? */
buf += sz;
p += sz;
realp += sz;
count -= sz;
written += sz;
}
#endif
while (count > 0) {
char *ptr;
/*
* Handle first page in case it's not aligned
*/
if (-realp & (PAGE_SIZE - 1))
sz = -realp & (PAGE_SIZE - 1);
else
sz = PAGE_SIZE;
sz = min_t(unsigned long, sz, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
ptr = xlate_dev_kmem_ptr(p);
copied = copy_from_user(ptr, buf, sz);
if (copied) {
ssize_t ret;
ret = written + (sz - copied);
if (ret)
return ret;
return -EFAULT;
}
buf += sz;
p += sz;
realp += sz;
count -= sz;
written += sz;
}
*ppos += written;
return written;
}
/*
* This function writes to the *virtual* memory as seen by the kernel.
*/
@@ -295,7 +426,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
if (count > (unsigned long) high_memory - p)
wrote = (unsigned long) high_memory - p;
written = do_write_mem((void*)p, p, buf, wrote, ppos);
written = do_write_kmem((void*)p, p, buf, wrote, ppos);
if (written != wrote)
return written;
wrote = written;
......
@@ -666,6 +666,17 @@ isa_memcpy_toio(unsigned long offset, const void *src, long n)
#define writeq writeq
#define readq readq
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* __ALPHA_IO_H */
@@ -271,5 +271,16 @@ extern void __iounmap(void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_IO_H */
@@ -420,5 +420,16 @@ extern void consistent_sync(void *vaddr, size_t size, int rw);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_IO_H */
@@ -86,4 +86,15 @@ extern void iounmap(void *addr);
#define outsw(x,y,z)
#define outsl(x,y,z)
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif
@@ -273,6 +273,18 @@ static inline void flush_write_buffers(void)
__asm__ __volatile__ ("membar" : : :"memory");
}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _ASM_IO_H */
@@ -317,6 +317,17 @@ static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _H8300_IO_H */
@@ -49,6 +49,17 @@
#include <linux/vmalloc.h>
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
......
@@ -35,6 +35,8 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <asm/intrinsics.h>
#include <asm/pgtable.h>
@@ -367,4 +369,38 @@ ia64_done_with_exception (struct pt_regs *regs)
return 0;
}
#define ARCH_HAS_TRANSLATE_MEM_PTR 1
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
struct page *page;
char * ptr;
page = pfn_to_page(p >> PAGE_SHIFT);
if (PageUncached(page))
ptr = (char *)p + __IA64_UNCACHED_OFFSET;
else
ptr = __va(p);
return ptr;
}
/*
* Convert a virtual cached kernel memory pointer to an uncached pointer
*/
static __inline__ char *
xlate_dev_kmem_ptr (char * p)
{
struct page *page;
char * ptr;
page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
if (PageUncached(page))
ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
else
ptr = p;
return ptr;
}
#endif /* _ASM_IA64_UACCESS_H */
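
For context: the flag these helpers test has to be set by whatever code
establishes the uncached mapping in the first place. A hypothetical caller
(not part of this patch) would use the page-flag accessors added in the
page-flags.h hunk at the end of the diff:

	struct page *page = virt_to_page(kaddr);	/* kaddr: cached kernel address, hypothetical */

	SetPageUncached(page);		/* /dev/mem and /dev/kmem now use the uncached alias */
	/* ... and once the page returns to normal cached use ... */
	ClearPageUncached(page);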
@@ -216,6 +216,17 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count)
memcpy((void __force *) dst, src, count);
}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _ASM_M32R_IO_H */
@@ -359,4 +359,18 @@ extern void dma_cache_inv(unsigned long start, unsigned long size);
#endif
#endif /* __KERNEL__ */
#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* _IO_H */
@@ -187,6 +187,17 @@ extern void iounmap(void *addr);
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _M68KNOMMU_IO_H */
@@ -616,4 +616,15 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* _ASM_IO_H */
@@ -404,4 +404,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#include <asm-generic/iomap.h>
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif
@@ -552,4 +552,15 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#include <asm/mpc8260_pci9.h>
#endif
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
@@ -442,6 +442,17 @@ static inline int check_signature(const volatile void __iomem * io_addr,
extern int check_legacy_ioport(unsigned long base_port);
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _PPC64_IO_H */
@@ -107,6 +107,17 @@ extern void iounmap(void *addr);
#define mmiowb()
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif
@@ -274,4 +274,17 @@ extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
#endif
#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* !(__SPARC_IO_H) */
@@ -485,6 +485,17 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif
#endif /* !(__SPARC64_IO_H) */
@@ -22,4 +22,15 @@ static inline void * phys_to_virt(unsigned long address)
return __va(address);
}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif
@@ -119,4 +119,15 @@ outsl (unsigned long port, const void *src, unsigned long count)
#define memcpy_fromio(dst, src, len) memcpy (dst, (void *)src, len)
#define memcpy_toio(dst, src, len) memcpy ((void *)dst, src, len)
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __V850_IO_H__ */
@@ -329,6 +329,17 @@ static inline int check_signature(void __iomem *io_addr,
extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif
@@ -75,7 +75,7 @@
#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
#define PG_reclaim 18 /* To be reclaimed asap */
#define PG_nosave_free 19 /* Free, should not be written */
#define PG_uncached 20 /* Page has been mapped as uncached */
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
@@ -301,6 +301,10 @@ extern void __mod_page_state(unsigned offset, unsigned long delta);
#define PageSwapCache(page) 0
#endif
#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
struct page; /* forward declaration */
int test_clear_page_dirty(struct page *page);
......