Commit 78d2ba74 authored by Haren Myneni's avatar Haren Myneni Committed by Linus Torvalds

[PATCH] ppc64: implement page_is_ram

This patch removes __initdata from the lmb definition (struct lmb
lmb;) and modifies the existing page_is_ram function.

It also changes the argument from a physical address to a pfn to make it
compatible across architectures.  Please review the patches and send
comments/suggestions.  If any one patch is acceptable, please include it
in the mainline kernel.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8d51b032
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <asm/abs_addr.h> #include <asm/abs_addr.h>
#include <asm/bitops.h> #include <asm/bitops.h>
struct lmb lmb __initdata; struct lmb lmb;
static unsigned long __init static unsigned long __init
lmb_addrs_overlap(unsigned long base1, unsigned long size1, lmb_addrs_overlap(unsigned long base1, unsigned long size1,
......
...@@ -85,7 +85,6 @@ unsigned long __max_memory; ...@@ -85,7 +85,6 @@ unsigned long __max_memory;
/* info on what we think the IO hole is */ /* info on what we think the IO hole is */
unsigned long io_hole_start; unsigned long io_hole_start;
unsigned long io_hole_size; unsigned long io_hole_size;
unsigned long top_of_ram;
void show_mem(void) void show_mem(void)
{ {
...@@ -498,16 +497,12 @@ void __init mm_init_ppc64(void) ...@@ -498,16 +497,12 @@ void __init mm_init_ppc64(void)
* So we need some rough way to tell where your big IO hole * So we need some rough way to tell where your big IO hole
* is. On pmac, it's between 2G and 4G, on POWER3, it's around * is. On pmac, it's between 2G and 4G, on POWER3, it's around
* that area as well, on POWER4 we don't have one, etc... * that area as well, on POWER4 we don't have one, etc...
* We need that to implement something approx. decent for * We need that as a "hint" when sizing the TCE table on POWER3
* page_is_ram() so that /dev/mem doesn't map cacheable IO space
* when XFree resquest some IO regions witout using O_SYNC, we
* also need that as a "hint" when sizing the TCE table on POWER3
* So far, the simplest way that seem work well enough for us it * So far, the simplest way that seem work well enough for us it
* to just assume that the first discontinuity in our physical * to just assume that the first discontinuity in our physical
* RAM layout is the IO hole. That may not be correct in the future * RAM layout is the IO hole. That may not be correct in the future
* (and isn't on iSeries but then we don't care ;) * (and isn't on iSeries but then we don't care ;)
*/ */
top_of_ram = lmb_end_of_DRAM();
#ifndef CONFIG_PPC_ISERIES #ifndef CONFIG_PPC_ISERIES
for (i = 1; i < lmb.memory.cnt; i++) { for (i = 1; i < lmb.memory.cnt; i++) {
...@@ -530,22 +525,32 @@ void __init mm_init_ppc64(void) ...@@ -530,22 +525,32 @@ void __init mm_init_ppc64(void)
ppc64_boot_msg(0x100, "MM Init Done"); ppc64_boot_msg(0x100, "MM Init Done");
} }
/* /*
* This is called by /dev/mem to know if a given address has to * This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not * be mapped non-cacheable or not
*/ */
int page_is_ram(unsigned long physaddr) int page_is_ram(unsigned long pfn)
{ {
#ifdef CONFIG_PPC_ISERIES int i;
return 1; unsigned long paddr = (pfn << PAGE_SHIFT);
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base;
#ifdef CONFIG_MSCHUNKS
base = lmb.memory.region[i].physbase;
#else
base = lmb.memory.region[i].base;
#endif #endif
if (physaddr >= top_of_ram) if ((paddr >= base) &&
return 0; (paddr < (base + lmb.memory.region[i].size))) {
return io_hole_start == 0 || physaddr < io_hole_start || return 1;
physaddr >= (io_hole_start + io_hole_size); }
} }
return 0;
}
EXPORT_SYMBOL(page_is_ram);
/* /*
* Initialize the bootmem system and give it all the memory we * Initialize the bootmem system and give it all the memory we
...@@ -599,6 +604,7 @@ void __init paging_init(void) ...@@ -599,6 +604,7 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES]; unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size(); unsigned long total_ram = lmb_phys_mem_size();
unsigned long top_of_ram = lmb_end_of_DRAM();
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram); top_of_ram, total_ram);
......
...@@ -86,7 +86,7 @@ static inline int uncached_access(struct file *file, unsigned long addr) ...@@ -86,7 +86,7 @@ static inline int uncached_access(struct file *file, unsigned long addr)
* above the IO hole... Ah, and of course, XFree86 doesn't pass * above the IO hole... Ah, and of course, XFree86 doesn't pass
* O_SYNC when mapping us to tap IO space. Surprised ? * O_SYNC when mapping us to tap IO space. Surprised ?
*/ */
return !page_is_ram(addr); return !page_is_ram(addr >> PAGE_SHIFT);
#else #else
/* /*
* Accessing memory above the top the kernel knows about or through a file pointer * Accessing memory above the top the kernel knows about or through a file pointer
......
...@@ -47,7 +47,7 @@ struct lmb { ...@@ -47,7 +47,7 @@ struct lmb {
struct lmb_region reserved; struct lmb_region reserved;
}; };
extern struct lmb lmb __initdata; extern struct lmb lmb;
extern void __init lmb_init(void); extern void __init lmb_init(void);
extern void __init lmb_analyze(void); extern void __init lmb_analyze(void);
......
...@@ -181,8 +181,7 @@ static inline int get_order(unsigned long size) ...@@ -181,8 +181,7 @@ static inline int get_order(unsigned long size)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
/* Not 100% correct, for use by /dev/mem only */ extern int page_is_ram(unsigned long pfn);
extern int page_is_ram(unsigned long physaddr);
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment