Commit 27254059 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] use page_to_pfn() instead of mem_map[]

A patch from Martin Bligh which cleans up the open-coded uses of
mem_map for ia32.  Basically it replaces

  mem_map + pagenr

with

  pfn_to_page(pagenr)

in lots of places, because mem_map[] cannot be indexed that way with
discontigmem.

It also fixes a bug in bad_range() that happens to work on
contiguous-memory systems but is incorrect in general.  Tested both
with and without discontigmem support.
parent 0d0bdc8e
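The reason the open-coded form breaks is that under CONFIG_DISCONTIGMEM each node carries only its own slice of mem_map, so a raw page frame number is not an index into one global array. Below is a minimal user-space model of that situation; it is a sketch only, not kernel code, and the names in it (struct node, model_pfn_to_page(), the bank arrays) are invented stand-ins for pg_data_t, pfn_to_page() and the per-node mem_map slices.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct page { unsigned long dummy; };      /* stand-in for struct page */

struct node {                              /* stand-in for pg_data_t */
        unsigned long start_pfn;           /* first pfn owned by this node */
        unsigned long nr_pages;            /* number of pages in this node */
        struct page *node_mem_map;         /* per-node partial mem_map */
};

/* Two memory banks with a hole between them: pfns 0-3 and 100-103. */
static struct page bank0[4], bank1[4];
static struct node nodes[2] = {
        { .start_pfn = 0,   .nr_pages = 4, .node_mem_map = bank0 },
        { .start_pfn = 100, .nr_pages = 4, .node_mem_map = bank1 },
};

/* Model of pfn_to_page(): go through the node that owns the pfn. */
static struct page *model_pfn_to_page(unsigned long pfn)
{
        for (int i = 0; i < 2; i++) {
                struct node *n = &nodes[i];

                if (pfn >= n->start_pfn && pfn < n->start_pfn + n->nr_pages)
                        return n->node_mem_map + (pfn - n->start_pfn);
        }
        return NULL;                       /* pfn falls into the hole */
}

int main(void)
{
        unsigned long pfn = 102;

        /*
         * The old open-coded form, "bank0 + pfn", would index 102 entries
         * into a 4-entry array: fine only when all of memory sits in one
         * contiguous bank, nonsense otherwise.
         */
        struct page *good = model_pfn_to_page(pfn);

        assert(good == &bank1[2]);         /* pfn 102 is the third page of bank1 */
        printf("pfn %lu -> bank1[%td]\n", pfn, good - bank1);
        return 0;
}

Under these assumptions the program prints "pfn 102 -> bank1[2]": the translation has to go through the node that owns the pfn, which is what pfn_to_page() arranges on discontigmem configurations, whereas the flat "mem_map + pfn" arithmetic is only valid when all of memory lives in a single contiguous bank.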
@@ -235,7 +235,7 @@ void __init set_highmem_pages_init(int bad_ppro)
 {
         int pfn;
         for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-                one_highpage_init((struct page *)(mem_map + pfn), pfn, bad_ppro);
+                one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
         totalram_pages += totalhigh_pages;
 }
 #else
@@ -419,7 +419,7 @@ void __init test_wp_bit(void)
 static void __init set_max_mapnr_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-        highmem_start_page = mem_map + highstart_pfn;
+        highmem_start_page = pfn_to_page(highstart_pfn);
         max_mapnr = num_physpages = highend_pfn;
 #else
         max_mapnr = num_physpages = max_low_pfn;
@@ -458,7 +458,7 @@ void __init mem_init(void)
                 /*
                  * Only count reserved RAM pages
                  */
-                if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
+                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                         reservedpages++;

         set_highmem_pages_init(bad_ppro);
@@ -22,24 +22,26 @@
 void show_mem(void)
 {
-        int i, total = 0, reserved = 0;
+        int pfn, total = 0, reserved = 0;
         int shared = 0, cached = 0;
         int highmem = 0;
+        struct page *page;

         printk("Mem-info:\n");
         show_free_areas();
         printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-        i = max_mapnr;
-        while (i-- > 0) {
+        pfn = max_mapnr;
+        while (pfn-- > 0) {
+                page = pfn_to_page(pfn);
                 total++;
-                if (PageHighMem(mem_map+i))
+                if (PageHighMem(page))
                         highmem++;
-                if (PageReserved(mem_map+i))
+                if (PageReserved(page))
                         reserved++;
-                else if (PageSwapCache(mem_map+i))
+                else if (PageSwapCache(page))
                         cached++;
-                else if (page_count(mem_map+i))
-                        shared += page_count(mem_map+i) - 1;
+                else if (page_count(page))
+                        shared += page_count(page) - 1;
         }
         printk("%d pages of RAM\n", total);
         printk("%d pages of HIGHMEM\n",highmem);
@@ -1081,7 +1081,7 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *_dev)
                         frag->page_offset,
                         frag->size, PCI_DMA_TODEVICE);
                 dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
-                        (long long)buf, (long)(frag->page - mem_map),
+                        (long long)buf, (long) page_to_pfn(frag->page),
                         frag->page_offset);
                 len = frag->size;
                 frag++;
@@ -96,23 +96,7 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#ifdef CONFIG_HIGHMEM64G
-#ifndef CONFIG_DISCONTIGMEM
-#define page_to_phys(page)      ((u64)(page - mem_map) << PAGE_SHIFT)
-#else
-#define page_to_phys(page)      ((u64)(page - page_zone(page)->zone_mem_map + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
-#endif /* !CONFIG_DISCONTIGMEM */
-#else
-#ifndef CONFIG_DISCONTIGMEM
-#define page_to_phys(page)      ((page - mem_map) << PAGE_SHIFT)
-#else
-#define page_to_phys(page)      ((page - page_zone(page)->zone_mem_map + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
-#endif /* !CONFIG_DISCONTIGMEM */
-#endif /* CONFIG_HIGHMEM64G */
+#define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

 extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
@@ -107,7 +107,7 @@ static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
         if (direction == PCI_DMA_NONE)
                 BUG();

-        return (dma_addr_t)(page - mem_map) * PAGE_SIZE + offset;
+        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
 }

 static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
@@ -236,9 +236,7 @@ pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offse
 static __inline__ struct page *
 pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
 {
-        unsigned long poff = (dma_addr >> PAGE_SHIFT);
-
-        return mem_map + poff;
+        return pfn_to_page(dma_addr >> PAGE_SHIFT);
 }

 static __inline__ unsigned long
@@ -13,7 +13,7 @@
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
 {
         set_pmd(pmd, __pmd(_PAGE_TABLE +
-                ((unsigned long long)(pte - mem_map) <<
+                ((unsigned long long)page_to_pfn(pte) <<
                         (unsigned long long) PAGE_SHIFT)));
 }
 /*
@@ -235,8 +235,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
                 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

 #ifndef CONFIG_DISCONTIGMEM
-#define pmd_page(pmd) \
-        (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd)   (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 #endif /* !CONFIG_DISCONTIGMEM */

 #define pmd_large(pmd) \
@@ -468,31 +468,33 @@ static int count_and_copy_data_pages(struct pbe *pagedir_p)
 {
         int chunk_size;
         int nr_copy_pages = 0;
-        int loop;
+        int pfn;
+        struct page *page;

         if (max_mapnr != num_physpages)
                 panic("mapnr is not expected");
-        for (loop = 0; loop < max_mapnr; loop++) {
-                if (PageHighMem(mem_map+loop))
+        for (pfn = 0; pfn < max_mapnr; pfn++) {
+                page = pfn_to_page(pfn);
+                if (PageHighMem(page))
                         panic("Swsusp not supported on highmem boxes. Send 1GB of RAM to <pavel@ucw.cz> and try again ;-).");
-                if (!PageReserved(mem_map+loop)) {
-                        if (PageNosave(mem_map+loop))
+                if (!PageReserved(page)) {
+                        if (PageNosave(page))
                                 continue;
-                        if ((chunk_size=is_head_of_free_region(mem_map+loop))!=0) {
-                                loop += chunk_size - 1;
+                        if ((chunk_size=is_head_of_free_region(page))!=0) {
+                                pfn += chunk_size - 1;
                                 continue;
                         }
-                } else if (PageReserved(mem_map+loop)) {
-                        BUG_ON (PageNosave(mem_map+loop));
+                } else if (PageReserved(page)) {
+                        BUG_ON (PageNosave(page));

                         /*
                          * Just copy whole code segment. Hopefully it is not that big.
                          */
-                        if (ADDRESS(loop) >= (unsigned long)
-                            &__nosave_begin && ADDRESS(loop) <
+                        if (ADDRESS(pfn) >= (unsigned long)
+                            &__nosave_begin && ADDRESS(pfn) <
                             (unsigned long)&__nosave_end) {
-                                PRINTK("[nosave %x]", ADDRESS(loop));
+                                PRINTK("[nosave %x]", ADDRESS(pfn));
                                 continue;
                         }
                         /* Hmm, perhaps copying all reserved pages is not too healthy as they may contain
@@ -501,7 +503,7 @@ static int count_and_copy_data_pages(struct pbe *pagedir_p)

                 nr_copy_pages++;
                 if (pagedir_p) {
-                        pagedir_p->orig_address = ADDRESS(loop);
+                        pagedir_p->orig_address = ADDRESS(pfn);
                         copy_page(pagedir_p->address, pagedir_p->orig_address);
                         pagedir_p++;
                 }
@@ -46,9 +46,9 @@ static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
  */
 static inline int bad_range(struct zone *zone, struct page *page)
 {
-        if (page - mem_map >= zone->zone_start_mapnr + zone->size)
+        if (page_to_pfn(page) >= zone->zone_start_pfn + zone->size)
                 return 1;
-        if (page - mem_map < zone->zone_start_mapnr)
+        if (page_to_pfn(page) < zone->zone_start_pfn)
                 return 1;
         if (zone != page_zone(page))
                 return 1;