Commit 6471f52a authored by Mike Rapoport, committed by Linus Torvalds

alpha: switch to NO_BOOTMEM

Replace the bootmem allocator with memblock and enable NO_BOOTMEM, as on
most other architectures.

Alpha gets the description of the physical memory from the firmware as an
array of memory clusters.  Each cluster that is not reserved by the
firmware is added to memblock.memory.

Once memblock.memory is set up, we reserve the kernel and initrd pages
with memblock_reserve().

Since the bootmem bitmap is no longer needed, the code that finds an
appropriate place for it is removed.
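
In outline, the new flow boils down to the sketch below.  This is a condensed
illustration, not the patched setup_memory() itself: the cluster-usage test
and the memblock_add()/memblock_reserve() calls are taken from the diff, while
the helper name register_memory() is hypothetical and the mem= limit handling
and diagnostic printks are omitted.  It assumes the alpha-specific helpers
(hwrpb, for_each_mem_cluster(), KERNEL_START_PHYS, INITRD_SIZE) already
visible in the file being patched.

    /* Hypothetical, condensed sketch of the memblock-based setup. */
    static void __init register_memory(void *kernel_end)
    {
    	struct memclust_struct *cluster;
    	struct memdesc_struct *memdesc;
    	unsigned long kernel_size, i;

    	memdesc = (struct memdesc_struct *)
    		(hwrpb->mddt_offset + (unsigned long) hwrpb);

    	/* Add every cluster the firmware has not reserved to memblock.memory. */
    	for_each_mem_cluster(memdesc, cluster, i) {
    		if (cluster->usage & 3)	/* skip clusters reserved by the firmware */
    			continue;
    		memblock_add(PFN_PHYS(cluster->start_pfn),
    			     cluster->numpages << PAGE_SHIFT);
    	}

    	/* Reserve the kernel image... */
    	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
    	memblock_reserve(KERNEL_START_PHYS, kernel_size);

    #ifdef CONFIG_BLK_DEV_INITRD
    	/* ...and the initrd, if the bootloader passed one in. */
    	if (initrd_start)
    		memblock_reserve(virt_to_phys((void *)initrd_start), INITRD_SIZE);
    #endif
    }

Because memblock tracks ranges internally, no bootmem bitmap has to be placed
in memory, which is why the bitmap-placement search disappears from the code.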

The conversion does not touch the NUMA support, which has been marked broken
for more than 10 years.

Link: http://lkml.kernel.org/r/1535952894-10967-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e92d39cd
@@ -31,6 +31,8 @@ config ALPHA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND
 	select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
+	select HAVE_MEMBLOCK
+	select NO_BOOTMEM
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
...
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/initrd.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
@@ -241,8 +242,7 @@ albacore_init_arch(void)
 			size / 1024);
 	}
 #endif
-	reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop -
-			     pci_mem, BOOTMEM_DEFAULT);
+	memblock_reserve(pci_mem, memtop - pci_mem);
 	printk("irongate_init_arch: temporarily reserving "
 	       "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
 }
...
@@ -30,6 +30,7 @@
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
@@ -312,9 +313,7 @@ setup_memory(void *kernel_end)
 {
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
-	unsigned long start_kernel_pfn, end_kernel_pfn;
-	unsigned long bootmap_size, bootmap_pages, bootmap_start;
-	unsigned long start, end;
+	unsigned long kernel_size;
 	unsigned long i;
 
 	/* Find free clusters, and init and free the bootmem accordingly. */
@@ -322,6 +321,8 @@ setup_memory(void *kernel_end)
 		(hwrpb->mddt_offset + (unsigned long) hwrpb);
 
 	for_each_mem_cluster(memdesc, cluster, i) {
+		unsigned long end;
+
 		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
 		       i, cluster->usage, cluster->start_pfn,
 		       cluster->start_pfn + cluster->numpages);
@@ -335,6 +336,9 @@ setup_memory(void *kernel_end)
 		end = cluster->start_pfn + cluster->numpages;
 		if (end > max_low_pfn)
 			max_low_pfn = end;
+
+		memblock_add(PFN_PHYS(cluster->start_pfn),
+			     cluster->numpages << PAGE_SHIFT);
 	}
 
 	/*
@@ -363,87 +367,9 @@ setup_memory(void *kernel_end)
 		max_low_pfn = mem_size_limit;
 	}
 
-	/* Find the bounds of kernel memory. */
-	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
-	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-	bootmap_start = -1;
-
- try_again:
-	if (max_low_pfn <= end_kernel_pfn)
-		panic("not enough memory to boot");
-
-	/* We need to know how many physically contiguous pages
-	   we'll need for the bootmap. */
-	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
-
-	/* Now find a good region where to allocate the bootmap. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = start + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn
-			    && end - end_kernel_pfn >= bootmap_pages) {
-				bootmap_start = end_kernel_pfn;
-				break;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (end - start >= bootmap_pages) {
-			bootmap_start = start;
-			break;
-		}
-	}
-
-	if (bootmap_start == ~0UL) {
-		max_low_pfn >>= 1;
-		goto try_again;
-	}
-
-	/* Allocate the bootmap and mark the whole MM as reserved. */
-	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
-
-	/* Mark the free regions. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = cluster->start_pfn + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn) {
-				free_bootmem(PFN_PHYS(start),
-					     (PFN_PHYS(start_kernel_pfn)
-					      - PFN_PHYS(start)));
-				printk("freeing pages %ld:%ld\n",
-				       start, start_kernel_pfn);
-				start = end_kernel_pfn;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (start >= end)
-			continue;
-
-		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
-		printk("freeing pages %ld:%ld\n", start, end);
-	}
-
-	/* Reserve the bootmap memory. */
-	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
-			BOOTMEM_DEFAULT);
-	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+	/* Reserve the kernel memory. */
+	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
+	memblock_reserve(KERNEL_START_PHYS, kernel_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	initrd_start = INITRD_START;
@@ -459,8 +385,8 @@ setup_memory(void *kernel_end)
 			       initrd_end,
 			       phys_to_virt(PFN_PHYS(max_low_pfn)));
 		} else {
-			reserve_bootmem(virt_to_phys((void *)initrd_start),
-					INITRD_SIZE, BOOTMEM_DEFAULT);
+			memblock_reserve(virt_to_phys((void *)initrd_start),
+					 INITRD_SIZE);
 		}
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
...
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/swap.h>
 #include <linux/initrd.h>
 #include <linux/pfn.h>
@@ -59,12 +60,10 @@ setup_memory_node(int nid, void *kernel_end)
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
 	unsigned long start_kernel_pfn, end_kernel_pfn;
-	unsigned long bootmap_size, bootmap_pages, bootmap_start;
 	unsigned long start, end;
 	unsigned long node_pfn_start, node_pfn_end;
 	unsigned long node_min_pfn, node_max_pfn;
 	int i;
-	unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
 	int show_init = 0;
 
 	/* Find the bounds of current node */
@@ -134,24 +133,14 @@ setup_memory_node(int nid, void *kernel_end)
 	/* Cute trick to make sure our local node data is on local memory */
 	node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
 #endif
-	/* Quasi-mark the pg_data_t as in-use */
-	node_min_pfn += node_datasz;
-	if (node_min_pfn >= node_max_pfn) {
-		printk(" not enough mem to reserve NODE_DATA");
-		return;
-	}
-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
-
 	printk(" Detected node memory: start %8lu, end %8lu\n",
 	       node_min_pfn, node_max_pfn);
 
 	DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
-	DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);
 
 	/* Find the bounds of kernel memory. */
 	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
 	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-	bootmap_start = -1;
 
 	if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
 		panic("kernel loaded out of ram");
@@ -161,89 +150,11 @@ setup_memory_node(int nid, void *kernel_end)
 	   has much larger alignment than 8Mb, so it's safe. */
 	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
 
-	/* We need to know how many physically contiguous pages
-	   we'll need for the bootmap. */
-	bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);
-
-	/* Now find a good region where to allocate the bootmap. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = start + cluster->numpages;
-		if (start >= node_max_pfn || end <= node_min_pfn)
-			continue;
-		if (end > node_max_pfn)
-			end = node_max_pfn;
-		if (start < node_min_pfn)
-			start = node_min_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn
-			    && end - end_kernel_pfn >= bootmap_pages) {
-				bootmap_start = end_kernel_pfn;
-				break;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (end - start >= bootmap_pages) {
-			bootmap_start = start;
-			break;
-		}
-	}
-
-	if (bootmap_start == -1)
-		panic("couldn't find a contiguous place for the bootmap");
-
-	/* Allocate the bootmap and mark the whole MM as reserved. */
-	bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
-					 node_min_pfn, node_max_pfn);
-	DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
-		 bootmap_start, bootmap_size, bootmap_pages);
+	memblock_add(PFN_PHYS(node_min_pfn),
+		     (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
 
-	/* Mark the free regions. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = cluster->start_pfn + cluster->numpages;
-		if (start >= node_max_pfn || end <= node_min_pfn)
-			continue;
-		if (end > node_max_pfn)
-			end = node_max_pfn;
-		if (start < node_min_pfn)
-			start = node_min_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn) {
-				free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
-						  (PFN_PHYS(start_kernel_pfn)
-						   - PFN_PHYS(start)));
-				printk(" freeing pages %ld:%ld\n",
-				       start, start_kernel_pfn);
-				start = end_kernel_pfn;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (start >= end)
-			continue;
-
-		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
-		printk(" freeing pages %ld:%ld\n", start, end);
-	}
-
-	/* Reserve the bootmap memory. */
-	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start),
-			     bootmap_size, BOOTMEM_DEFAULT);
-	printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+	NODE_DATA(nid)->node_start_pfn = node_min_pfn;
+	NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
 
 	node_set_online(nid);
 }
@@ -251,6 +162,7 @@ setup_memory_node(int nid, void *kernel_end)
 void __init
 setup_memory(void *kernel_end)
 {
+	unsigned long kernel_size;
 	int nid;
 
 	show_mem_layout();
@@ -262,6 +174,9 @@ setup_memory(void *kernel_end)
 	for (nid = 0; nid < MAX_NUMNODES; nid++)
 		setup_memory_node(nid, kernel_end);
 
+	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
+	memblock_reserve(KERNEL_START_PHYS, kernel_size);
+
 #ifdef CONFIG_BLK_DEV_INITRD
 	initrd_start = INITRD_START;
 	if (initrd_start) {
@@ -279,9 +194,8 @@ setup_memory(void *kernel_end)
 			       phys_to_virt(PFN_PHYS(max_low_pfn)));
 		} else {
 			nid = kvaddr_to_nid(initrd_start);
-			reserve_bootmem_node(NODE_DATA(nid),
-					     virt_to_phys((void *)initrd_start),
-					     INITRD_SIZE, BOOTMEM_DEFAULT);
+			memblock_reserve(virt_to_phys((void *)initrd_start),
+					 INITRD_SIZE);
 		}
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -303,9 +217,8 @@ void __init paging_init(void)
 	dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
 	for_each_online_node(nid) {
-		bootmem_data_t *bdata = &bootmem_node_data[nid];
-		unsigned long start_pfn = bdata->node_min_pfn;
-		unsigned long end_pfn = bdata->node_low_pfn;
+		unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
+		unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_present_pages;
 
 		if (dma_local_pfn >= end_pfn - start_pfn)
 			zones_size[ZONE_DMA] = end_pfn - start_pfn;
...