Commit b640f042 authored by Linus Torvalds

Merge branch 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'topic/slab/earlyboot' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  vgacon: use slab allocator instead of the bootmem allocator
  irq: use kcalloc() instead of the bootmem allocator
  sched: use slab in cpupri_init()
  sched: use alloc_cpumask_var() instead of alloc_bootmem_cpumask_var()
  memcg: don't use bootmem allocator in setup code
  irq/cpumask: make memoryless node zero happy
  x86: remove some alloc_bootmem_cpumask_var calling
  vt: use kzalloc() instead of the bootmem allocator
  sched: use kzalloc() instead of the bootmem allocator
  init: introduce mm_init()
  vmalloc: use kzalloc() instead of alloc_bootmem()
  slab: setup allocators earlier in the boot sequence
  bootmem: fix slab fallback on numa
  bootmem: use slab if bootmem is no longer available
parents 871fa907 b8ec7573
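
Editor's note: every patch in this series applies the same substitution, enabled by the new mm_init() call early in start_kernel(): code that previously had to use the never-failing bootmem allocator can now use the regular slab API, with GFP_NOWAIT because interrupts are still disabled at these call sites. A minimal sketch of the pattern (illustrative only, not part of the commit; names are placeholders):

        /* Before: slab is not up yet, so early-boot code uses bootmem,
         * which panics internally instead of returning NULL: */
        buf = alloc_bootmem(size);

        /* After: mm_init() brings the slab allocator up before these
         * call sites run; GFP_NOWAIT keeps the allocation from
         * sleeping, since IRQs are still disabled this early: */
        buf = kzalloc(size, GFP_NOWAIT);        /* zeroed, non-sleeping */
        if (!buf)
                panic("early allocation failed"); /* hypothetical handling */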
@@ -177,16 +177,18 @@ int __init arch_early_irq_init(void)
         struct irq_cfg *cfg;
         struct irq_desc *desc;
         int count;
+        int node;
         int i;

         cfg = irq_cfgx;
         count = ARRAY_SIZE(irq_cfgx);
+        node = cpu_to_node(boot_cpu_id);

         for (i = 0; i < count; i++) {
                 desc = irq_to_desc(i);
                 desc->chip_data = &cfg[i];
-                alloc_bootmem_cpumask_var(&cfg[i].domain);
-                alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+                alloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
+                alloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
                 if (i < NR_IRQS_LEGACY)
                         cpumask_setall(cfg[i].domain);
         }
@@ -95,7 +95,6 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/pm.h>
 #include <linux/font.h>
 #include <linux/bitops.h>
@@ -2875,14 +2874,11 @@ static int __init con_init(void)
                 mod_timer(&console_timer, jiffies + blankinterval);
         }

-        /*
-         * kmalloc is not running yet - we use the bootmem allocator.
-         */
         for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
-                vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data));
+                vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
                 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
                 visual_init(vc, currcons, 1);
-                vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size);
+                vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
                 vc->vc_kmalloced = 0;
                 vc_init(vc, vc->vc_rows, vc->vc_cols,
                         currcons || !vc->vc_sw->con_save_screen);
@@ -180,7 +180,7 @@ static inline void vga_set_mem_top(struct vc_data *c)
 }

 #ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-#include <linux/bootmem.h>
+#include <linux/slab.h>
 /* software scrollback */
 static void *vgacon_scrollback;
 static int vgacon_scrollback_tail;
@@ -210,8 +210,7 @@ static void vgacon_scrollback_init(int pitch)
  */
 static void __init_refok vgacon_scrollback_startup(void)
 {
-        vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
-                                          * 1024);
+        vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
         vgacon_scrollback_init(vga_video_num_columns * 2);
 }
@@ -430,23 +430,19 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
  * Returns true if successful (or not required).
  */
 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
                                                         bool boot)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-        if (boot) {
-                alloc_bootmem_cpumask_var(&desc->affinity);
+        gfp_t gfp = GFP_ATOMIC;

-#ifdef CONFIG_GENERIC_PENDING_IRQ
-                alloc_bootmem_cpumask_var(&desc->pending_mask);
-#endif
-                return true;
-        }
+        if (boot)
+                gfp = GFP_NOWAIT;

-        if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+#ifdef CONFIG_CPUMASK_OFFSTACK
+        if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
                 return false;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+        if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                 free_cpumask_var(desc->affinity);
                 return false;
         }
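
Editor's note on the flag choice above: GFP_NOWAIT allocates without sleeping, like GFP_ATOMIC, but does not dip into the emergency reserves. In gfp.h of this era it reads roughly as follows (quoted from memory; worth double-checking against the tree):

#define GFP_NOWAIT      (GFP_ATOMIC & ~__GFP_HIGH)  /* atomic, no reserves */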
@@ -533,6 +533,16 @@ void __init __weak thread_info_cache_init(void)
 {
 }

+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+        mem_init();
+        kmem_cache_init();
+        vmalloc_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
         char * command_line;
@@ -574,6 +584,23 @@ asmlinkage void __init start_kernel(void)
         setup_nr_cpu_ids();
         smp_prepare_boot_cpu();        /* arch-specific boot-cpu hooks */

+        build_all_zonelists();
+        page_alloc_init();
+
+        printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+        parse_early_param();
+        parse_args("Booting kernel", static_command_line, __start___param,
+                   __stop___param - __start___param,
+                   &unknown_bootoption);
+        /*
+         * These use large bootmem allocations and must precede
+         * kmem_cache_init()
+         */
+        pidhash_init();
+        vfs_caches_init_early();
+        sort_main_extable();
+        trap_init();
+        mm_init();
+
         /*
          * Set up the scheduler prior starting any interrupts (such as the
          * timer interrupt). Full topology setup happens at smp_init()
@@ -585,25 +612,15 @@ asmlinkage void __init start_kernel(void)
          * fragile until we cpu_idle() for the first time.
          */
         preempt_disable();
-        build_all_zonelists();
-        page_alloc_init();
-        printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
-        parse_early_param();
-        parse_args("Booting kernel", static_command_line, __start___param,
-                   __stop___param - __start___param,
-                   &unknown_bootoption);
         if (!irqs_disabled()) {
                 printk(KERN_WARNING "start_kernel(): bug: interrupts were "
                                 "enabled *very* early, fixing it\n");
                 local_irq_disable();
         }
-        sort_main_extable();
-        trap_init();
         rcu_init();
         /* init some links before init_ISA_irqs() */
         early_irq_init();
         init_IRQ();
-        pidhash_init();
         init_timers();
         hrtimers_init();
         softirq_init();
@@ -645,14 +662,10 @@ asmlinkage void __init start_kernel(void)
                 initrd_start = 0;
         }
 #endif
-        vmalloc_init();
-        vfs_caches_init_early();
         cpuset_init_early();
         page_cgroup_init();
-        mem_init();
         enable_debug_pagealloc();
         cpu_hotplug_init();
-        kmem_cache_init();
         kmemtrace_init();
         debug_objects_mem_init();
         idr_init_cache();
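
Editor's note: condensing the three init/main.c hunks above, the early boot sequence after this merge runs roughly as follows (a summary sketch, not literal kernel code; unrelated calls omitted):

        build_all_zonelists();
        page_alloc_init();              /* page allocator usable       */
        parse_early_param();
        pidhash_init();                 /* large bootmem users ...     */
        vfs_caches_init_early();        /* ... must precede slab setup */
        sort_main_extable();
        trap_init();
        mm_init();                      /* mem_init(), kmem_cache_init(),
                                           vmalloc_init()              */
        sched_init();                   /* may now kzalloc(GFP_NOWAIT) */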
@@ -1857,7 +1857,7 @@ struct cgroup_subsys cpuset_subsys = {

 int __init cpuset_init_early(void)
 {
-        alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+        alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT);

         top_cpuset.mems_generation = cpuset_mems_generation++;
         return 0;
@@ -150,6 +150,7 @@ int __init early_irq_init(void)
 {
         struct irq_desc *desc;
         int legacy_count;
+        int node;
         int i;

         init_irq_default_affinity();
@@ -160,20 +161,20 @@ int __init early_irq_init(void)
         desc = irq_desc_legacy;
         legacy_count = ARRAY_SIZE(irq_desc_legacy);
+        node = first_online_node;

         /* allocate irq_desc_ptrs array based on nr_irqs */
-        irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+        irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

         /* allocate based on nr_cpu_ids */
-        /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
-        kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
-                                          sizeof(int));
+        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int), GFP_NOWAIT, node);

         for (i = 0; i < legacy_count; i++) {
                 desc[i].irq = i;
                 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-                alloc_desc_masks(&desc[i], 0, true);
+                alloc_desc_masks(&desc[i], node, true);
                 init_desc_masks(&desc[i]);
                 irq_desc_ptrs[i] = desc + i;
         }
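
Editor's note: the two replacement calls above differ deliberately. A side-by-side restatement (from the hunk, for emphasis):

        /* kcalloc(n, size, flags): zeroed array, with the n * size
         * multiplication checked for overflow: */
        irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
        /* kzalloc_node(): zeroed allocation with an explicit NUMA node
         * preference, so the legacy IRQ stat counters land near the
         * boot node: */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                         sizeof(int), GFP_NOWAIT, node);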
@@ -111,12 +111,6 @@ int __ref profile_init(void)
         /* only text is profiled */
         prof_len = (_etext - _stext) >> prof_shift;
         buffer_bytes = prof_len*sizeof(atomic_t);

-        if (!slab_is_available()) {
-                prof_buffer = alloc_bootmem(buffer_bytes);
-                alloc_bootmem_cpumask_var(&prof_cpu_mask);
-                cpumask_copy(prof_cpu_mask, cpu_possible_mask);
-                return 0;
-        }
-
         if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
                 return -ENOMEM;
@@ -68,7 +68,6 @@
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
-#include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
@@ -7782,24 +7781,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
 {
+        gfp_t gfp = GFP_KERNEL;
+
         memset(rd, 0, sizeof(*rd));

-        if (bootmem) {
-                alloc_bootmem_cpumask_var(&def_root_domain.span);
-                alloc_bootmem_cpumask_var(&def_root_domain.online);
-                alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
-                cpupri_init(&rd->cpupri, true);
-                return 0;
-        }
+        if (bootmem)
+                gfp = GFP_NOWAIT;

-        if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+        if (!alloc_cpumask_var(&rd->span, gfp))
                 goto out;
-        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+        if (!alloc_cpumask_var(&rd->online, gfp))
                 goto free_span;
-        if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+        if (!alloc_cpumask_var(&rd->rto_mask, gfp))
                 goto free_online;

-        if (cpupri_init(&rd->cpupri, false) != 0)
+        if (cpupri_init(&rd->cpupri, bootmem) != 0)
                 goto free_rto_mask;
         return 0;
@@ -9123,7 +9119,7 @@ void __init sched_init(void)
          * we use alloc_bootmem().
          */
         if (alloc_size) {
-                ptr = (unsigned long)alloc_bootmem(alloc_size);
+                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

 #ifdef CONFIG_FAIR_GROUP_SCHED
                 init_task_group.se = (struct sched_entity **)ptr;
@@ -9314,13 +9310,13 @@ void __init sched_init(void)
         current->sched_class = &fair_sched_class;

         /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-        alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+        alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-        alloc_bootmem_cpumask_var(&nohz.cpu_mask);
-        alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask);
+        alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+        alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-        alloc_bootmem_cpumask_var(&cpu_isolated_map);
+        alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */

         scheduler_running = 1;
@@ -154,8 +154,12 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  */
 int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
 {
+        gfp_t gfp = GFP_KERNEL;
         int i;

+        if (bootmem)
+                gfp = GFP_NOWAIT;
+
         memset(cp, 0, sizeof(*cp));

         for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
@@ -163,9 +167,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
                 spin_lock_init(&vec->lock);
                 vec->count = 0;
-                if (bootmem)
-                        alloc_bootmem_cpumask_var(&vec->mask);
-                else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
+                if (!zalloc_cpumask_var(&vec->mask, gfp))
                         goto cleanup;
         }
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  */
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
-        if (likely(slab_is_available()))
-                *mask = kmalloc_node(cpumask_size(), flags, node);
-        else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-                printk(KERN_ERR
-                        "=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
-                *mask = NULL;
-        }
+        *mask = kmalloc_node(cpumask_size(), flags, node);
+
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
         if (!*mask) {
                 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
@@ -532,6 +532,9 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
                                         unsigned long size, unsigned long align,
                                         unsigned long goal, unsigned long limit)
 {
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc(size, GFP_NOWAIT);
+
 #ifdef CONFIG_HAVE_ARCH_BOOTMEM
         bootmem_data_t *p_bdata;

@@ -662,6 +665,9 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal)
 {
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
         return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 }

@@ -693,6 +699,9 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
         void *ptr;

+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
         ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
         if (ptr)
                 return ptr;
@@ -745,6 +754,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                        unsigned long align, unsigned long goal)
 {
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
         return ___alloc_bootmem_node(pgdat->bdata, size, align,
                                      goal, ARCH_LOW_ADDRESS_LIMIT);
 }
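
Editor's note: the effect of these WARN_ON_ONCE guards is visible in the profile_init() hunk above — callers no longer need to branch on slab_is_available() themselves. A hedged sketch of what a call site sees:

        /* Illustrative only: the same call now works on both sides of
         * kmem_cache_init(). Before slab is up it is served by bootmem;
         * after, it transparently becomes kzalloc(size, GFP_NOWAIT),
         * with a one-time warning, since reaching bootmem that late
         * usually indicates a stale call site: */
        buf = alloc_bootmem(size);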
@@ -47,6 +47,8 @@ static int __init alloc_node_page_cgroup(int nid)
         struct page_cgroup *base, *pc;
         unsigned long table_size;
         unsigned long start_pfn, nr_pages, index;
+        struct page *page;
+        unsigned int order;

         start_pfn = NODE_DATA(nid)->node_start_pfn;
         nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -55,11 +57,13 @@ static int __init alloc_node_page_cgroup(int nid)
                 return 0;

         table_size = sizeof(struct page_cgroup) * nr_pages;
-
-        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-        if (!base)
+        order = get_order(table_size);
+        page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
+        if (!page)
+                page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
+        if (!page)
                 return -ENOMEM;
+        base = page_address(page);
         for (index = 0; index < nr_pages; index++) {
                 pc = base + index;
                 __init_page_cgroup(pc, start_pfn + index);
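
Editor's note on the order computation above: get_order(size) returns the smallest order such that (PAGE_SIZE << order) >= size, and alloc_pages_node() with nid == -1 drops the node preference and allocates from any node. A worked example, assuming 4 KiB pages:

        order = get_order(4096);        /* == 0: fits in 1 page        */
        order = get_order(8192);        /* == 1: fits in 2 pages       */
        order = get_order(20480);       /* == 3: 5 pages round up to 8 */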
[Collapsed diff omitted.]
@@ -2557,13 +2557,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
         if (gfp_flags & SLUB_DMA)
                 flags = SLAB_CACHE_DMA;

-        down_write(&slub_lock);
+        /*
+         * This function is called with IRQs disabled during early-boot on
+         * single CPU so there's no need to take slub_lock here.
+         */
         if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
                                                                 flags, NULL))
                 goto panic;

         list_add(&s->list, &slab_caches);
-        up_write(&slub_lock);

         if (sysfs_slab_add(s))
                 goto panic;
         return s;
@@ -3021,7 +3024,7 @@ void __init kmem_cache_init(void)
          * kmem_cache_open for slab_state == DOWN.
          */
         create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-                sizeof(struct kmem_cache_node), GFP_KERNEL);
+                sizeof(struct kmem_cache_node), GFP_NOWAIT);
         kmalloc_caches[0].refcount = -1;
         caches++;
@@ -3034,16 +3037,16 @@ void __init kmem_cache_init(void)
         /* Caches that are not of the two-to-the-power-of size */
         if (KMALLOC_MIN_SIZE <= 64) {
                 create_kmalloc_cache(&kmalloc_caches[1],
-                                "kmalloc-96", 96, GFP_KERNEL);
+                                "kmalloc-96", 96, GFP_NOWAIT);
                 caches++;
                 create_kmalloc_cache(&kmalloc_caches[2],
-                                "kmalloc-192", 192, GFP_KERNEL);
+                                "kmalloc-192", 192, GFP_NOWAIT);
                 caches++;
         }

         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
-                        "kmalloc", 1 << i, GFP_KERNEL);
+                        "kmalloc", 1 << i, GFP_NOWAIT);
                 caches++;
         }
@@ -3080,7 +3083,7 @@ void __init kmem_cache_init(void)
         /* Provide the correct kmalloc names now that the caches are up */
         for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                 kmalloc_caches[i].name =
-                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+                        kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);

 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);
@@ -23,7 +23,6 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
-#include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <asm/atomic.h>
@@ -1032,7 +1031,7 @@ void __init vmalloc_init(void)
         /* Import existing vmlist entries. */
         for (tmp = vmlist; tmp; tmp = tmp->next) {
-                va = alloc_bootmem(sizeof(struct vmap_area));
+                va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
                 va->flags = tmp->flags | VM_VM_AREA;
                 va->va_start = (unsigned long)tmp->addr;
                 va->va_end = va->va_start + tmp->size;