Commit c31a910c authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/slab: move NUMA-related code to __do_cache_alloc()

To implement slab_alloc_node() independent of NUMA configuration,
move NUMA fallback/alternate allocation code into __do_cache_alloc().

One functional change here is that node availability is no longer
checked when allocating from the local node.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 1c23f9e6
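
For orientation before the diff: the patch makes the slab entry points funnel a
node hint into a single helper that owns all NUMA dispatch. A minimal userspace
C analogue of that shape (illustrative only; NODE_NONE, local_node() and the
cache_alloc* names are stand-ins, not kernel API):

#include <stdio.h>

#define NODE_NONE (-1)			/* stand-in for NUMA_NO_NODE */

static int local_node(void)
{
	return 0;			/* stand-in for numa_mem_id() */
}

/* After the patch, all placement dispatch lives in one helper. */
static const char *do_cache_alloc(int nodeid)
{
	if (nodeid == NODE_NONE)		/* caller has no preference */
		return "policy/local fast path, then cross-node fallback";
	else if (nodeid == local_node())	/* explicit local request */
		return "local fast path, no availability pre-check";
	else					/* explicit remote request */
		return "remote node, with bootstrap and fallback handling";
}

/* Entry points only forward a hint, like slab_alloc()/slab_alloc_node(). */
static const char *cache_alloc(void)
{
	return do_cache_alloc(NODE_NONE);
}

static const char *cache_alloc_node(int node)
{
	return do_cache_alloc(node);
}

int main(void)
{
	printf("cache_alloc():       %s\n", cache_alloc());
	printf("cache_alloc_node(1): %s\n", cache_alloc_node(1));
	return 0;
}
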
@@ -3180,13 +3180,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
+static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
+
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
 		unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
-	int slab_node = numa_mem_id();
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
 
@@ -3200,30 +3201,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 		goto out_hooks;
 
 	local_irq_save(save_flags);
-	if (nodeid == NUMA_NO_NODE)
-		nodeid = slab_node;
-
-	if (unlikely(!get_node(cachep, nodeid))) {
-		/* Node not bootstrapped yet */
-		ptr = fallback_alloc(cachep, flags);
-		goto out;
-	}
-
-	if (nodeid == slab_node) {
-		/*
-		 * Use the locally cached objects if possible.
-		 * However ____cache_alloc does not allow fallback
-		 * to other nodes. It may fail while we still have
-		 * objects on other nodes available.
-		 */
-		ptr = ____cache_alloc(cachep, flags);
-		if (ptr)
-			goto out;
-	}
-
-	/* ___cache_alloc_node can fall back to other nodes */
-	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-out:
+	ptr = __do_cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3234,31 +3212,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 }
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *objp;
+	void *objp = NULL;
+	int slab_node = numa_mem_id();
 
-	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
-		objp = alternate_node_alloc(cache, flags);
-		if (objp)
-			goto out;
+	if (nodeid == NUMA_NO_NODE) {
+		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+			objp = alternate_node_alloc(cachep, flags);
+			if (objp)
+				goto out;
+		}
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		objp = ____cache_alloc(cachep, flags);
+		nodeid = slab_node;
+	} else if (nodeid == slab_node) {
+		objp = ____cache_alloc(cachep, flags);
+	} else if (!get_node(cachep, nodeid)) {
+		/* Node not bootstrapped yet */
+		objp = fallback_alloc(cachep, flags);
+		goto out;
 	}
-	objp = ____cache_alloc(cache, flags);
 
 	/*
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
 	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
+		objp = ____cache_alloc_node(cachep, flags, nodeid);
 
 out:
 	return objp;
 }
 
 #else
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
 {
 	return ____cache_alloc(cachep, flags);
 }
@@ -3284,7 +3277,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 		goto out;
 
 	local_irq_save(save_flags);
-	objp = __do_cache_alloc(cachep, flags);
+	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
@@ -3521,7 +3514,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
-		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
+		void *objp = kfence_alloc(s, s->object_size, flags) ?:
+			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
 
 		if (unlikely(!objp))
 			goto error;
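
Note the branch ordering in the new __do_cache_alloc(): an explicit request for
the local node now takes the ____cache_alloc() fast path directly, and only a
request for a remote node still hits the get_node() bootstrap check. That
ordering is the functional change called out in the commit message: before this
patch, a local-node request against a not-yet-bootstrapped node would have gone
through fallback_alloc(); now it reaches the per-CPU cache path first and falls
back via ____cache_alloc_node() only if that fails.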