Commit d0e0ac97 authored by Chen Gang, committed by Pekka Enberg

mm/slub: beautify code for 80 column limitation and tab alignment

Keep both code and comments within the 80 column limit.

Correct the tab alignment of an 'if-else' statement.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent e35e1a97
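For readers unfamiliar with the style being enforced, here is a minimal, self-contained sketch (hypothetical code, not taken from the patch) of the two wrapping patterns applied throughout the diff below: a long parameter list and a long condition are each split so that no line exceeds 80 columns, with the continuation indented under the expression it belongs to. The struct slot type and slot_matches() helper exist only for illustration.

/*
 * Illustration only: hypothetical types and helpers, not kernel code.
 * Shows the wrapping style the patch applies so that every line of code
 * and comments stays within the 80 column limit.
 */
#include <stdio.h>

struct slot {
	void *freelist;
	unsigned long counters;
};

/* Long parameter list wrapped onto a continuation line. */
static int slot_matches(const struct slot *slot, void *freelist_old,
			unsigned long counters_old)
{
	/* Long condition wrapped instead of running past 80 columns. */
	if (slot->freelist == freelist_old &&
				slot->counters == counters_old)
		return 1;
	return 0;
}

int main(void)
{
	struct slot s = { .freelist = NULL, .counters = 42UL };

	printf("match: %d\n", slot_matches(&s, NULL, 42UL));
	return 0;
}

Overlong lines like the ones removed below are the kind of thing scripts/checkpatch.pl warns about during review.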
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 }
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
@@ -918,7 +923,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +943,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1046,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1751,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2002,7 +2011,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
@@ -2172,8 +2182,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
 *
@@ -2317,7 +2327,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2385,13 +2396,15 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2423,7 +2436,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2515,8 +2529,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		if (kmem_cache_has_cpu_partial(s) && !prior)
 
 			/*
-			 * Slab was on no list before and will be partially empty
-			 * We can defer the list move and instead freeze it.
+			 * Slab was on no list before and will be
+			 * partially empty
+			 * We can defer the list move and instead
+			 * freeze it.
 			 */
 			new.frozen = 1;
@@ -3074,8 +3090,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3102,8 +3118,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
@@ -3341,7 +3357,8 @@ bool verify_mem_not_deleted(const void *x)
 	slab_lock(page);
 	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
+		object_err(page->slab_cache, page, object,
+				"Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -4165,15 +4182,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4280,7 +4299,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
@@ -4314,12 +4334,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5135,7 +5154,8 @@ static char *create_unique_id(struct kmem_cache *s)
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 	BUG_ON(p > name + ID_STR_LENGTH - 1);