Commit 6f6528a1 authored by Joe Perches, committed by Linus Torvalds

slub: use bool function return values of true/false not 1/0

Use the normal return values for bool functions
Signed-off-by: Joe Perches <joe@perches.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 124dee09
...@@ -374,7 +374,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page ...@@ -374,7 +374,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
if (cmpxchg_double(&page->freelist, &page->counters, if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old, freelist_old, counters_old,
freelist_new, counters_new)) freelist_new, counters_new))
return 1; return true;
} else } else
#endif #endif
{ {
...@@ -384,7 +384,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page ...@@ -384,7 +384,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
page->freelist = freelist_new; page->freelist = freelist_new;
set_page_slub_counters(page, counters_new); set_page_slub_counters(page, counters_new);
slab_unlock(page); slab_unlock(page);
return 1; return true;
} }
slab_unlock(page); slab_unlock(page);
} }
...@@ -396,7 +396,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page ...@@ -396,7 +396,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
pr_info("%s %s: cmpxchg double redo ", n, s->name); pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif #endif
return 0; return false;
} }
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
...@@ -410,7 +410,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, ...@@ -410,7 +410,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
if (cmpxchg_double(&page->freelist, &page->counters, if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old, freelist_old, counters_old,
freelist_new, counters_new)) freelist_new, counters_new))
return 1; return true;
} else } else
#endif #endif
{ {
...@@ -424,7 +424,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, ...@@ -424,7 +424,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
set_page_slub_counters(page, counters_new); set_page_slub_counters(page, counters_new);
slab_unlock(page); slab_unlock(page);
local_irq_restore(flags); local_irq_restore(flags);
return 1; return true;
} }
slab_unlock(page); slab_unlock(page);
local_irq_restore(flags); local_irq_restore(flags);
...@@ -437,7 +437,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, ...@@ -437,7 +437,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
pr_info("%s %s: cmpxchg double redo ", n, s->name); pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif #endif
return 0; return false;
} }
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SLUB_DEBUG
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment