mm: Convert get_page_unless_zero() to return bool

atomic_add_unless() returns bool, so remove the widening casts to int
in page_ref_add_unless() and get_page_unless_zero().  This causes gcc
to produce slightly larger code in isolate_migratepages_block(), but
it's not clear that it's worse code.  Net +19 bytes of text.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
parent 5816b3e6
@@ -754,7 +754,7 @@ static inline int put_page_testzero(struct page *page)
  * This can be called when MMU is off so it must not access
  * any of the virtual mappings.
  */
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
 {
 	return page_ref_add_unless(page, 1, 0);
 }
...
...@@ -161,9 +161,9 @@ static inline int page_ref_dec_return(struct page *page) ...@@ -161,9 +161,9 @@ static inline int page_ref_dec_return(struct page *page)
return ret; return ret;
} }
static inline int page_ref_add_unless(struct page *page, int nr, int u) static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{ {
int ret = atomic_add_unless(&page->_refcount, nr, u); bool ret = atomic_add_unless(&page->_refcount, nr, u);
if (page_ref_tracepoint_active(page_ref_mod_unless)) if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret); __page_ref_mod_unless(page, nr, ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.