Commit c06b1fca authored by Andrew Morton, committed by Linus Torvalds

mm/page_alloc.c: don't cache `current' in a local

It's old-fashioned and unneeded.

akpm:/usr/src/25> size mm/page_alloc.o
   text    data     bss     dec     hex filename
  39884 1241317   18808 1300009  13d629 mm/page_alloc.o (before)
  39838 1241317   18808 1299963  13d5fb mm/page_alloc.o (after)

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fd4a4663
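
Editor's note: for illustration, here is a minimal userspace sketch of the pattern this commit removes. The `task_struct' stub, the `current' macro, and the reclaim functions below are simplified stand-ins, not the kernel's real definitions; in the kernel, `current' is a macro (roughly get_current()) that compiles down to a cheap per-CPU or register lookup, so caching it in a local buys nothing.

	/*
	 * Illustrative sketch only.  PF_MEMALLOC uses the kernel's value,
	 * but everything else here is a simplified stub.
	 */
	#include <stdio.h>

	struct task_struct {
		unsigned int flags;
		char comm[16];
	};

	#define PF_MEMALLOC	0x00000800	/* same value as the kernel flag */

	static struct task_struct stub_task = { .comm = "stub" };
	#define current (&stub_task)		/* stand-in for get_current() */

	/* Old style: cache `current' in a local first. */
	static void reclaim_old_style(void)
	{
		struct task_struct *p = current;

		p->flags |= PF_MEMALLOC;
		/* ... synchronous reclaim would run here ... */
		p->flags &= ~PF_MEMALLOC;
	}

	/* New style: use `current' directly; no local needed. */
	static void reclaim_new_style(void)
	{
		current->flags |= PF_MEMALLOC;
		/* ... synchronous reclaim would run here ... */
		current->flags &= ~PF_MEMALLOC;
	}

	int main(void)
	{
		reclaim_old_style();
		reclaim_new_style();
		printf("%s: flags=%#x\n", current->comm, current->flags);
		return 0;
	}

As the size output above shows, dropping the locals also shaves a few dozen bytes of text, since the compiler no longer keeps the cached pointer live across the calls.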
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,15 +1809,14 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool sync_migration)
 {
 	struct page *page;
-	struct task_struct *tsk = current;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
-	tsk->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
-	tsk->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1869,23 +1868,22 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
@@ -1950,7 +1948,6 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
@@ -1977,12 +1974,12 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -2001,7 +1998,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
 	bool sync_migration = false;
 
 	/*
@@ -2060,7 +2056,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
@@ -2153,7 +2149,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}