Commit 276aeee1 authored by yanghui, committed by Linus Torvalds

mm/mempolicy: fix a race between offset_il_node and mpol_rebind_task

Our servers hit the following panic:

  Kernel version: 5.4.56
  BUG: unable to handle page fault for address: 0000000000002c48
  RIP: 0010:__next_zones_zonelist+0x1d/0x40
  Call Trace:
    __alloc_pages_nodemask+0x277/0x310
    alloc_page_interleave+0x13/0x70
    handle_mm_fault+0xf99/0x1390
    __do_page_fault+0x288/0x500
    do_page_fault+0x30/0x110
    page_fault+0x3e/0x50

The panic happens because MAX_NUMNODES is passed as the third parameter
(preferred_nid) of __alloc_pages_nodemask().  The resulting access to
zonelist->zoneref->zone_idx in __next_zones_zonelist() then faults.
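
For context, this is roughly how the out-of-range preferred_nid turns into a
wild pointer: node_zonelist() indexes the per-node array with the nid.  The
snippet below is paraphrased from include/linux/gfp.h and the x86 NUMA
headers; exact definitions vary by config and kernel version:

  /* Paraphrased kernel snippets, not verbatim from v5.4. */
  extern struct pglist_data *node_data[];	/* one entry per possible node */
  #define NODE_DATA(nid)	(node_data[nid])

  static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
  {
  	/*
  	 * With nid == MAX_NUMNODES this reads one slot past the end of
  	 * node_data[], so the zonelist pointer handed to
  	 * __next_zones_zonelist() is garbage -- hence the page fault in
  	 * the trace above.
  	 */
  	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
  }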

In offset_il_node(), first_node() returns a nid from pol->v.nodes; after
that, other threads may change pol->v.nodes before next_node() runs.  This
race can make next_node() return MAX_NUMNODES.  Fix it by copying pol->nodes
into a local variable.

The race condition is between offset_il_node and cpuset_change_task_nodemask:

  CPU0:                                     CPU1:
  alloc_pages_vma()
    interleave_nid(pol,)
      offset_il_node(pol,)
        first_node(pol->v.nodes)            cpuset_change_task_nodemask
                        //nodes==0xc          mpol_rebind_task
                                                mpol_rebind_policy
                                                  mpol_rebind_nodemask(pol,nodes)
                        //nodes==0x3
        next_node(nid, pol->v.nodes)//return MAX_NUMNODES
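
The same interleaving can be modelled in user space with toy stand-ins for
first_node()/next_node() over a plain bitmask.  All names below are made up
for illustration; this is not the kernel code:

  #include <stdio.h>

  #define MAX_NUMNODES 64			/* illustrative, not the kernel value */

  /* Toy stand-ins for first_node()/next_node(): like the kernel helpers,
   * they return MAX_NUMNODES when no further bit is set in the mask. */
  static int toy_first_node(unsigned long long mask)
  {
  	for (int i = 0; i < MAX_NUMNODES; i++)
  		if (mask & (1ULL << i))
  			return i;
  	return MAX_NUMNODES;
  }

  static int toy_next_node(int n, unsigned long long mask)
  {
  	for (int i = n + 1; i < MAX_NUMNODES; i++)
  		if (mask & (1ULL << i))
  			return i;
  	return MAX_NUMNODES;
  }

  int main(void)
  {
  	unsigned long long shared = 0xc;	/* nodes {2,3}, as on CPU0 above */

  	/* CPU0: first_node() walks the old mask ... */
  	int nid = toy_first_node(shared);	/* nid == 2 */

  	/* CPU1: the rebind shrinks the mask to {0,1} in between. */
  	shared = 0x3;

  	/* CPU0: next_node() finds no bit above 2 and falls off the end. */
  	nid = toy_next_node(nid, shared);
  	printf("racy walk returned %d (MAX_NUMNODES = %d)\n", nid, MAX_NUMNODES);

  	/* The fix pattern: snapshot the mask once and walk only the snapshot. */
  	unsigned long long snapshot = 0xc;
  	nid = toy_next_node(toy_first_node(snapshot), snapshot);
  	printf("snapshot walk returned %d\n", nid);	/* stays in range: 3 */
  	return 0;
  }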

Link: https://lkml.kernel.org/r/20210906034658.48721-1-yanghui.def@bytedance.com
Signed-off-by: yanghui <yanghui.def@bytedance.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79d37050
mm/mempolicy.c
@@ -1979,17 +1979,26 @@ unsigned int mempolicy_slab_node(void)
  */
 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
 {
-	unsigned nnodes = nodes_weight(pol->nodes);
-	unsigned target;
+	nodemask_t nodemask = pol->nodes;
+	unsigned int target, nnodes;
 	int i;
 	int nid;
+	/*
+	 * The barrier will stabilize the nodemask in a register or on
+	 * the stack so that it will stop changing under the code.
+	 *
+	 * Between first_node() and next_node(), pol->nodes could be changed
+	 * by other threads. So we put pol->nodes in a local variable.
+	 */
+	barrier();
+	nnodes = nodes_weight(nodemask);
 	if (!nnodes)
 		return numa_node_id();
 	target = (unsigned int)n % nnodes;
-	nid = first_node(pol->nodes);
+	nid = first_node(nodemask);
 	for (i = 0; i < target; i++)
-		nid = next_node(nid, pol->nodes);
+		nid = next_node(nid, nodemask);
 	return nid;
 }
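
For reference, barrier() is the kernel's compiler barrier from
include/linux/compiler.h; on GCC/Clang builds it expands to roughly the line
below, which keeps the compiler from discarding the local copy and
re-reading pol->nodes later (approximate; the exact definition depends on
the compiler headers):

  #define barrier()	__asm__ __volatile__("" : : : "memory")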