Commit 6ecb0fc6 authored by Baoquan He, committed by Linus Torvalds

mm/sparse.c: move subsection_map related functions together

No functional change.

[bhe@redhat.com: move functions into CONFIG_MEMORY_HOTPLUG ifdeffery scope]
  Link: http://lkml.kernel.org/r/20200316045804.GC3486@MiWiFi-R3L-srv
Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200312124414.439-6-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 95a5a34d
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -666,6 +666,55 @@ static void free_map_bootmem(struct page *memmap)
 
 	vmemmap_free(start, end, NULL);
 }
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+	struct mem_section *ms = __pfn_to_section(pfn);
+	unsigned long *subsection_map = ms->usage
+		? &ms->usage->subsection_map[0] : NULL;
+
+	subsection_mask_set(map, pfn, nr_pages);
+	if (subsection_map)
+		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+				"section already deactivated (%#lx + %ld)\n",
+				pfn, nr_pages))
+		return -EINVAL;
+
+	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+	return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+	return bitmap_empty(&ms->usage->subsection_map[0],
+			    SUBSECTIONS_PER_SECTION);
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+	unsigned long *subsection_map;
+	int rc = 0;
+
+	subsection_mask_set(map, pfn, nr_pages);
+
+	subsection_map = &ms->usage->subsection_map[0];
+
+	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+		rc = -EINVAL;
+	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+		rc = -EEXIST;
+	else
+		bitmap_or(subsection_map, map, subsection_map,
+				SUBSECTIONS_PER_SECTION);
+
+	return rc;
+}
 #else
 struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
@@ -709,46 +758,22 @@ static void free_map_bootmem(struct page *memmap)
 			put_page_bootmem(page);
 	}
 }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
 static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
-	struct mem_section *ms = __pfn_to_section(pfn);
-	unsigned long *subsection_map = ms->usage
-		? &ms->usage->subsection_map[0] : NULL;
-
-	subsection_mask_set(map, pfn, nr_pages);
-	if (subsection_map)
-		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
-
-	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
-				"section already deactivated (%#lx + %ld)\n",
-				pfn, nr_pages))
-		return -EINVAL;
-
-	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
 	return 0;
 }
 
 static bool is_subsection_map_empty(struct mem_section *ms)
 {
-	return bitmap_empty(&ms->usage->subsection_map[0],
-			    SUBSECTIONS_PER_SECTION);
-}
-#else
-static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	return 0;
+	return true;
 }
 
-static bool is_subsection_map_empty(struct mem_section *ms)
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-	return true;
+	return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * To deactivate a memory region, there are 3 cases to handle across
@@ -810,35 +835,6 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	ms->section_mem_map = (unsigned long)NULL;
 }
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	struct mem_section *ms = __pfn_to_section(pfn);
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-	unsigned long *subsection_map;
-	int rc = 0;
-
-	subsection_mask_set(map, pfn, nr_pages);
-
-	subsection_map = &ms->usage->subsection_map[0];
-
-	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
-		rc = -EINVAL;
-	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
-		rc = -EEXIST;
-	else
-		bitmap_or(subsection_map, map, subsection_map,
-				SUBSECTIONS_PER_SECTION);
-
-	return rc;
-}
-#else
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-	return 0;
-}
-#endif
-
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
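For readers following the subsection bookkeeping these helpers implement, the sketch below is a minimal, self-contained userspace illustration of the same fill/clear semantics: filling fails with -EEXIST if any requested subsection is already active, and clearing fails with -EINVAL unless the requested range is fully active. It is not kernel code; the demo_* names are invented for this example, and it assumes 64 subsections per section (e.g. 128MB sections with 2MB subsections) so a single 64-bit word stands in for the kernel's bitmap API.

/*
 * Standalone illustration (not kernel code) of the subsection bitmap
 * semantics used by fill_subsection_map()/clear_subsection_map() above.
 * Assumes 64 subsections per section so one 64-bit word replaces the
 * kernel bitmap API; all demo_* names are invented for this example.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SUBSECTIONS_PER_SECTION 64

/* Build a mask covering subsections [start, start + count). */
static uint64_t demo_subsection_mask(unsigned int start, unsigned int count)
{
	if (count == 0 || start >= DEMO_SUBSECTIONS_PER_SECTION)
		return 0;
	if (count >= DEMO_SUBSECTIONS_PER_SECTION - start)
		return ~0ULL << start;
	return ((1ULL << count) - 1) << start;
}

/* Like fill_subsection_map(): reject empty or already-populated ranges. */
static int demo_fill(uint64_t *map, unsigned int start, unsigned int count)
{
	uint64_t mask = demo_subsection_mask(start, count);

	if (!mask)
		return -EINVAL;		/* nothing requested */
	if (*map & mask)
		return -EEXIST;		/* overlaps an active subsection */
	*map |= mask;			/* bitmap_or() equivalent */
	return 0;
}

/* Like clear_subsection_map(): the whole range must currently be active. */
static int demo_clear(uint64_t *map, unsigned int start, unsigned int count)
{
	uint64_t mask = demo_subsection_mask(start, count);

	if ((*map & mask) != mask)
		return -EINVAL;		/* already (partially) deactivated */
	*map ^= mask;			/* bitmap_xor() with a strict subset */
	return 0;
}

int main(void)
{
	uint64_t map = 0;

	printf("fill  0..15: %d\n", demo_fill(&map, 0, 16));	/* 0 */
	printf("fill  8..23: %d\n", demo_fill(&map, 8, 16));	/* -EEXIST */
	printf("clear 0..15: %d\n", demo_clear(&map, 0, 16));	/* 0 */
	printf("clear 0..15: %d\n", demo_clear(&map, 0, 16));	/* -EINVAL */
	printf("map empty:   %s\n", map == 0 ? "yes" : "no");	/* yes */
	return 0;
}

Built with any C compiler, this should print 0, -17 (EEXIST), 0, -22 (EINVAL) and report the map empty again, mirroring the return codes the kernel helpers hand back to section_activate() and section_deactivate().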