Commit 91fd8b95 authored by Zhou Chengming, committed by Linus Torvalds

make __section_nr() more efficient

When CONFIG_SPARSEMEM_EXTREME is disabled, __section_nr() can obtain the
section number directly with a single subtraction.

Link: http://lkml.kernel.org/r/1468988310-11560-1-git-send-email-zhouchengming1@huawei.com
Signed-off-by: Zhou Chengming <zhouchengming1@huawei.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 98c42d94
@@ -100,11 +100,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 }
 #endif
 
-/*
- * Although written for the SPARSEMEM_EXTREME case, this happens
- * to also work for the flat array case because
- * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
- */
+#ifdef CONFIG_SPARSEMEM_EXTREME
 int __section_nr(struct mem_section* ms)
 {
 	unsigned long root_nr;
@@ -123,6 +119,12 @@ int __section_nr(struct mem_section* ms)
 
 	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
 }
+#else
+int __section_nr(struct mem_section* ms)
+{
+	return (int)(ms - mem_section[0]);
+}
+#endif
 
 /*
  * During early boot, before section_mem_map is used for an actual
...
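
For illustration only, here is a minimal user-space sketch of the pointer arithmetic the new flat-array branch relies on. The constants and the struct layout below are simplified stand-ins, not the kernel's real definitions; the point is that with one contiguous array the section number falls out of a single pointer subtraction instead of a scan over the section roots.

#include <stdio.h>

/* Simplified stand-in values; the real ones depend on the architecture
   (SECTION_SIZE_BITS, MAX_PHYSMEM_BITS). */
#define SECTIONS_PER_ROOT	256
#define NR_SECTION_ROOTS	4

/* Placeholder layout; the kernel's struct mem_section differs. */
struct mem_section {
	unsigned long section_mem_map;
};

/* Flat layout, as in the !CONFIG_SPARSEMEM_EXTREME case: a single static
   two-dimensional array, contiguous in memory. */
static struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];

/* The section number is simply the element's distance from the start of
   the array -- no loop over the roots, as in the patch's #else branch. */
static int section_nr_flat(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}

int main(void)
{
	struct mem_section *ms = &mem_section[2][17];

	/* Prints 2 * 256 + 17 = 529 */
	printf("section %d\n", section_nr_flat(ms));
	return 0;
}

The SPARSEMEM_EXTREME path cannot take this shortcut because its roots are allocated separately, so it still has to walk the section roots to find which root contains ms.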