Commit 2f9f0854 authored by Kefeng Wang, committed by Andrew Morton

mm: memory: rename pages_per_huge_page to nr_pages

Since the callers have been converted to the nr_pages naming, use it inside too.

Link: https://lkml.kernel.org/r/20240618091242.2140164-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 530dd992
@@ -6387,23 +6387,23 @@ EXPORT_SYMBOL(__might_fault);
  * cache lines hot.
  */
 static inline int process_huge_page(
-		unsigned long addr_hint, unsigned int pages_per_huge_page,
+		unsigned long addr_hint, unsigned int nr_pages,
 		int (*process_subpage)(unsigned long addr, int idx, void *arg),
 		void *arg)
 {
 	int i, n, base, l, ret;
 	unsigned long addr = addr_hint &
-		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
 
 	/* Process target subpage last to keep its cache lines hot */
 	might_sleep();
 	n = (addr_hint - addr) / PAGE_SIZE;
-	if (2 * n <= pages_per_huge_page) {
+	if (2 * n <= nr_pages) {
 		/* If target subpage in first half of huge page */
 		base = 0;
 		l = n;
 		/* Process subpages at the end of huge page */
-		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
+		for (i = nr_pages - 1; i >= 2 * n; i--) {
 			cond_resched();
 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
 			if (ret)
@@ -6411,8 +6411,8 @@ static inline int process_huge_page(
 		}
 	} else {
 		/* If target subpage in second half of huge page */
-		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
-		l = pages_per_huge_page - n;
+		base = nr_pages - 2 * (nr_pages - n);
+		l = nr_pages - n;
 		/* Process subpages at the begin of huge page */
 		for (i = 0; i < base; i++) {
 			cond_resched();
@@ -6442,12 +6442,12 @@ static inline int process_huge_page(
 }
 
 static void clear_gigantic_page(struct folio *folio, unsigned long addr,
-				unsigned int pages_per_huge_page)
+				unsigned int nr_pages)
 {
 	int i;
 
 	might_sleep();
-	for (i = 0; i < pages_per_huge_page; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		cond_resched();
 		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
 	}
@@ -6477,15 +6477,15 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-				unsigned long addr,
-				struct vm_area_struct *vma,
-				unsigned int pages_per_huge_page)
+				   unsigned long addr,
+				   struct vm_area_struct *vma,
+				   unsigned int nr_pages)
 {
 	int i;
 	struct page *dst_page;
 	struct page *src_page;
 
-	for (i = 0; i < pages_per_huge_page; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		dst_page = folio_page(dst, i);
 		src_page = folio_page(src, i);
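
For readers skimming the diff, the following is a minimal userspace sketch (not kernel code) that mirrors the index arithmetic of process_huge_page(), so the role of the renamed nr_pages parameter is easy to see: the half of the huge page away from the target subpage is walked first, then the remaining subpages are processed from both sides converging on the target, which is visited last to keep its cache lines hot. The print_order() helper and the example values in main() are illustrative assumptions, not part of the kernel API.

/*
 * Userspace sketch only: reproduces the subpage visit order computed by
 * process_huge_page() from nr_pages and the target subpage index.
 */
#include <stdio.h>

static void print_order(unsigned int nr_pages, int target)
{
	int i, n = target, base, l;

	if (2 * n <= nr_pages) {
		/* Target in first half: walk the tail of the huge page downwards */
		base = 0;
		l = n;
		for (i = nr_pages - 1; i >= 2 * n; i--)
			printf("%d ", i);
	} else {
		/* Target in second half: walk the head of the huge page upwards */
		base = nr_pages - 2 * (nr_pages - n);
		l = nr_pages - n;
		for (i = 0; i < base; i++)
			printf("%d ", i);
	}

	/* Then converge on the target from both sides; it is visited last */
	for (i = 0; i < l; i++)
		printf("%d %d ", base + i, base + 2 * l - 1 - i);
	printf("\n");
}

int main(void)
{
	print_order(16, 5);	/* e.g. 16 subpages, target subpage index 5 */
	return 0;
}

With the example arguments this prints 15 14 13 12 11 10 0 9 1 8 2 7 3 6 4 5, i.e. the target index 5 is processed last.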