Commit 57c5a5b3 authored by David Hildenbrand, committed by Michael S. Tsirkin

virtio-mem: prepare fake page onlining code for granularity smaller than MAX_ORDER - 1

Let's prepare our fake page onlining code for subblock size smaller than
MAX_ORDER - 1: we might get called for ranges not covering properly
aligned MAX_ORDER - 1 pages. We have to detect the order to use
dynamically.
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20211126134209.17332-3-david@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Eric Ren <renzhengeek@gmail.com>
parent 6639032a
...@@ -1121,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn, ...@@ -1121,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/ */
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages) static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{ {
const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES; unsigned long order = MAX_ORDER - 1;
unsigned long i; unsigned long i;
/* /*
* We are always called at least with MAX_ORDER_NR_PAGES * We might get called for ranges that don't cover properly aligned
* granularity/alignment (e.g., the way subblocks work). All pages * MAX_ORDER - 1 pages; however, we can only online properly aligned
* inside such a block are alike. * pages with an order of MAX_ORDER - 1 at maximum.
*/ */
for (i = 0; i < nr_pages; i += max_nr_pages) { while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
for (i = 0; i < nr_pages; i += 1 << order) {
struct page *page = pfn_to_page(pfn + i); struct page *page = pfn_to_page(pfn + i);
/* /*
...@@ -1139,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages) ...@@ -1139,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
* alike. * alike.
*/ */
if (PageDirty(page)) { if (PageDirty(page)) {
virtio_mem_clear_fake_offline(pfn + i, max_nr_pages, virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
false); generic_online_page(page, order);
generic_online_page(page, MAX_ORDER - 1);
} else { } else {
virtio_mem_clear_fake_offline(pfn + i, max_nr_pages, virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
true); free_contig_range(pfn + i, 1 << order);
free_contig_range(pfn + i, max_nr_pages); adjust_managed_page_count(page, 1 << order);
adjust_managed_page_count(page, max_nr_pages);
} }
} }
} }
...@@ -2477,7 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm) ...@@ -2477,7 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
/* /*
* We want subblocks to span at least MAX_ORDER_NR_PAGES and * We want subblocks to span at least MAX_ORDER_NR_PAGES and
* pageblock_nr_pages pages. This: * pageblock_nr_pages pages. This:
* - Simplifies our fake page onlining code (virtio_mem_fake_online).
* - Is required for now for alloc_contig_range() to work reliably - * - Is required for now for alloc_contig_range() to work reliably -
* it doesn't properly handle smaller granularity on ZONE_NORMAL. * it doesn't properly handle smaller granularity on ZONE_NORMAL.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment