Commit 8e5c921c authored by David Hildenbrand, committed by Michael S. Tsirkin

virtio-mem: Allow to offline partially unplugged memory blocks

Dropping the reference count of PageOffline() pages during MEM_GOING_OFFLINE
allows the offlining code to skip them. However, we also have to clear
PG_reserved, because PG_reserved pages get detected as unmovable right
away. Take care of restoring the reference count when offlining is
canceled.

Clarify why we don't have to perform any action when unloading the
driver. Also, let's add a warning if anybody is still holding a
reference to unplugged pages when offlining.
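
To illustrate the reasoning above: the unmovable-page scan that runs before offlining treats PG_reserved pages as unmovable right away, while PageOffline() pages whose reference count has been dropped to zero can simply be skipped. A simplified sketch of such a check follows; the function name is hypothetical and this is not the actual mm/ implementation:

/*
 * Illustrative sketch only -- a simplified stand-in for the unmovable-page
 * scan performed before memory offlining, not the real mm/ code.
 */
static bool demo_range_is_offlineable(unsigned long start_pfn,
                                      unsigned long nr_pages)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /* PG_reserved pages are reported as unmovable right away. */
                if (PageReserved(page))
                        return false;

                /*
                 * Fake-offline pages (PageOffline()) whose reference count
                 * was dropped to zero can simply be skipped by offlining.
                 */
                if (PageOffline(page) && !page_count(page))
                        continue;

                /* Everything else must be movable (e.g., on the LRU). */
                if (!PageLRU(page) && !__PageMovable(page))
                        return false;
        }
        return true;
}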
Tested-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20200507140139.17083-8-david@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent aa218795
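
For context on the hunks below: the driver participates in the memory-hotplug notifier chain and reacts to MEM_GOING_OFFLINE and MEM_CANCEL_OFFLINE per memory block. A minimal sketch of that notifier pattern is shown here; the demo_* names are hypothetical stand-ins for the driver-specific helpers added by this patch:

#include <linux/memory.h>
#include <linux/notifier.h>

/* Hypothetical stand-ins for the driver-specific helpers added below. */
static void demo_drop_page_refs(unsigned long start_pfn, unsigned long nr_pages)
{
        /* e.g., page_ref_dec_and_test() on each fake-offline page */
}

static void demo_restore_page_refs(unsigned long start_pfn, unsigned long nr_pages)
{
        /* e.g., page_ref_inc() on each fake-offline page */
}

static int demo_memory_notifier_cb(struct notifier_block *nb,
                                   unsigned long action, void *arg)
{
        const struct memory_notify *mhp = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                /* Give up our references so offlining can skip these pages. */
                demo_drop_page_refs(mhp->start_pfn, mhp->nr_pages);
                break;
        case MEM_CANCEL_OFFLINE:
                /* Offlining was aborted: take the references back. */
                demo_restore_page_refs(mhp->start_pfn, mhp->nr_pages);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block demo_memory_nb = {
        .notifier_call = demo_memory_notifier_cb,
};

/* Registered once at probe time: register_memory_notifier(&demo_memory_nb); */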
@@ -572,6 +572,57 @@ static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
                 virtio_mem_retry(vm);
 }
 
+static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
+                                            unsigned long mb_id)
+{
+        const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+        struct page *page;
+        unsigned long pfn;
+        int sb_id, i;
+
+        for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+                if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                        continue;
+                /*
+                 * Drop our reference to the pages so the memory can get
+                 * offlined and add the unplugged pages to the managed
+                 * page counters (so offlining code can correctly subtract
+                 * them again).
+                 */
+                pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                               sb_id * vm->subblock_size);
+                adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+                for (i = 0; i < nr_pages; i++) {
+                        page = pfn_to_page(pfn + i);
+                        if (WARN_ON(!page_ref_dec_and_test(page)))
+                                dump_page(page, "unplugged page referenced");
+                }
+        }
+}
+
+static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
+                                             unsigned long mb_id)
+{
+        const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+        unsigned long pfn;
+        int sb_id, i;
+
+        for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+                if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                        continue;
+                /*
+                 * Get the reference we dropped when going offline and
+                 * subtract the unplugged pages from the managed page
+                 * counters.
+                 */
+                pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                               sb_id * vm->subblock_size);
+                adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+                for (i = 0; i < nr_pages; i++)
+                        page_ref_inc(pfn_to_page(pfn + i));
+        }
+}
+
 /*
  * This callback will either be called synchronously from add_memory() or
  * asynchronously (e.g., triggered via user space). We have to be careful
@@ -618,6 +669,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
                         break;
                 }
                 vm->hotplug_active = true;
+                virtio_mem_notify_going_offline(vm, mb_id);
                 break;
         case MEM_GOING_ONLINE:
                 mutex_lock(&vm->hotplug_mutex);
@@ -642,6 +694,12 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
                 mutex_unlock(&vm->hotplug_mutex);
                 break;
         case MEM_CANCEL_OFFLINE:
+                if (!vm->hotplug_active)
+                        break;
+                virtio_mem_notify_cancel_offline(vm, mb_id);
+                vm->hotplug_active = false;
+                mutex_unlock(&vm->hotplug_mutex);
+                break;
         case MEM_CANCEL_ONLINE:
                 if (!vm->hotplug_active)
                         break;
@@ -668,8 +726,11 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
                 struct page *page = pfn_to_page(pfn);
 
                 __SetPageOffline(page);
-                if (!onlined)
+                if (!onlined) {
                         SetPageDirty(page);
+                        /* FIXME: remove after cleanups */
+                        ClearPageReserved(page);
+                }
         }
 }
 
@@ -1722,6 +1783,11 @@ static void virtio_mem_remove(struct virtio_device *vdev)
                 BUG_ON(rc);
                 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
         }
 
+        /*
+         * After we unregistered our callbacks, user space can no longer
+         * offline partially plugged online memory blocks. No need to worry
+         * about them.
+         */
         /* unregister callbacks */
         unregister_virtio_mem_device(vm);