Commit b12ce36a authored by Ben Gardon, committed by Paolo Bonzini

kvm: Add memcg accounting to KVM allocations

There are many KVM kernel memory allocations which are tied to the life of
the VM process and should be charged to the VM process's cgroup. If these
allocations aren't charged to the process's cgroup, the OOM killer will not
know that killing the process will free the associated kernel memory.
Add __GFP_ACCOUNT flags to many of the allocations which are not yet being
charged to the VM process's cgroup.
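
For reference, GFP_KERNEL_ACCOUNT is defined as (GFP_KERNEL | __GFP_ACCOUNT),
so each conversion is a one-flag change at the allocation site. A minimal
sketch of the pattern (illustrative only, not a hunk from this patch):

	/* Charge this VM-lifetime allocation to the caller's memory cgroup. */
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;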

Tested:
	Ran all kvm-unit-tests on a 64-bit Haswell machine; the patch
	introduced no new failures.
	Ran a kernel memory accounting test which creates a VM to touch
	memory and then checks that the kernel memory allocated for the
	process is within certain bounds.
	With this patch we account for much more of the vmalloc and slab memory
	allocated for the VM.

There remain a few allocations which should be charged to the VM's
cgroup but are not. They include:
        vcpu->run
        kvm->coalesced_mmio_ring
These allocations are left unaccounted in this patch because they are mapped
into userspace, and accounting them to a cgroup causes problems. This
should be addressed in a future patch.
Signed-off-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 359a6c3d
@@ -144,7 +144,8 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 	if (zone->pio != 1 && zone->pio != 0)
 		return -EINVAL;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
+		      GFP_KERNEL_ACCOUNT);
 	if (!dev)
 		return -ENOMEM;
...
@@ -297,7 +297,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	if (!kvm_arch_intc_initialized(kvm))
 		return -EAGAIN;
-	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
 	if (!irqfd)
 		return -ENOMEM;
@@ -345,7 +345,8 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 		}
 		if (!irqfd->resampler) {
-			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
+			resampler = kzalloc(sizeof(*resampler),
+					    GFP_KERNEL_ACCOUNT);
 			if (!resampler) {
 				ret = -ENOMEM;
 				mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -797,7 +798,7 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
-	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
 	if (!p) {
 		ret = -ENOMEM;
 		goto fail;
...
@@ -196,7 +196,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
 		nr_rt_entries += 1;
 	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)),
-		      GFP_KERNEL);
+		      GFP_KERNEL_ACCOUNT);
 	if (!new)
 		return -ENOMEM;
@@ -208,7 +208,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
 	for (i = 0; i < nr; ++i) {
 		r = -ENOMEM;
-		e = kzalloc(sizeof(*e), GFP_KERNEL);
+		e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
 		if (!e)
 			goto out;
...
@@ -525,7 +525,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
 	int i;
 	struct kvm_memslots *slots;
-	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
@@ -601,12 +601,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
 					 sizeof(*kvm->debugfs_stat_data),
-					 GFP_KERNEL);
+					 GFP_KERNEL_ACCOUNT);
 	if (!kvm->debugfs_stat_data)
 		return -ENOMEM;
 	for (p = debugfs_entries; p->name; p++) {
-		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
 		if (!stat_data)
 			return -ENOMEM;
@@ -671,7 +671,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		goto out_err_no_irq_srcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		rcu_assign_pointer(kvm->buses[i],
-			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
+			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
 		if (!kvm->buses[i])
 			goto out_err;
 	}
@@ -789,7 +789,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
 	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
-	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
+	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
 	if (!memslot->dirty_bitmap)
 		return -ENOMEM;
@@ -1018,7 +1018,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			goto out_free;
 	}
-	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		goto out_free;
 	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@ -2683,7 +2683,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		struct kvm_regs *kvm_regs;
 		r = -ENOMEM;
-		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
 		if (!kvm_regs)
 			goto out;
 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -2711,7 +2711,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_GET_SREGS: {
-		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
+				    GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!kvm_sregs)
 			goto out;
@@ -2803,7 +2804,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_GET_FPU: {
-		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!fpu)
 			goto out;
@@ -2980,7 +2981,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	if (test)
 		return 0;
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
 	if (!dev)
 		return -ENOMEM;
@@ -3715,7 +3716,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		return -ENOSPC;
 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
-			  GFP_KERNEL);
+			  GFP_KERNEL_ACCOUNT);
 	if (!new_bus)
 		return -ENOMEM;
@@ -3761,7 +3762,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 		return;
 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
-			  GFP_KERNEL);
+			  GFP_KERNEL_ACCOUNT);
 	if (!new_bus) {
 		pr_err("kvm: failed to shrink bus, removing it completely\n");
 		goto broken;
@@ -4029,7 +4030,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	active = kvm_active_vms;
 	spin_unlock(&kvm_lock);
-	env = kzalloc(sizeof(*env), GFP_KERNEL);
+	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
 	if (!env)
 		return;
@@ -4045,7 +4046,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 	if (kvm->debugfs_dentry) {
-		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
+		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 		if (p) {
 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
...
@@ -219,7 +219,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		}
 	}
-	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
+	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
 	if (!kvg) {
 		mutex_unlock(&kv->lock);
 		kvm_vfio_group_put_external_user(vfio_group);
@@ -405,7 +405,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
 		if (tmp->ops == &kvm_vfio_ops)
 			return -EBUSY;
-	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
+	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
 	if (!kv)
 		return -ENOMEM;
...