Commit b90d77e5 authored by Alexei Starovoitov's avatar Alexei Starovoitov Committed by Daniel Borkmann

bpf: Fix remap of arena.

The bpf arena logic didn't account for mremap operation. Add a refcnt for
multiple mmap events to prevent use-after-free in arena_vm_close.

Fixes: 31746031 ("bpf: Introduce bpf_arena.")
Reported-by: Pengfei Xu <pengfei.xu@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Barret Rhoden <brho@google.com>
Tested-by: Pengfei Xu <pengfei.xu@intel.com>
Closes: https://lore.kernel.org/bpf/Zmuw29IhgyPNKnIM@xpf.sh.intel.com
Link: https://lore.kernel.org/bpf/20240617171812.76634-1-alexei.starovoitov@gmail.com
parent bfbcb2c9
...@@ -212,6 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map) ...@@ -212,6 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
/*
 * Bookkeeping for one user mapping of the arena.  Each vma created by
 * mmap() of the arena map gets one entry, linked into arena->vma_list.
 */
struct vma_list {
	struct vm_area_struct *vma;	/* the vma this entry tracks */
	struct list_head head;		/* link in bpf_arena's vma_list */
	atomic_t mmap_count;		/* mappings sharing this entry; freed when it drops to 0 */
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
...@@ -221,20 +222,30 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) ...@@ -221,20 +222,30 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL); vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml) if (!vml)
return -ENOMEM; return -ENOMEM;
atomic_set(&vml->mmap_count, 1);
vma->vm_private_data = vml; vma->vm_private_data = vml;
vml->vma = vma; vml->vma = vma;
list_add(&vml->head, &arena->vma_list); list_add(&vml->head, &arena->vma_list);
return 0; return 0;
} }
/*
 * vm_ops->open: the kernel duplicated the vma (e.g. on mremap or a vma
 * split).  Take an extra reference on the shared vma_list entry so that
 * arena_vm_close() frees it only when the last copy goes away.
 */
static void arena_vm_open(struct vm_area_struct *vma)
{
	struct vma_list *vml = vma->vm_private_data;

	atomic_inc(&vml->mmap_count);
}
static void arena_vm_close(struct vm_area_struct *vma) static void arena_vm_close(struct vm_area_struct *vma)
{ {
struct bpf_map *map = vma->vm_file->private_data; struct bpf_map *map = vma->vm_file->private_data;
struct bpf_arena *arena = container_of(map, struct bpf_arena, map); struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
struct vma_list *vml; struct vma_list *vml = vma->vm_private_data;
if (!atomic_dec_and_test(&vml->mmap_count))
return;
guard(mutex)(&arena->lock); guard(mutex)(&arena->lock);
vml = vma->vm_private_data; /* update link list under lock */
list_del(&vml->head); list_del(&vml->head);
vma->vm_private_data = NULL; vma->vm_private_data = NULL;
kfree(vml); kfree(vml);
...@@ -287,6 +298,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf) ...@@ -287,6 +298,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
} }
static const struct vm_operations_struct arena_vm_ops = { static const struct vm_operations_struct arena_vm_ops = {
.open = arena_vm_open,
.close = arena_vm_close, .close = arena_vm_close,
.fault = arena_vm_fault, .fault = arena_vm_fault,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment