Commit 927cbb47 authored by Hou Tao, committed by Andrii Nakryiko

libbpf: Handle size overflow for ringbuf mmap

The maximum size of a ringbuf is 2GB on an x86-64 host, so 2 * max_entries
overflows u32 when mapping the producer page and data pages. Casting
max_entries to size_t alone is not enough, because for a 32-bit
application on a 64-bit kernel the size of the read-only mmap region
can also overflow size_t.

Fix it by computing the size of the read-only mmap region as a __u64
and checking that it does not overflow size_t before the mmap.

Fixes: bf99c936 ("libbpf: Add BPF ring buffer support")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221116072351.1168938-3-houtao@huaweicloud.com
parent 689eb2f1
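
For illustration (not part of the commit), here is a minimal, self-contained C sketch of the widen-then-check pattern the patch applies: do the size arithmetic in 64 bits, then verify the result survives a round-trip through size_t before calling mmap. The max_entries and page_size values below are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_entries = 0x80000000u;   /* hypothetical 2GB ring buffer */
	size_t page_size = 4096;

	/* 2 * max_entries computed in u32 would wrap to 0;
	 * widen to u64 before multiplying and adding. */
	uint64_t mmap_sz = (uint64_t)page_size + 2 * (uint64_t)max_entries;

	/* On a 32-bit userspace, size_t is 32 bits, so the cast truncates
	 * and the comparison fails; on 64-bit it is a no-op. */
	if (mmap_sz != (uint64_t)(size_t)mmap_sz) {
		fprintf(stderr, "ring buffer size (%u) is too big\n", max_entries);
		return 1;
	}

	printf("mmap size fits in size_t: %llu bytes\n",
	       (unsigned long long)mmap_sz);
	return 0;
}

The round-trip cast is the whole check: any bits lost when narrowing to size_t make the comparison fail, which is exactly the condition the patch guards against before passing the size to mmap.
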
@@ -77,6 +77,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	__u32 len = sizeof(info);
 	struct epoll_event *e;
 	struct ring *r;
+	__u64 mmap_sz;
 	void *tmp;
 	int err;
@@ -115,8 +116,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	r->mask = info.max_entries - 1;
 
 	/* Map writable consumer page */
-	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
-		   map_fd, 0);
+	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
 	if (tmp == MAP_FAILED) {
 		err = -errno;
 		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
@@ -129,8 +129,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	 * data size to allow simple reading of samples that wrap around the
 	 * end of a ring buffer. See kernel implementation for details.
 	 * */
-	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
-		   MAP_SHARED, map_fd, rb->page_size);
+	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+	if (mmap_sz != (__u64)(size_t)mmap_sz) {
+		pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
+		return libbpf_err(-E2BIG);
+	}
+	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
 	if (tmp == MAP_FAILED) {
 		err = -errno;
 		ringbuf_unmap_ring(rb, r);