Commit 2467cda1 authored by Beau Belgrave, committed by Steven Rostedt (Google)

user_events: Validate user payloads for size and null termination

Add validation to ensure the data is at least the minimum size required by
the fields of the event. If a dynamic array is used and is of type char,
ensure the array is null terminated.

Link: https://lkml.kernel.org/r/20220118204326.2169-7-beaub@linux.microsoft.com
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 0279400a
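
Before the diff, a minimal userspace-side sketch of the payload layout the new checks expect, derived from user_event_validate() and the min_size computation introduced below. It assumes an event whose last field is a __rel_loc char[]; the identifiers here (rel_loc_payload, fill_payload, msg) are illustrative only and are not part of the user_events ABI. In the 32-bit location word, the low 16 bits hold the data offset (relative to the end of the word for __rel_loc) and the high 16 bits hold the data size, which for char arrays must include the terminating NUL.

/* Hypothetical illustration only -- names below are not kernel ABI. */
#include <stdint.h>
#include <string.h>

struct rel_loc_payload {
        uint32_t count;   /* a fixed-size field before the dynamic one */
        uint32_t msg_loc; /* low 16 bits: offset, high 16 bits: size */
        char msg[32];     /* dynamic char data, must end in '\0' */
};

static void fill_payload(struct rel_loc_payload *p, const char *text)
{
        uint16_t size = (uint16_t)(strlen(text) + 1); /* count the NUL */
        uint16_t offset = 0; /* __rel_loc: data begins right after msg_loc */

        p->count = 1;
        p->msg_loc = ((uint32_t)size << 16) | offset;
        memcpy(p->msg, text, size);

        /*
         * user_event_validate() walks to &msg_loc + sizeof(msg_loc) + offset
         * + size and requires that this stays within the write and that the
         * byte just before that point is '\0'. user_events_write_core()
         * additionally rejects writes shorter than min_size (here, up
         * through the msg_loc word).
         */
}

int main(void)
{
        struct rel_loc_payload p;

        fill_payload(&p, "hello");
        return 0;
}

If the char data were not NUL terminated within the recorded size, the new validation would discard the event and the write would fail with -EFAULT; a write shorter than min_size is rejected up front with -EINVAL.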
@@ -64,9 +64,11 @@ struct user_event {
         struct dyn_event devent;
         struct hlist_node node;
         struct list_head fields;
+        struct list_head validators;
         atomic_t refcnt;
         int index;
         int flags;
+        int min_size;
 };
 
 /*
@@ -81,8 +83,17 @@ struct user_event_refs {
         struct user_event *events[];
 };
 
+#define VALIDATOR_ENSURE_NULL (1 << 0)
+#define VALIDATOR_REL (1 << 1)
+
+struct user_event_validator {
+        struct list_head link;
+        int offset;
+        int flags;
+};
+
 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
-                                   void *tpdata);
+                                   void *tpdata, bool *faulted);
 
 static int user_event_parse(char *name, char *args, char *flags,
                             struct user_event **newuser);
@@ -215,6 +226,17 @@ static int user_field_size(const char *type)
         return -EINVAL;
 }
 
+static void user_event_destroy_validators(struct user_event *user)
+{
+        struct user_event_validator *validator, *next;
+        struct list_head *head = &user->validators;
+
+        list_for_each_entry_safe(validator, next, head, link) {
+                list_del(&validator->link);
+                kfree(validator);
+        }
+}
+
 static void user_event_destroy_fields(struct user_event *user)
 {
         struct ftrace_event_field *field, *next;
@@ -230,13 +252,43 @@ static int user_event_add_field(struct user_event *user, const char *type,
                                 const char *name, int offset, int size,
                                 int is_signed, int filter_type)
 {
+        struct user_event_validator *validator;
         struct ftrace_event_field *field;
+        int validator_flags = 0;
 
         field = kmalloc(sizeof(*field), GFP_KERNEL);
 
         if (!field)
                 return -ENOMEM;
 
+        if (str_has_prefix(type, "__data_loc "))
+                goto add_validator;
+
+        if (str_has_prefix(type, "__rel_loc ")) {
+                validator_flags |= VALIDATOR_REL;
+                goto add_validator;
+        }
+
+        goto add_field;
+
+add_validator:
+        if (strstr(type, "char") != 0)
+                validator_flags |= VALIDATOR_ENSURE_NULL;
+
+        validator = kmalloc(sizeof(*validator), GFP_KERNEL);
+
+        if (!validator) {
+                kfree(field);
+                return -ENOMEM;
+        }
+
+        validator->flags = validator_flags;
+        validator->offset = offset;
+
+        /* Want sequential access when validating */
+        list_add_tail(&validator->link, &user->validators);
+
+add_field:
         field->type = type;
         field->name = name;
         field->offset = offset;
@@ -246,6 +298,12 @@ static int user_event_add_field(struct user_event *user, const char *type,
 
         list_add(&field->link, &user->fields);
 
+        /*
+         * Min size from user writes that are required, this does not include
+         * the size of trace_entry (common fields).
+         */
+        user->min_size = (offset + size) - sizeof(struct trace_entry);
+
         return 0;
 }
@@ -517,6 +575,7 @@ static int destroy_user_event(struct user_event *user)
         clear_bit(user->index, page_bitmap);
         hash_del(&user->node);
 
+        user_event_destroy_validators(user);
         kfree(user->call.print_fmt);
         kfree(EVENT_NAME(user));
         kfree(user);
@@ -538,15 +597,49 @@ static struct user_event *find_user_event(char *name, u32 *outkey)
         return NULL;
 }
 
+static int user_event_validate(struct user_event *user, void *data, int len)
+{
+        struct list_head *head = &user->validators;
+        struct user_event_validator *validator;
+        void *pos, *end = data + len;
+        u32 loc, offset, size;
+
+        list_for_each_entry(validator, head, link) {
+                pos = data + validator->offset;
+
+                /* Already done min_size check, no bounds check here */
+                loc = *(u32 *)pos;
+                offset = loc & 0xffff;
+                size = loc >> 16;
+
+                if (likely(validator->flags & VALIDATOR_REL))
+                        pos += offset + sizeof(loc);
+                else
+                        pos = data + offset;
+
+                pos += size;
+
+                if (unlikely(pos > end))
+                        return -EFAULT;
+
+                if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
+                        if (unlikely(*(char *)(pos - 1) != '\0'))
+                                return -EFAULT;
+        }
+
+        return 0;
+}
+
 /*
  * Writes the user supplied payload out to a trace file.
  */
 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
-                              void *tpdata)
+                              void *tpdata, bool *faulted)
 {
         struct trace_event_file *file;
         struct trace_entry *entry;
         struct trace_event_buffer event_buffer;
+        size_t size = sizeof(*entry) + i->count;
 
         file = (struct trace_event_file *)tpdata;
@@ -556,17 +649,25 @@ static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
                 return;
 
         /* Allocates and fills trace_entry, + 1 of this is data payload */
-        entry = trace_event_buffer_reserve(&event_buffer, file,
-                                           sizeof(*entry) + i->count);
+        entry = trace_event_buffer_reserve(&event_buffer, file, size);
 
         if (unlikely(!entry))
                 return;
 
         if (unlikely(!copy_nofault(entry + 1, i->count, i)))
-                __trace_event_discard_commit(event_buffer.buffer,
-                                             event_buffer.event);
-        else
-                trace_event_buffer_commit(&event_buffer);
+                goto discard;
+
+        if (!list_empty(&user->validators) &&
+            unlikely(user_event_validate(user, entry, size)))
+                goto discard;
+
+        trace_event_buffer_commit(&event_buffer);
+
+        return;
+discard:
+        *faulted = true;
+        __trace_event_discard_commit(event_buffer.buffer,
+                                     event_buffer.event);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -621,7 +722,7 @@ static void user_event_bpf(struct user_event *user, struct iov_iter *i)
  * Writes the user supplied payload out to perf ring buffer or eBPF program.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
-                            void *tpdata)
+                            void *tpdata, bool *faulted)
 {
         struct hlist_head *perf_head;
@@ -644,14 +745,21 @@ static void user_event_perf(struct user_event *user, struct iov_iter *i,
                 perf_fetch_caller_regs(regs);
 
-                if (unlikely(!copy_nofault(perf_entry + 1, i->count, i))) {
-                        perf_swevent_put_recursion_context(context);
-                        return;
-                }
+                if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
+                        goto discard;
+
+                if (!list_empty(&user->validators) &&
+                    unlikely(user_event_validate(user, perf_entry, size)))
+                        goto discard;
 
                 perf_trace_buf_submit(perf_entry, size, context,
                                       user->call.event.type, 1, regs,
                                       perf_head, NULL);
+
+                return;
+discard:
+                *faulted = true;
+                perf_swevent_put_recursion_context(context);
         }
 }
 #endif
@@ -971,6 +1079,7 @@ static int user_event_parse(char *name, char *args, char *flags,
         INIT_LIST_HEAD(&user->class.fields);
         INIT_LIST_HEAD(&user->fields);
+        INIT_LIST_HEAD(&user->validators);
 
         user->tracepoint.name = name;
@@ -1019,6 +1128,7 @@ static int user_event_parse(char *name, char *args, char *flags,
         return 0;
 put_user:
         user_event_destroy_fields(user);
+        user_event_destroy_validators(user);
         kfree(user);
         return ret;
 }
@@ -1076,6 +1186,9 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
         if (unlikely(user == NULL))
                 return -ENOENT;
 
+        if (unlikely(i->count < user->min_size))
+                return -EINVAL;
+
         tp = &user->tracepoint;
 
         /*
@@ -1087,10 +1200,13 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
                 user_event_func_t probe_func;
                 struct iov_iter copy;
                 void *tpdata;
+                bool faulted;
 
                 if (unlikely(fault_in_iov_iter_readable(i, i->count)))
                         return -EFAULT;
 
+                faulted = false;
+
                 rcu_read_lock_sched();
 
                 probe_func_ptr = rcu_dereference_sched(tp->funcs);
@@ -1100,11 +1216,14 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
                                 copy = *i;
                                 probe_func = probe_func_ptr->func;
                                 tpdata = probe_func_ptr->data;
-                                probe_func(user, &copy, tpdata);
+                                probe_func(user, &copy, tpdata, &faulted);
                         } while ((++probe_func_ptr)->func);
                 }
 
                 rcu_read_unlock_sched();
+
+                if (unlikely(faulted))
+                        return -EFAULT;
         }
 
         return ret;