Commit bde4a19f authored by Todd Kjos, committed by Greg Kroah-Hartman

binder: use userspace pointer as base of buffer space

Now that alloc->buffer points to the userspace vm_area,
rename buffer->data to buffer->user_data and rename
local pointers that hold user addresses. Also use the
"__user" tag to annotate all user pointers so sparse
can flag cases where user pointer values are copied to
kernel pointers. Refactor code to use offsets instead
of user pointers.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c41358a5
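
The value of the "__user" annotation is that sparse (run via "make C=1" or
"make C=2") treats user and kernel pointers as living in different address
spaces and warns whenever one is assigned or cast to the other without going
through the proper accessors. A minimal sketch of the idea; the names
example_buf and example_read are hypothetical, not part of this commit:

#include <linux/types.h>
#include <linux/uaccess.h>	/* copy_from_user() */

struct example_buf {
	void __user *user_data;	/* userspace address: never dereferenced */
	size_t data_size;
};

static int example_read(struct example_buf *b, void *kbuf)
{
	/* OK: user memory is only reached through copy_from_user() */
	if (copy_from_user(kbuf, b->user_data, b->data_size))
		return -EFAULT;

	/*
	 * A line such as
	 *
	 *	void *p = b->user_data;
	 *
	 * would draw "warning: incorrect type in assignment (different
	 * address spaces)" from sparse, which is exactly the class of
	 * mistake this commit makes visible.
	 */
	return 0;
}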
@@ -69,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return (u8 *)alloc->buffer +
-			alloc->buffer_size - (u8 *)buffer->data;
-	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+		return alloc->buffer + alloc->buffer_size - buffer->user_data;
+	return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -121,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer->data < buffer->data)
+		if (new_buffer->user_data < buffer->user_data)
 			p = &parent->rb_left;
-		else if (new_buffer->data > buffer->data)
+		else if (new_buffer->user_data > buffer->user_data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -138,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void *uptr;
+	void __user *uptr;
 
-	uptr = (void *)user_ptr;
+	uptr = (void __user *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (uptr < buffer->data)
+		if (uptr < buffer->user_data)
 			n = n->rb_left;
-		else if (uptr > buffer->data)
+		else if (uptr > buffer->user_data)
 			n = n->rb_right;
 		else {
 			/*
@@ -188,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 }
 
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end)
+				    void __user *start, void __user *end)
 {
-	void *page_addr;
+	void __user *page_addr;
 	unsigned long user_page_addr;
 	struct binder_lru_page *page;
 	struct vm_area_struct *vma = NULL;
@@ -357,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
+	void __user *has_page_addr;
+	void __user *end_page_addr;
 	size_t size, data_offsets_size;
 	int ret;
 
@@ -456,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		      "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	has_page_addr = (void __user *)
+		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_update_page_range(alloc, 1,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+	ret = binder_update_page_range(alloc, 1, (void __user *)
+		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -477,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			      __func__, alloc->pid);
 		goto err_alloc_buf_struct_failed;
 	}
-	new_buffer->data = (u8 *)buffer->data + size;
+	new_buffer->user_data = (u8 __user *)buffer->user_data + size;
 	list_add(&new_buffer->entry, &buffer->entry);
 	new_buffer->free = 1;
 	binder_insert_free_buffer(alloc, new_buffer);
@@ -503,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	return buffer;
 
 err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0,
-				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
 				 end_page_addr);
 	return ERR_PTR(-ENOMEM);
 }
@@ -539,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	return buffer;
 }
 
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
 }
 
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+	return (void __user *)
+		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
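
The two helpers above feed binder_delete_free_buffer() in the next hunks:
buffers are carved out of one contiguous mapping, so a freed buffer's first
page may be shared with the buffer that precedes it, and a shared page must
not be released. A user-space sketch of the same page arithmetic, with
hypothetical addresses and a 4 KiB page size assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Same arithmetic as buffer_start_page(): first page this buffer touches */
static uintptr_t start_page(uintptr_t user_data)
{
	return user_data & PAGE_MASK;
}

/* Same arithmetic as prev_buffer_end_page(): the byte just before
 * user_data is the last byte of the preceding buffer, so this is the
 * page that buffer ends on */
static uintptr_t prev_end_page(uintptr_t user_data)
{
	return (user_data - 1) & PAGE_MASK;
}

int main(void)
{
	uintptr_t mid_page = 0x1000f00;	/* hypothetical, not page-aligned */
	uintptr_t aligned  = 0x1001000;	/* hypothetical, page-aligned */

	/* prints 1: an unaligned buffer shares its first page with its
	 * predecessor, so that page cannot be freed along with it */
	printf("%d\n", prev_end_page(mid_page) == start_page(mid_page));

	/* prints 0: an aligned buffer starts on a fresh page */
	printf("%d\n", prev_end_page(aligned) == start_page(aligned));
	return 0;
}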
@@ -561,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer->data, prev->data);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -571,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%d: merge free, buffer %pK share page with %pK\n",
 					   alloc->pid,
-					   buffer->data,
-					   next->data);
+					   buffer->user_data,
+					   next->user_data);
 		}
 	}
 
-	if (PAGE_ALIGNED(buffer->data)) {
+	if (PAGE_ALIGNED(buffer->user_data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer start %pK is page aligned\n",
-				   alloc->pid, buffer->data);
+				   alloc->pid, buffer->user_data);
 		to_free = false;
 	}
 
 	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-				   alloc->pid, buffer->data,
-				   prev->data, next ? next->data : NULL);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data,
+				   next ? next->user_data : NULL);
 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 					 buffer_start_page(buffer) + PAGE_SIZE);
 	}
@@ -613,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON(buffer->data < alloc->buffer);
-	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->user_data < alloc->buffer);
+	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -625,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	}
 
 	binder_update_page_range(alloc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+		(void __user *)(((uintptr_t)
+			  buffer->user_data + buffer_size) & PAGE_MASK));
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -692,7 +695,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	alloc->buffer = (void *)vma->vm_start;
+	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
 	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
@@ -712,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_buf_struct_failed;
 	}
 
-	buffer->data = alloc->buffer;
+	buffer->user_data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -773,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
+			void __user *page_addr;
 			bool on_lru;
 
 			if (!alloc->pages[i].page_ptr)
@@ -804,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
 				struct binder_buffer *buffer)
 {
 	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
+		   prefix, buffer->debug_id, buffer->user_data,
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->extra_buffers_size,
 		   buffer->transaction ? "active" : "delivered");
@@ -1056,7 +1059,7 @@ static inline bool check_buffer(struct binder_alloc *alloc,
  * @pgoffp: address to copy final page offset to
  *
  * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->data. If @pgoffp is not
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
  * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
@@ -1073,7 +1076,7 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
 					  pgoff_t *pgoffp)
 {
 	binder_size_t buffer_space_offset = buffer_offset +
-		(buffer->data - alloc->buffer);
+		(buffer->user_data - alloc->buffer);
 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
 	size_t index = buffer_space_offset >> PAGE_SHIFT;
 	struct binder_lru_page *lru_page;
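
This last hunk is the crux of the "offsets instead of user pointers"
refactor: binder_alloc_get_page() turns a (buffer, offset) pair into an index
into the kernel's own pages array plus a byte offset within that page, and
never dereferences the user address. A user-space model of the arithmetic,
with made-up numbers (locate_page is hypothetical; PAGE_SHIFT of 12 assumed):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Which page slot and byte offset a buffer offset lands on,
 * measured from the base of the mapped area */
static void locate_page(uintptr_t mapping_base, uintptr_t user_data,
			size_t buffer_offset)
{
	/* offset of the target byte from the start of the mapping */
	size_t space_off = buffer_offset + (user_data - mapping_base);
	size_t index = space_off >> PAGE_SHIFT;	/* alloc->pages[] slot */
	size_t pgoff = space_off & ~PAGE_MASK;	/* byte within the page */

	printf("pages[%zu], offset %zu\n", index, pgoff);
}

int main(void)
{
	uintptr_t base = 0x7f0000000000UL;	/* hypothetical vm_start */

	/* buffer starts 5000 bytes into the mapping, target byte 200:
	 * 5200 falls 1104 bytes into the second page */
	locate_page(base, base + 5000, 200);	/* pages[1], offset 1104 */
	return 0;
}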
......
@@ -40,7 +40,7 @@ struct binder_transaction;
  * @data_size:          size of @transaction data
  * @offsets_size:       size of array of offsets
  * @extra_buffers_size: size of space for other objects (like sg lists)
- * @data:               pointer to base of buffer space
+ * @user_data:          user pointer to base of buffer space
  *
  * Bookkeeping structure for binder transaction buffers
  */
@@ -59,7 +59,7 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	void *data;
+	void __user *user_data;
 };
 
 /**
@@ -102,7 +102,7 @@ struct binder_alloc {
 	struct mutex mutex;
 	struct vm_area_struct *vma;
 	struct mm_struct *vma_vm_mm;
-	void *buffer;
+	void __user *buffer;
 	struct list_head buffers;
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
......
@@ -105,8 +105,8 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	void *page_addr, *end;
 	int page_index;
 
-	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
-	page_addr = buffer->data;
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	page_addr = buffer->user_data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index].page_ptr ||
......
@@ -293,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
 
 TRACE_EVENT(binder_update_page_range,
 	TP_PROTO(struct binder_alloc *alloc, bool allocate,
-		 void *start, void *end),
+		 void __user *start, void __user *end),
 	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
......