Commit 74310e06 authored by Sherry Yang, committed by Greg Kroah-Hartman

android: binder: Move buffer out of area shared with user space

The binder driver allocates buffer metadata in a region that is mapped
into user space. This metadata contains kernel pointers.

This patch moves the buffer metadata to the kernel heap, where it is
not mapped into user space, and uses a pointer in the metadata to refer
to the mapped data area.
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4175e2b4
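
The heart of the change is where struct binder_buffer lives. Below is a minimal before/after sketch (a field subset only; the _old/_new suffixes are illustrative, the real struct is simply binder_buffer):

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Before: metadata sat at the head of each buffer inside the
 * user-mapped area, so these kernel pointers were user-visible. */
struct binder_buffer_old {
	struct list_head entry;		/* kernel list pointers */
	struct rb_node rb_node;		/* kernel rbtree pointers */
	size_t data_size;
	uint8_t data[0];		/* payload follows in the same mapping */
};

/* After: metadata is a separate kzalloc'd object on the kernel heap;
 * only 'data' points into the region shared with user space. */
struct binder_buffer_new {
	struct list_head entry;
	struct rb_node rb_node;
	size_t data_size;
	void *data;			/* points into the mmap'd area */
};

With the struct off the shared mapping, user space can no longer read or corrupt the allocator's list and rbtree links, and size accounting no longer has to reserve sizeof(struct binder_buffer) inside the mapping, as the hunks below reflect.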
drivers/android/binder_alloc.c

@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return alloc->buffer +
-		       alloc->buffer_size - (void *)buffer->data;
-	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }

 static void binder_insert_free_buffer(struct binder_alloc *alloc,
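
The rewritten binder_alloc_buffer_size above encodes the new size rule: a buffer's size is the gap between its data pointer and the next buffer's data pointer, and the last buffer runs to the end of the mapping. A standalone userspace sketch of that arithmetic, with made-up offsets:

/* sketch.c - not driver code; offsets are invented for illustration */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	char area[4096];		/* stands in for the mmap'd region */
	char *data[] = { area, area + 128, area + 640 };
	size_t n = sizeof(data) / sizeof(data[0]);

	for (size_t i = 0; i < n; i++) {
		size_t size = (i == n - 1)
			? (size_t)(area + sizeof(area) - data[i])	/* last buffer */
			: (size_t)(data[i + 1] - data[i]);		/* gap to next */
		printf("buffer %zu: size %zu\n", i, size);	/* 128, 512, 3456 */
	}
	return 0;
}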
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);

-		if (new_buffer < buffer)
+		if (new_buffer->data < buffer->data)
 			p = &parent->rb_left;
-		else if (new_buffer > buffer)
+		else if (new_buffer->data > buffer->data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	void *kern_ptr;

-	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);

-		if (kern_ptr < buffer)
+		if (kern_ptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (kern_ptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 		return ERR_PTR(-ENOSPC);
 	}

+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
@@ -389,14 +391,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
@@ -404,17 +401,25 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 	if (ret)
 		return ERR_PTR(ret);

-	rb_erase(best_fit, &alloc->free_buffers);
-	buffer->free = 0;
-	buffer->free_in_progress = 0;
-	binder_insert_allocated_buffer_locked(alloc, buffer);
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		struct binder_buffer *new_buffer;

+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
 	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);
@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
+	return ERR_PTR(-ENOMEM);
 }

 /**
@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }

-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }

 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
+	bool to_free = true;

 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK share page with %pK\n",
-			     alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}

 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %pK share page with %pK\n",
-				     alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
 		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
+	}
+
+	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-			     alloc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
 	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }

 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;

-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
 		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
 	}
-	buffer = alloc->buffer;
+
+	buffer->data = alloc->buffer;
 	INIT_LIST_HEAD(&alloc->buffers);
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	return 0;

-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;

 	BUG_ON(alloc->vma);

 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);

 		/* Transaction should already have been freed */
@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		buffers++;
 	}

+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
drivers/android/binder_alloc.h

@@ -57,7 +57,7 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	uint8_t data[0];
+	void *data;
 };

 /**
drivers/android/binder_alloc_selftest.c

@@ -105,8 +105,9 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	void *page_addr, *end;
 	int page_index;

-	end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
-	for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index]) {
 			pr_err("incorrect alloc state at page index %d\n",

@@ -209,8 +210,7 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
 	 * we need one giant buffer before getting to the last page.
 	 */
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
-		- sizeof(struct binder_buffer) * BUFFER_NUM;
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
 	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
 	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
 }

@@ -228,8 +228,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
 		prev = index == 0 ? 0 : end_offset[index - 1];
 		end = prev;

-		BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
-			     * BUFFER_NUM >= PAGE_SIZE);
+		BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

 		for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
 			if (align % 2)