Commit 89f71743 authored by Carlos Llamas, committed by Greg Kroah-Hartman

binder: remove pid param in binder_alloc_new_buf()

Binder attributes the buffer allocation to current->tgid every time.
There is no need to pass this as a parameter, so drop it.

Also add a few touchups to follow the coding guidelines. No functional
changes are introduced in this patch.
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-13-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 377e1684
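In short, the calling convention changes as follows (a simplified before/after
sketch assembled from the hunks below; surrounding code elided):

    /* before this patch: callers passed the pid explicitly */
    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                                     tr->offsets_size, extra_buffers_size,
                                     !reply && (t->flags & TF_ONE_WAY),
                                     current->tgid);

    /* after this patch: the allocator attributes the buffer itself */
    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                                     tr->offsets_size, extra_buffers_size,
                                     !reply && (t->flags & TF_ONE_WAY));

    /* inside binder_alloc_new_buf(), under alloc->mutex: */
    buffer->pid = current->tgid;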
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3225,7 +3225,7 @@ static void binder_transaction(struct binder_proc *proc,
 
 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 		tr->offsets_size, extra_buffers_size,
-		!reply && (t->flags & TF_ONE_WAY), current->tgid);
+		!reply && (t->flags & TF_ONE_WAY));
 	if (IS_ERR(t->buffer)) {
 		char *s;
 
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -319,7 +319,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 	return smp_load_acquire(&alloc->vma);
 }
 
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 {
 	/*
 	 * Find the amount and size of buffers allocated by the current caller;
@@ -328,10 +328,11 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 	 * and at some point we'll catch them in the act. This is more efficient
 	 * than keeping a map per pid.
 	 */
-	struct rb_node *n;
 	struct binder_buffer *buffer;
 	size_t total_alloc_size = 0;
+	int pid = current->tgid;
 	size_t num_buffers = 0;
+	struct rb_node *n;
 
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
 	     n = rb_next(n)) {
@@ -364,8 +365,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t size,
-				int is_async,
-				int pid)
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
@@ -476,7 +476,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);
 	buffer->async_transaction = is_async;
-	buffer->pid = pid;
 	buffer->oneway_spam_suspect = false;
 	if (is_async) {
 		alloc->free_async_space -= size;
@@ -489,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		 * of async space left (which is less than 10% of total
 		 * buffer size).
 		 */
-		buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+		buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc);
 	} else {
 		alloc->oneway_spam_detected = false;
 	}
@@ -532,7 +531,6 @@ static inline size_t sanitized_size(size_t data_size,
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
- * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
@@ -545,8 +543,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
 					   size_t offsets_size,
 					   size_t extra_buffers_size,
-					   int is_async,
-					   int pid)
+					   int is_async)
 {
 	struct binder_buffer *buffer;
 	size_t size;
@@ -569,7 +566,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	}
 
 	mutex_lock(&alloc->mutex);
-	buffer = binder_alloc_new_buf_locked(alloc, size, is_async, pid);
+	buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
 	if (IS_ERR(buffer)) {
 		mutex_unlock(&alloc->mutex);
 		goto out;
@@ -578,6 +575,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->data_size = data_size;
 	buffer->offsets_size = offsets_size;
 	buffer->extra_buffers_size = extra_buffers_size;
+	buffer->pid = current->tgid;
 	mutex_unlock(&alloc->mutex);
 
 out:
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -49,15 +49,13 @@ struct binder_buffer {
 	unsigned async_transaction:1;
 	unsigned oneway_spam_suspect:1;
 	unsigned debug_id:27;
-
 	struct binder_transaction *transaction;
-
 	struct binder_node *target_node;
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
 	unsigned long user_data;
 	int pid;
 };
 
 /**
@@ -125,8 +123,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
 					   size_t offsets_size,
 					   size_t extra_buffers_size,
-					   int is_async,
-					   int pid);
+					   int is_async);
 void binder_alloc_init(struct binder_alloc *alloc);
 int binder_alloc_shrinker_init(void);
 void binder_alloc_shrinker_exit(void);
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -119,7 +119,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++) {
-		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
 		if (IS_ERR(buffers[i]) ||
 		    !check_buffer_pages_allocated(alloc, buffers[i],
 						  sizes[i])) {
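A side note on attribution semantics: buffer->pid stores current->tgid (the
thread group id, i.e. the process id as seen from userspace), so buffers
allocated by any thread of a process are attributed to the process as a
whole. A minimal, hypothetical debug helper illustrating how the field might
be read (not part of this patch or the kernel tree):

    /* hypothetical sketch: report which process a buffer is attributed to */
    static void binder_debug_print_owner(struct binder_buffer *buffer)
    {
            pr_info("buffer %d allocated by process %d\n",
                    buffer->debug_id, buffer->pid);
    }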