Commit 957f3f8e authored by Linus Torvalds

Merge tag 'char-misc-6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc fixes from Greg KH:
 "Here are some small driver fixes for 6.4-rc4. They are just two
  different types:

   - binder fixes and reverts for reported problems and regressions in
     the binder "driver".

   - coresight driver fixes for reported problems.

  All of these have been in linux-next for over a week with no reported
  problems"

* tag 'char-misc-6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  binder: fix UAF of alloc->vma in race with munmap()
  binder: add lockless binder_alloc_(set|get)_vma()
  Revert "android: binder: stop saving a pointer to the VMA"
  Revert "binder_alloc: add missing mmap_lock calls when using the VMA"
  binder: fix UAF caused by faulty buffer cleanup
  coresight: perf: Release Coresight path when alloc trace id failed
  coresight: Fix signedness bug in tmc_etr_buf_insert_barrier_packet()
parents 49572d53 d1d8875c
@@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd)
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                               struct binder_thread *thread,
                                               struct binder_buffer *buffer,
-                                              binder_size_t failed_at,
+                                              binder_size_t off_end_offset,
                                               bool is_failure)
 {
         int debug_id = buffer->debug_id;
-        binder_size_t off_start_offset, buffer_offset, off_end_offset;
+        binder_size_t off_start_offset, buffer_offset;
 
         binder_debug(BINDER_DEBUG_TRANSACTION,
                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
                      proc->pid, buffer->debug_id,
                      buffer->data_size, buffer->offsets_size,
-                     (unsigned long long)failed_at);
+                     (unsigned long long)off_end_offset);
 
         if (buffer->target_node)
                 binder_dec_node(buffer->target_node, 1, 0);
 
         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
-        off_end_offset = is_failure && failed_at ? failed_at :
-                                off_start_offset + buffer->offsets_size;
+
         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
              buffer_offset += sizeof(binder_size_t)) {
                 struct binder_object_header *hdr;
@@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
         }
 }
 
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+                                                struct binder_thread *thread,
+                                                struct binder_buffer *buffer,
+                                                bool is_failure)
+{
+        binder_size_t off_end_offset;
+
+        off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+        off_end_offset += buffer->offsets_size;
+
+        binder_transaction_buffer_release(proc, thread, buffer,
+                                          off_end_offset, is_failure);
+}
+
 static int binder_translate_binder(struct flat_binder_object *fp,
                                    struct binder_transaction *t,
                                    struct binder_thread *thread)
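The UAF fixed above came from overloading the old failed_at parameter: zero
meant both "transaction failed before any object was processed" and "release
the whole buffer", so a failure at offset 0 was misread as a full release and
tore down objects that were still referenced. The new helper removes the
sentinel by always passing an explicit end offset. A minimal standalone
sketch of the pitfall, with hypothetical names rather than the binder code:

#include <stddef.h>
#include <stdio.h>

/* Pretend-release of the objects in [0, end_offset). */
static void buffer_release(size_t end_offset)
{
        printf("releasing objects in [0, %zu)\n", end_offset);
}

int main(void)
{
        size_t full_size = 128;
        size_t failed_at = 0;   /* failed before the first object */

        /* Old scheme: 0 doubled as "everything", so this wrongly
         * releases all of the objects instead of none of them. */
        buffer_release(failed_at ? failed_at : full_size);

        /* New scheme: callers always pass an explicit end offset,
         * so "nothing processed yet" stays a harmless no-op. */
        buffer_release(failed_at);
        return 0;
}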
@@ -2806,7 +2820,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
                         t_outdated->buffer = NULL;
                         buffer->transaction = NULL;
                         trace_binder_transaction_update_buffer_release(buffer);
-                        binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+                        binder_release_entire_buffer(proc, NULL, buffer, false);
                         binder_alloc_free_buf(&proc->alloc, buffer);
                         kfree(t_outdated);
                         binder_stats_deleted(BINDER_STAT_TRANSACTION);
@@ -3775,7 +3789,7 @@ binder_free_buf(struct binder_proc *proc,
                 binder_node_inner_unlock(buf_node);
         }
         trace_binder_transaction_buffer_release(buffer);
-        binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+        binder_release_entire_buffer(proc, thread, buffer, is_failure);
         binder_alloc_free_buf(&proc->alloc, buffer);
 }
...
@@ -212,8 +212,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
         mm = alloc->mm;
 
         if (mm) {
-                mmap_read_lock(mm);
-                vma = vma_lookup(mm, alloc->vma_addr);
+                mmap_write_lock(mm);
+                vma = alloc->vma;
         }
 
         if (!vma && need_mm) {
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                 trace_binder_alloc_page_end(alloc, index);
         }
         if (mm) {
-                mmap_read_unlock(mm);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return 0;
@@ -303,21 +303,24 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 }
 err_no_vma:
         if (mm) {
-                mmap_read_unlock(mm);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+                struct vm_area_struct *vma)
+{
+        /* pairs with smp_load_acquire in binder_alloc_get_vma() */
+        smp_store_release(&alloc->vma, vma);
+}
+
 static inline struct vm_area_struct *binder_alloc_get_vma(
                 struct binder_alloc *alloc)
 {
-        struct vm_area_struct *vma = NULL;
-
-        if (alloc->vma_addr)
-                vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
-        return vma;
+        /* pairs with smp_store_release in binder_alloc_set_vma() */
+        return smp_load_acquire(&alloc->vma);
 }
 
 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
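With these helpers, alloc->vma becomes a lockless publication point: the
writer finishes initializing the binder_alloc before the release store, and
any reader that observes a non-NULL vma through the acquire load is
guaranteed to also see that initialization. The same pairing expressed in
portable C11 atomics (a standalone sketch with made-up names, not the
kernel's smp_store_release()/smp_load_acquire() API):

#include <stdatomic.h>
#include <stdio.h>

struct mapping { int buffer_size; };    /* stand-in for binder_alloc state */

static _Atomic(struct mapping *) published_map;

/* Writer: complete all initialization, then publish with a release store
 * so the earlier writes cannot be reordered past the pointer update. */
static void publish(struct mapping *m)
{
        m->buffer_size = 4096;
        atomic_store_explicit(&published_map, m, memory_order_release);
}

/* Reader: the acquire load pairs with the release store; a non-NULL
 * result implies buffer_size is visible and valid. */
static struct mapping *lookup(void)
{
        return atomic_load_explicit(&published_map, memory_order_acquire);
}

int main(void)
{
        static struct mapping map;

        publish(&map);

        struct mapping *m = lookup();
        if (m)
                printf("fully initialized: %d\n", m->buffer_size);
        return 0;
}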
@@ -380,15 +383,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
         size_t size, data_offsets_size;
         int ret;
 
-        mmap_read_lock(alloc->mm);
+        /* Check binder_alloc is fully initialized */
         if (!binder_alloc_get_vma(alloc)) {
-                mmap_read_unlock(alloc->mm);
                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                    "%d: binder_alloc_buf, no vma\n",
                                    alloc->pid);
                 return ERR_PTR(-ESRCH);
         }
-        mmap_read_unlock(alloc->mm);
 
         data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                 ALIGN(offsets_size, sizeof(void *));
@@ -778,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
         buffer->free = 1;
         binder_insert_free_buffer(alloc, buffer);
         alloc->free_async_space = alloc->buffer_size / 2;
-        alloc->vma_addr = vma->vm_start;
+
+        /* Signal binder_alloc is fully initialized */
+        binder_alloc_set_vma(alloc, vma);
 
         return 0;
@@ -808,8 +811,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
         buffers = 0;
         mutex_lock(&alloc->mutex);
-        BUG_ON(alloc->vma_addr &&
-               vma_lookup(alloc->mm, alloc->vma_addr));
+        BUG_ON(alloc->vma);
 
         while ((n = rb_first(&alloc->allocated_buffers))) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -916,25 +918,17 @@ void binder_alloc_print_pages(struct seq_file *m,
          * Make sure the binder_alloc is fully initialized, otherwise we might
          * read inconsistent state.
          */
-
-        mmap_read_lock(alloc->mm);
-        if (binder_alloc_get_vma(alloc) == NULL) {
-                mmap_read_unlock(alloc->mm);
-                goto uninitialized;
-        }
-
-        mmap_read_unlock(alloc->mm);
-        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                page = &alloc->pages[i];
-                if (!page->page_ptr)
-                        free++;
-                else if (list_empty(&page->lru))
-                        active++;
-                else
-                        lru++;
+        if (binder_alloc_get_vma(alloc) != NULL) {
+                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+                        page = &alloc->pages[i];
+                        if (!page->page_ptr)
+                                free++;
+                        else if (list_empty(&page->lru))
+                                active++;
+                        else
+                                lru++;
+                }
         }
-
-uninitialized:
         mutex_unlock(&alloc->mutex);
         seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
         seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
@@ -969,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-        alloc->vma_addr = 0;
+        binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
...
@@ -75,7 +75,7 @@ struct binder_lru_page {
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
+ * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
  */
 struct binder_alloc {
         struct mutex mutex;
-        unsigned long vma_addr;
+        struct vm_area_struct *vma;
         struct mm_struct *mm;
         void __user *buffer;
         struct list_head buffers;
...
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
         if (!binder_selftest_run)
                 return;
         mutex_lock(&binder_selftest_lock);
-        if (!binder_selftest_run || !alloc->vma_addr)
+        if (!binder_selftest_run || !alloc->vma)
                 goto done;
         pr_info("STARTED\n");
         binder_selftest_alloc_offset(alloc, end_offset, 0);
...
@@ -402,6 +402,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
                 trace_id = coresight_trace_id_get_cpu_id(cpu);
                 if (!IS_VALID_CS_TRACE_ID(trace_id)) {
                         cpumask_clear_cpu(cpu, mask);
+                        coresight_release_path(path);
                         continue;
                 }
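Without the added release, every CPU that fails trace-ID allocation leaks
the coresight path built earlier in the same loop iteration. The shape of
the bug, reduced to a standalone sketch (hypothetical helpers, not the
coresight API):

#include <stdlib.h>

struct path { int cpu; };

static struct path *build_path(int cpu)
{
        struct path *p = malloc(sizeof(*p));

        if (p)
                p->cpu = cpu;
        return p;
}

static int get_trace_id(int cpu)
{
        return (cpu & 1) ? -1 : cpu;    /* odd CPUs fail, for illustration */
}

int main(void)
{
        for (int cpu = 0; cpu < 8; cpu++) {
                struct path *p = build_path(cpu);

                if (!p)
                        continue;       /* nothing held yet: safe to skip */

                if (get_trace_id(cpu) < 0) {
                        free(p);        /* the fix: release before skipping */
                        continue;       /* without free(), p leaked here */
                }

                free(p);                /* normal teardown for the sketch */
        }
        return 0;
}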
...
@@ -942,7 +942,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
         len = tmc_etr_buf_get_data(etr_buf, offset,
                                    CORESIGHT_BARRIER_PKT_SIZE, &bufp);
-        if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+        if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
                 return -EINVAL;
         coresight_insert_barrier_packet(bufp);
         return offset + CORESIGHT_BARRIER_PKT_SIZE;
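The signedness bug: tmc_etr_buf_get_data() can return a negative error in
the signed len, but comparing it against the unsigned, sizeof-derived
CORESIGHT_BARRIER_PKT_SIZE promotes len to unsigned, so a negative value
compares as a huge number and sails past the length check. A standalone
demonstration of why the explicit len < 0 test is needed:

#include <stdio.h>
#include <sys/types.h>

#define PKT_SIZE sizeof(long long)      /* unsigned size_t, like a
                                         * sizeof-based packet size */

int main(void)
{
        ssize_t len = -22;      /* an error return such as -EINVAL */

        /* Broken check: len is promoted to size_t, so -22 becomes a huge
         * unsigned value and the "too short" test never fires. */
        if (len < PKT_SIZE)
                printf("broken check: caught\n");
        else
                printf("broken check: missed the error\n");

        /* Fixed check: test the sign first; short-circuiting keeps the
         * second comparison safe for the remaining non-negative values. */
        if (len < 0 || (size_t)len < PKT_SIZE)
                printf("fixed check: caught\n");
        return 0;
}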