Commit 57b5cd06 authored by Rebecca Schultz Zavin's avatar Rebecca Schultz Zavin Committed by Greg Kroah-Hartman

gpu: ion: ion_chunk_heap: Zero chunk heap memory at creation time

Allocations from the ion heap need to be zeroed to protect userspace
from seeing memory belonging to other processes.  Previously, allocations
from this heap were not zeroed, allowing users to see memory from other
processes on a warm reset.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e09539a0
@@ -140,6 +140,10 @@ static struct ion_heap_ops chunk_heap_ops = {
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_chunk_heap *chunk_heap;
+	struct vm_struct *vm_struct;
+	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+	int i, ret;
 
 	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
 	if (!chunk_heap)
@@ -149,12 +153,30 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
 					   PAGE_SHIFT, -1);
 	if (!chunk_heap->pool) {
-		kfree(chunk_heap);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto error_gen_pool_create;
 	}
 	chunk_heap->base = heap_data->base;
 	chunk_heap->size = heap_data->size;
 	chunk_heap->allocated = 0;
+
+	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	if (!vm_struct) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
+		struct page *page = phys_to_page(chunk_heap->base + i);
+		struct page **pages = &page;
+
+		ret = map_vm_area(vm_struct, pgprot, &pages);
+		if (ret)
+			goto error_map_vm_area;
+		memset(vm_struct->addr, 0, PAGE_SIZE);
+		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
+	}
+	free_vm_area(vm_struct);
+
 	__dma_page_cpu_to_dev(phys_to_page(heap_data->base), 0, heap_data->size,
 			      DMA_BIDIRECTIONAL);
 	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
@@ -165,6 +187,14 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 			heap_data->size, heap_data->align);
 
 	return &chunk_heap->heap;
+
+error_map_vm_area:
+	free_vm_area(vm_struct);
+error:
+	gen_pool_destroy(chunk_heap->pool);
+error_gen_pool_create:
+	kfree(chunk_heap);
+	return ERR_PTR(ret);
 }
 
 void ion_chunk_heap_destroy(struct ion_heap *heap)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment