Commit ecd182cb authored by Tetsuo Handa, committed by Greg Kroah-Hartman

staging: android: ashmem: Avoid range_alloc() allocation with ashmem_mutex held.

ashmem_pin() is calling range_shrink() without checking whether
range_alloc() succeeded. Also, memory allocation with ashmem_mutex held
should be avoided: a GFP_KERNEL allocation can enter direct reclaim and
reach ashmem_shrink_scan(), which tries to take the same mutex.

Therefore, move the memory allocation out of range_alloc() and into
ashmem_pin_unpin(), so that range_alloc() itself can no longer fail.

This patch is mostly meant to ease backporting for fuzz testing on
stable/distro kernels, since there is a plan to remove this code in the
near future.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable@vger.kernel.org
Reviewed-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1d4ccf2b
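
In short, the patch pre-allocates the range object in ashmem_pin_unpin()
before taking ashmem_mutex, hands it down through a double pointer, and
frees it after the unlock if no path consumed it, so nothing allocates
(and thus nothing can re-enter the shrinker) while the mutex is held.
Below is a minimal standalone sketch of that pre-allocation pattern; it
uses plain pthread/malloc stand-ins and illustrative names (pin_unpin,
insert_range, reserve), not the driver's actual code, which follows in
the diff.

/* Sketch only: pthread/malloc stand in for ashmem_mutex and
 * kmem_cache_zalloc(); all names here are illustrative. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int start, end;
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Runs under list_lock; consumes *reserve instead of allocating. */
static void insert_range(int start, int end, struct node **reserve)
{
        struct node *n = *reserve;

        *reserve = NULL;                /* mark the reservation as used */
        n->start = start;
        n->end = end;
        n->next = head;
        head = n;
}

static int pin_unpin(int start, int end)
{
        /* Allocate up front, while no lock is held, so the allocator
         * can never re-enter code that needs list_lock. */
        struct node *reserve = calloc(1, sizeof(*reserve));

        if (!reserve)
                return -1;

        pthread_mutex_lock(&list_lock);
        if (start <= end)               /* only some paths need the node */
                insert_range(start, end, &reserve);
        pthread_mutex_unlock(&list_lock);

        /* If nothing consumed the reservation, give it back. */
        free(reserve);
        return 0;
}

int main(void)
{
        pin_unpin(0, 4);
        if (head)
                printf("inserted range %d-%d\n", head->start, head->end);
        return 0;
}
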
@@ -172,19 +172,15 @@ static inline void lru_del(struct ashmem_range *range)
  * @end:          The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-                       struct ashmem_range *prev_range, unsigned int purged,
-                       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+                        struct ashmem_range *prev_range, unsigned int purged,
+                        size_t start, size_t end,
+                        struct ashmem_range **new_range)
 {
-        struct ashmem_range *range;
-
-        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-        if (!range)
-                return -ENOMEM;
+        struct ashmem_range *range = *new_range;
 
+        *new_range = NULL;
         range->asma = asma;
         range->pgstart = start;
         range->pgend = end;
@@ -194,8 +190,6 @@ static int range_alloc(struct ashmem_area *asma,
 
         if (range_on_lru(range))
                 lru_add(range);
-
-        return 0;
 }
 
 /**
@@ -597,7 +591,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+                      struct ashmem_range **new_range)
 {
         struct ashmem_range *range, *next;
         int ret = ASHMEM_NOT_PURGED;
@@ -650,7 +645,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
                          * second half and adjust the first chunk's endpoint.
                          */
                         range_alloc(asma, range, range->purged,
-                                    pgend + 1, range->pgend);
+                                    pgend + 1, range->pgend, new_range);
                         range_shrink(range, range->pgstart, pgstart - 1);
                         break;
                 }
@@ -664,7 +659,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+                        struct ashmem_range **new_range)
 {
         struct ashmem_range *range, *next;
         unsigned int purged = ASHMEM_NOT_PURGED;
@@ -690,7 +686,8 @@ static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
                 }
         }
 
-        return range_alloc(asma, range, purged, pgstart, pgend);
+        range_alloc(asma, range, purged, pgstart, pgend, new_range);
+        return 0;
 }
 
 /*
@@ -723,10 +720,17 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
         struct ashmem_pin pin;
         size_t pgstart, pgend;
         int ret = -EINVAL;
+        struct ashmem_range *range = NULL;
 
         if (copy_from_user(&pin, p, sizeof(pin)))
                 return -EFAULT;
 
+        if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+                range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+                if (!range)
+                        return -ENOMEM;
+        }
+
         mutex_lock(&ashmem_mutex);
         wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
@@ -751,10 +755,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
         switch (cmd) {
         case ASHMEM_PIN:
-                ret = ashmem_pin(asma, pgstart, pgend);
+                ret = ashmem_pin(asma, pgstart, pgend, &range);
                 break;
         case ASHMEM_UNPIN:
-                ret = ashmem_unpin(asma, pgstart, pgend);
+                ret = ashmem_unpin(asma, pgstart, pgend, &range);
                 break;
         case ASHMEM_GET_PIN_STATUS:
                 ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -763,6 +767,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
 out_unlock:
         mutex_unlock(&ashmem_mutex);
+        if (range)
+                kmem_cache_free(ashmem_range_cachep, range);
 
         return ret;
 }