The patch below does not apply to the 4.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id, to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From ecd182cbf4e107928077866399100228d2359c60 Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Date: Fri, 22 Feb 2019 20:03:55 +0900
Subject: [PATCH] staging: android: ashmem: Avoid range_alloc() allocation with ashmem_mutex held.
ashmem_pin() is calling range_shrink() without checking whether range_alloc() succeeded. Also, doing memory allocation with ashmem_mutex held should be avoided because ashmem_shrink_scan() tries to hold it.
Therefore, move the memory allocation for range_alloc() to ashmem_pin_unpin() and make range_alloc() unable to fail.
This patch is mostly meant for backporting purposes, to aid fuzz testing on stable/distributor kernels, since there is a plan to remove this code in the near future.
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable@vger.kernel.org
Reviewed-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 5d5b091b957a..74d497d39c5a 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -172,19 +172,15 @@ static inline void lru_del(struct ashmem_range *range)
  * @end:	   The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-		       struct ashmem_range *prev_range, unsigned int purged,
-		       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+			struct ashmem_range *prev_range, unsigned int purged,
+			size_t start, size_t end,
+			struct ashmem_range **new_range)
 {
-	struct ashmem_range *range;
-
-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (!range)
-		return -ENOMEM;
+	struct ashmem_range *range = *new_range;
 
+	*new_range = NULL;
 	range->asma = asma;
 	range->pgstart = start;
 	range->pgend = end;
@@ -194,8 +190,6 @@ static int range_alloc(struct ashmem_area *asma,
 
 	if (range_on_lru(range))
 		lru_add(range);
-
-	return 0;
 }
 
 /**
@@ -597,7 +591,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+		      struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	int ret = ASHMEM_NOT_PURGED;
@@ -650,7 +645,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
 			 * second half and adjust the first chunk's endpoint.
 			 */
 			range_alloc(asma, range, range->purged,
-				    pgend + 1, range->pgend);
+				    pgend + 1, range->pgend, new_range);
 			range_shrink(range, range->pgstart, pgstart - 1);
 			break;
 		}
@@ -664,7 +659,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+			struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	unsigned int purged = ASHMEM_NOT_PURGED;
@@ -690,7 +686,8 @@ static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
 		}
 	}
 
-	return range_alloc(asma, range, purged, pgstart, pgend);
+	range_alloc(asma, range, purged, pgstart, pgend, new_range);
+	return 0;
 }
 
 /*
@@ -723,10 +720,17 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 	struct ashmem_pin pin;
 	size_t pgstart, pgend;
 	int ret = -EINVAL;
+	struct ashmem_range *range = NULL;
 
 	if (copy_from_user(&pin, p, sizeof(pin)))
 		return -EFAULT;
 
+	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+		if (!range)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&ashmem_mutex);
 	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
@@ -751,10 +755,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
 	switch (cmd) {
 	case ASHMEM_PIN:
-		ret = ashmem_pin(asma, pgstart, pgend);
+		ret = ashmem_pin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_UNPIN:
-		ret = ashmem_unpin(asma, pgstart, pgend);
+		ret = ashmem_unpin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_GET_PIN_STATUS:
 		ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -763,6 +767,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
 out_unlock:
 	mutex_unlock(&ashmem_mutex);
+	if (range)
+		kmem_cache_free(ashmem_range_cachep, range);
 
 	return ret;
 }
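For whoever picks up the 4.14 backport, the shape of the change is: allocate the ashmem_range with no lock held in ashmem_pin_unpin(), hand the preallocation down to range_alloc() (which can then no longer fail), and free whatever was not consumed after ashmem_mutex is dropped. A minimal userspace sketch of that pattern follows; the names in it (struct range, range_insert, pin_unpin) are illustrative only and are not taken from ashmem.c:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace sketch of the "preallocate outside the lock" pattern used by
 * the patch.  All identifiers below are made up for illustration.
 */
struct range {
	size_t start, end;
	struct range *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct range *range_list;	/* protected by 'lock' */

/* Consumes the preallocated object; cannot fail, like the patched range_alloc(). */
static void range_insert(struct range **new_range, size_t start, size_t end)
{
	struct range *r = *new_range;

	*new_range = NULL;
	r->start = start;
	r->end = end;
	r->next = range_list;
	range_list = r;
}

static int pin_unpin(size_t start, size_t end, int need_range)
{
	struct range *range;

	/* Allocate while no lock is held, mirroring the kmem_cache_zalloc()
	 * call the patch moves up into ashmem_pin_unpin(). */
	range = calloc(1, sizeof(*range));
	if (!range)
		return -1;

	pthread_mutex_lock(&lock);
	if (need_range)		/* only some code paths consume the object */
		range_insert(&range, start, end);
	pthread_mutex_unlock(&lock);

	/* Free the preallocation if the locked section did not use it;
	 * free(NULL) is a no-op, like the "if (range)" kmem_cache_free()
	 * in the patch. */
	free(range);
	return 0;
}

int main(void)
{
	if (pin_unpin(0, 4, 1) || pin_unpin(5, 9, 0))
		return 1;

	while (range_list) {
		struct range *r = range_list;

		range_list = r->next;
		printf("range [%zu, %zu]\n", r->start, r->end);
		free(r);
	}
	return 0;
}

The point of the pattern is that the only allocation happens before mutex_lock(), so ashmem_shrink_scan(), which also wants ashmem_mutex, can still make progress during reclaim, and the code running under the lock can no longer fail with -ENOMEM.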