The parameter kfence_sample_interval can be set via a boot parameter
or at run time from the shell, which is convenient for automated tests
and for KFENCE parameter tuning. However, the KFENCE test case only
uses the compile-time CONFIG_KFENCE_SAMPLE_INTERVAL, so the test may
not run with the interval users actually configured. Export
kfence_sample_interval so that the KFENCE test case can use the
run-time-set sample interval.
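For reference, a minimal sketch (assuming only the export added by this
patch; kfence_test_timeout() is a hypothetical helper name) of how a test
module can honor the run-time interval, which can be changed at boot via
kfence.sample_interval=<ms> or later via
/sys/module/kfence/parameters/sample_interval:

#include <linux/kfence.h>
#include <linux/jiffies.h>

static unsigned long kfence_test_timeout(void)
{
	/* 100x the sample interval should be enough to eventually see a
	 * KFENCE allocation, mirroring the test's existing rationale.
	 */
	return jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
}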
Signed-off-by: Peng Liu <liupeng256@huawei.com>
---
v2->v3:
- Revise change log description
v1->v2:
- Use EXPORT_SYMBOL_GPL instead of EXPORT_SYMBOL
include/linux/kfence.h | 2 ++
mm/kfence/core.c | 3 ++-
mm/kfence/kfence_test.c | 8 ++++----
3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 4b5e3679a72c..f49e64222628 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -17,6 +17,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5ad40e3add45..13128fa13062 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -47,7 +47,8 @@
static bool kfence_enabled __read_mostly;
-static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index a22b1af85577..50dbb815a2a8 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -268,13 +268,13 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
/*
* Especially for non-preemption kernels, ensure the allocation-gate
* timer can catch up: after @resched_after, every failed allocation
* attempt yields, to ensure the allocation-gate timer is scheduled.
*/
- resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
do {
if (test_cache)
alloc = kmem_cache_alloc(test_cache, gfp);
@@ -608,7 +608,7 @@ static void test_gfpzero(struct kunit *test)
int i;
/* Skip if we think it'd take too long. */
- KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);
+ KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
setup_test_cache(test, size, 0, NULL);
buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
@@ -739,7 +739,7 @@ static void test_memcache_alloc_bulk(struct kunit *test)
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
do {
void *objects[100];
int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
--
2.18.0.huawei.25
It appears that nr could be a Spectre v1 gadget, as it's supplied by a
user and used as an array index. Prevent the contents of kernel memory
from being leaked to userspace via speculative execution by using
array_index_nospec().
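For context, array_index_nospec() clamps an index to zero under
speculation when it is out of range, so the general pattern (a minimal
sketch with a hypothetical table and lookup helper, not the driver's
actual code) looks like:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/nospec.h>

static const int table[8] = { 0 };

static int lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	/* Clamp idx under speculation so table[idx] cannot be used to
	 * speculatively read beyond the array bounds.
	 */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	return table[idx];
}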
Signed-off-by: Jordy Zomer <jordy@pwning.systems>
---
drivers/dma-buf/dma-heap.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 56bf5ad01ad5..8f5848aa144f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -14,6 +14,7 @@
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
return -EINVAL;
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
/* Get the kernel ioctl cmd that matches */
kcmd = dma_heap_ioctl_cmds[nr];
--
2.27.0
Hammer home a bit more that iterators can be restarted and when that
matters, plus suggest preferring the locked version whenever possible.
Also delete the two leftover kerneldoc comments for static functions and
sprinkle some more links while at it.
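For illustration, a minimal sketch of the pattern the updated kerneldoc
recommends when the unlocked iterator has to be used (count_unsignaled()
is a hypothetical helper; this assumes the bool all_fences form of
dma_resv_iter_begin() as it exists at this point):

#include <linux/dma-resv.h>

static unsigned int count_unsignaled(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* A concurrent modification restarts the walk, so any
		 * accumulated state must be thrown away and rebuilt.
		 */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}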
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
drivers/dma-buf/dma-resv.c | 26 ++++++++++++--------------
include/linux/dma-resv.h | 13 ++++++++++++-
2 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 9eb2baa387d4..1453b664c405 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -323,12 +323,6 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
-/**
- * dma_resv_iter_restart_unlocked - restart the unlocked iterator
- * @cursor: The dma_resv_iter object to restart
- *
- * Restart the unlocked iteration by initializing the cursor object.
- */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
cursor->seq = read_seqcount_begin(&cursor->obj->seq);
@@ -344,14 +338,6 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
cursor->is_restarted = true;
}
-/**
- * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
- * @cursor: cursor to record the current position
- *
- * Return all the fences in the dma_resv object which are not yet signaled.
- * The returned fence has an extra local reference so will stay alive.
- * If a concurrent modify is detected the whole iteration is started over again.
- */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
struct dma_resv *obj = cursor->obj;
@@ -387,6 +373,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
* dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
* @cursor: the cursor with the current position
*
+ * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
+ *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
+ *
* Returns the first fence from an unlocked dma_resv obj.
*/
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
@@ -406,6 +398,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
* dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
* @cursor: the cursor with the current position
*
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
+ *
* Returns the next fence from an unlocked dma_resv obj.
*/
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
@@ -431,6 +427,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
* dma_resv_iter_first - first fence from a locked dma_resv object
* @cursor: cursor to record the current position
*
+ * Subsequent fences are iterated with dma_resv_iter_next().
+ *
* Return the first fence in the dma_resv object while holding the
* &dma_resv.lock.
*/
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index dbd235ab447f..ebe908592ac3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -153,6 +153,13 @@ struct dma_resv {
* struct dma_resv_iter - current position into the dma_resv fences
*
* Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
*/
struct dma_resv_iter {
/** @obj: The dma_resv object we iterate over */
@@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* &dma_resv.lock and using RCU instead. The cursor needs to be initialized
* with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
* the iterator a reference to the dma_fence is held and the RCU lock dropped.
- * When the dma_resv is modified the iteration starts over again.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * locked iterator dma_resv_for_each_fence() whenever possible.
*/
#define dma_resv_for_each_fence_unlocked(cursor, fence) \
for (fence = dma_resv_iter_first_unlocked(cursor); \
--
2.33.0
Consolidate the wrapper functions to check for dma_fence
subclasses in the dma_fence header.
This makes it easier to document and also check the different
requirements for fence containers in the subclasses.
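As an illustration (a hypothetical call site, not part of this patch), a
caller can now use the consolidated helpers from dma-fence.h directly,
e.g. to refuse nesting one container inside another:

#include <linux/errno.h>
#include <linux/dma-fence.h>

static int check_fence(struct dma_fence *fence)
{
	/* Containers must not be nested, otherwise operations on the
	 * resulting structure could recurse without bound.
	 */
	if (dma_fence_is_container(fence))
		return -EINVAL;

	return 0;
}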
Signed-off-by: Christian König <christian.koenig@amd.com>
---
include/linux/dma-fence-array.h | 15 +------------
include/linux/dma-fence-chain.h | 3 +--
include/linux/dma-fence.h | 38 +++++++++++++++++++++++++++++++++
3 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 303dd712220f..fec374f69e12 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -45,19 +45,6 @@ struct dma_fence_array {
struct irq_work work;
};
-extern const struct dma_fence_ops dma_fence_array_ops;
-
-/**
- * dma_fence_is_array - check if a fence is from the array subsclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
-
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
* @fence: fence to cast to a dma_fence_array
@@ -68,7 +55,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 54fe3443fd2c..ee906b659694 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -49,7 +49,6 @@ struct dma_fence_chain {
spinlock_t lock;
};
-extern const struct dma_fence_ops dma_fence_chain_ops;
/**
* to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -61,7 +60,7 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
- if (!fence || fence->ops != &dma_fence_chain_ops)
+ if (!fence || !dma_fence_is_chain(fence))
return NULL;
return container_of(fence, struct dma_fence_chain, base);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 1ea691753bd3..775cdc0b4f24 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures,
+ * otherwise we run into recursion during operations on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
+
#endif /* __LINUX_DMA_FENCE_H */
--
2.25.1
While porting i915 to arm64 we noticed some issues accessing lmem.
Some writes were getting corrupted and the final state of the buffer
didn't contain exactly what we wrote. This became evident when enabling
GuC submission: depending on the number of engines, the ADS struct was
being corrupted and GuC would reject it, refusing to initialize.
From Documentation/core-api/bus-virt-phys-mapping.rst:
This memory is called "PCI memory" or "shared memory" or "IO memory" or
whatever, and there is only one way to access it: the readb/writeb and
related functions. You should never take the address of such memory, because
there is really nothing you can do with such an address: it's not
conceptually in the same memory space as "real memory" at all, so you cannot
just dereference a pointer. (Sadly, on x86 it **is** in the same memory space,
so on x86 it actually works to just deference a pointer, but it's not
portable).
When reading or writing words directly to IO memory, in order to be portable
the Linux kernel provides the abstraction detailed in section "Differences
between I/O access functions" of Documentation/driver-api/device-io.rst.
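To make that concrete, a minimal sketch (hypothetical struct, field
offset and helper name, for illustration only) of the non-portable struct
overlay versus going through the IO accessors:

#include <linux/io.h>

static void ads_write_example(void __iomem *base)
{
	/* Non-portable: overlay a struct and dereference it directly.
	 * This happens to work on x86 but can corrupt data elsewhere:
	 *
	 *   struct guc_ads *ads = (struct guc_ads __force *)base;
	 *   ads->some_field = 1;
	 */

	/* Portable: use the IO accessors at the right offset instead. */
	writel(1, base + 0x10 /* offset of some_field */);
}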
This limits our ability to simply overlay our structs on top of a buffer
and directly access it, since that buffer may come from IO memory rather
than system memory. Hence the approach taken in intel_guc_ads.c needs to
be refactored. This is not the only place in i915 that needs to be
changed, but it is the one causing the most problems, with a real
reproducer. This first set of patches focuses on fixing the GEM object
used to pass the ADS to GuC.
After the addition of a few helpers in the dma_buf_map API, most of
intel_guc_ads.c can be converted to use it. The exception is the regset
initialization: we'd incur a lot of extra indirection when
reading/writing each register. So the regset is converted to use a
temporary buffer allocated on probe, which is then copied to its
final location when finishing the initialization or on gt reset.
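As a sketch of the direction (using only helpers already in
include/linux/dma-buf-map.h; ads_blob_copy() is a hypothetical helper and
the new helpers added by this series may look different), writes to the
blob go through the dma_buf_map API so they work for both system and IO
memory:

#include <linux/dma-buf-map.h>

/* Copy an updated chunk into the ADS blob at the given offset,
 * regardless of whether the blob lives in system or IO memory.
 */
static void ads_blob_copy(struct dma_buf_map *map, size_t offset,
			  const void *src, size_t len)
{
	struct dma_buf_map dst = *map;

	dma_buf_map_incr(&dst, offset);
	dma_buf_map_memcpy_to(&dst, src, len);
}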
Testing on some discrete cards shows that, after this change, we can
correctly pass the ADS struct to GuC and have it initialize as expected.
thanks
Lucas De Marchi
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: linux-kernel@vger.kernel.org
Cc: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Lucas De Marchi (19):
dma-buf-map: Add read/write helpers
dma-buf-map: Add helper to initialize second map
drm/i915/gt: Add helper for shmem copy to dma_buf_map
drm/i915/guc: Keep dma_buf_map of ads_blob around
drm/i915/guc: Add read/write helpers for ADS blob
drm/i915/guc: Convert golden context init to dma_buf_map
drm/i915/guc: Convert policies update to dma_buf_map
drm/i915/guc: Convert engine record to dma_buf_map
dma-buf-map: Add wrapper over memset
drm/i915/guc: Convert guc_ads_private_data_reset to dma_buf_map
drm/i915/guc: Convert golden context prep to dma_buf_map
drm/i915/guc: Replace check for golden context size
drm/i915/guc: Convert mapping table to dma_buf_map
drm/i915/guc: Convert capture list to dma_buf_map
drm/i915/guc: Prepare for error propagation
drm/i915/guc: Use a single pass to calculate regset
drm/i915/guc: Convert guc_mmio_reg_state_init to dma_buf_map
drm/i915/guc: Convert __guc_ads_init to dma_buf_map
drm/i915/guc: Remove plain ads_blob pointer
drivers/gpu/drm/i915/gt/shmem_utils.c | 32 ++
drivers/gpu/drm/i915/gt/shmem_utils.h | 3 +
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 14 +-
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 374 +++++++++++-------
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h | 3 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 11 +-
include/linux/dma-buf-map.h | 127 ++++++
7 files changed, 405 insertions(+), 159 deletions(-)
--
2.35.0
On Thu, Jan 27, 2022 at 02:24:29PM +0100, Greg Kroah-Hartman wrote:
> On Thu, Jan 27, 2022 at 02:02:18PM +0100, Mathias Krause wrote:
> > If the copy back to userland fails for the FASTRPC_IOCTL_ALLOC_DMA_BUFF
> > ioctl(), we shouldn't assume that 'buf->dmabuf' is still valid. In fact,
> > dma_buf_fd() called fd_install() before, i.e. "consumed" one reference,
> > leaving us with none.
> >
> > Calling dma_buf_put() will therefore put a reference we no longer own,
> > leading to a valid file descriptor table entry for an already released
> > 'file' object which is a straight use-after-free.
> >
> > Simply avoid calling dma_buf_put() and rely on the process exit code to
> > do the necessary cleanup, if needed, i.e. if the file descriptor is
> > still valid.
> >
> > Fixes: 6cffd79504ce ("misc: fastrpc: Add support for dmabuf exporter")
> > Signed-off-by: Mathias Krause <minipli@grsecurity.net>
> > ---
> > drivers/misc/fastrpc.c | 9 ++++++++-
> > 1 file changed, 8 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> > index 4ccbf43e6bfa..aa1682b94a23 100644
> > --- a/drivers/misc/fastrpc.c
> > +++ b/drivers/misc/fastrpc.c
> > @@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
> > }
> >
> > if (copy_to_user(argp, &bp, sizeof(bp))) {
> > - dma_buf_put(buf->dmabuf);
> > + /*
> > + * The usercopy failed, but we can't do much about it, as
> > + * dma_buf_fd() already called fd_install() and made the
> > + * file descriptor accessible for the current process. It
> > + * might already be closed and dmabuf no longer valid when
> > + * we reach this point. Therefore "leak" the fd and rely on
> > + * the process exit path to do any required cleanup.
> > + */
> > return -EFAULT;
> > }
> >
>
> This feels wrong. How do all other dma buf users handle this?
>
> And you forgot to cc: the dmabuf developers, I think get_maintainers.pl
> should have caught them on this patch.
Odd, it didn't, not your fault, my apologies.
DMA BUFFER maintainers, what happened to the MAINTAINERS regex that
caused the above patch to not catch you all?
thanks,
greg k-h
This series makes it more convenient to adjust KFENCE parameters, not
only during debugging but also in production. At different production
and development stages, the memory and CPU constraints placed on KFENCE
differ considerably. In order to satisfy these demands with a single
kernel release, the ability to adjust KFENCE parameters dynamically is
needed.
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Peng Liu (3):
kfence: Add a module parameter to adjust kfence objects
kfence: Optimize branches prediction when sample interval is zero
kfence: Make test case compatible with run time set sample interval
Documentation/dev-tools/kfence.rst | 14 ++--
include/linux/kfence.h | 10 ++-
mm/kfence/core.c | 113 ++++++++++++++++++++++++-----
mm/kfence/kfence.h | 2 +-
mm/kfence/kfence_test.c | 10 +--
5 files changed, 116 insertions(+), 33 deletions(-)
--
2.18.0.huawei.25