Subject: [PATCH 1/2] dma-buf: replace "#if" with just "if"

No need to conditionally compile that code; let the compiler's dead code elimination handle it instead.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2bcf9ceca997..2305bb2cc1f1 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1141,8 +1141,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	}
 	mangle_sg_table(sg_table);
 
-#ifdef CONFIG_DMA_API_DEBUG
-	{
+	if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
 		struct scatterlist *sg;
 		u64 addr;
 		int len;
@@ -1154,10 +1153,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
 					 __func__, addr, len);
+				break;
 			}
 		}
 	}
-#endif /* CONFIG_DMA_API_DEBUG */
 
 	return sg_table;
 
 error_unmap:
Subject: [PATCH 2/2] dma-buf: improve sg_table debugging hack

Instead of just mangling the page link, create a copy of the sg_table but only copy over the DMA addresses and not the pages.

Still quite a hack, but this at least allows the exporter to properly keep its sg_table intact.

This is important, for example, for the system DMA-heap, which needs its sg_table to sync CPU writes with device accesses.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 68 +++++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2305bb2cc1f1..1fe5781d8862 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -828,21 +828,59 @@ void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
 
-static void mangle_sg_table(struct sg_table *sg_table)
+static int dma_buf_mangle_sg_table(struct sg_table **sg_table)
 {
-#ifdef CONFIG_DMABUF_DEBUG
-	int i;
-	struct scatterlist *sg;
-
-	/* To catch abuse of the underlying struct page by importers mix
-	 * up the bits, but take care to preserve the low SG_ bits to
-	 * not corrupt the sgt. The mixing is undone on unmap
-	 * before passing the sgt back to the exporter.
+	struct sg_table *to, *from = *sg_table;
+	struct scatterlist *to_sg, *from_sg;
+	int i, ret;
+
+	if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+		return 0;
+
+	/*
+	 * To catch abuse of the underlying struct page by importers copy the
+	 * sg_table without copying the page_link and give only the copy back to
+	 * the importer.
 	 */
-	for_each_sgtable_sg(sg_table, sg, i)
-		sg->page_link ^= ~0xffUL;
-#endif
+	to = kzalloc(sizeof(*to), GFP_KERNEL);
+	if (!to)
+		return -ENOMEM;
+
+	ret = sg_alloc_table(to, from->nents, GFP_KERNEL);
+	if (ret)
+		goto free_to;
+
+	to_sg = to->sgl;
+	for_each_sgtable_dma_sg(from, from_sg, i) {
+		sg_set_page(to_sg, NULL, 0, 0);
+		sg_dma_address(to_sg) = sg_dma_address(from_sg);
+		sg_dma_len(to_sg) = sg_dma_len(from_sg);
+		to_sg = sg_next(to_sg);
+	}
+
+	/*
+	 * Store the original sg_table in the first page_link of the copy so
+	 * that we can revert everything back again on unmap.
+	 */
+	to->sgl[0].page_link = (unsigned long)from;
+	*sg_table = to;
+	return 0;
+
+free_to:
+	kfree(to);
+	return ret;
+}
+
+static void dma_buf_demangle_sg_table(struct sg_table **sg_table)
+{
+	struct sg_table *copy = *sg_table;
+
+	if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+		return;
+
+	*sg_table = (void *)copy->sgl[0].page_link;
+	sg_free_table(copy);
+	kfree(copy);
 }
 
 static inline bool
@@ -1139,7 +1177,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		if (ret < 0)
 			goto error_unmap;
 	}
-	mangle_sg_table(sg_table);
+	ret = dma_buf_mangle_sg_table(&sg_table);
+	if (ret)
+		goto error_unmap;
 
 	if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
 		struct scatterlist *sg;
@@ -1220,7 +1260,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 
 	dma_resv_assert_held(attach->dmabuf->resv);
 
-	mangle_sg_table(sg_table);
+	dma_buf_demangle_sg_table(&sg_table);
 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_pin_on_map(attach))
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Monday, October 6, 2025 9:47 AM
To: sumit.semwal@linaro.org; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch
Subject: [PATCH 2/2] dma-buf: improve sg_table debugging hack
Instead of just mangling the page link, create a copy of the sg_table but only copy over the DMA addresses and not the pages.
This made the issue obvious. If I abuse this now, how will I know I am doing the wrong thing?
Still quite a hack, but this at least allows the exporter to properly keep its sg_table intact.
This is important, for example, for the system DMA-heap, which needs its sg_table to sync CPU writes with device accesses.
So the mangling actually breaks a usage? I thought that the usage was incorrect...is the dma-heap using this incorrectly?
[...]

+	to_sg = to->sgl;
+	for_each_sgtable_dma_sg(from, from_sg, i) {
+		sg_set_page(to_sg, NULL, 0, 0);
+		sg_dma_address(to_sg) = sg_dma_address(from_sg);
+		sg_dma_len(to_sg) = sg_dma_len(from_sg);
Is the indentation correct here?
M
On 13.10.25 16:48, Ruhl, Michael J wrote:
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Monday, October 6, 2025 9:47 AM
To: sumit.semwal@linaro.org; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch
Subject: [PATCH 2/2] dma-buf: improve sg_table debugging hack
Instead of just mangling the page link, create a copy of the sg_table but only copy over the DMA addresses and not the pages.
This made the issue obvious. If I abuse this now, how will I know I am doing the wrong thing?
You get a crash when you try to convert a page link into a struct page pointer and then access fields in that struct page (the pointer is NULL for most entries now).
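For illustration, a hypothetical importer doing exactly that (the function name and surrounding code are made up, not taken from any real driver) would look roughly like this and now trips over the NULL page pointer:

	#include <linux/scatterlist.h>
	#include <linux/mm.h>

	/* WRONG: importers must not touch the pages behind a dma-buf sg_table */
	static void broken_importer(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		int i;

		for_each_sgtable_sg(sgt, sg, i)
			get_page(sg_page(sg));	/* sg_page() is NULL in the debug copy */
	}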
Still quite a hack, but this at least allows the exporter to properly keep its sg_table intact.
This is important, for example, for the system DMA-heap, which needs its sg_table to sync CPU writes with device accesses.
So the mangling actually breaks a usage? I thought that the usage was incorrect...is the dma-heap using this incorrectly?
No, dma-heap as an exporter is using it perfectly correctly.
The problem was (or rather is) that some importers turned the page link into a struct page again and then tried to modify that struct page, e.g. grabbing references to it or similar.
That turned into tons of problems when the exporter used device-private pages and didn't expect that somebody was messing with them.
The only properties importers are allowed to access in the sg_tables they get from dma_buf are the DMA addresses.
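In other words, the only valid importer pattern is roughly the following (a minimal sketch; the function name is made up for illustration):

	#include <linux/scatterlist.h>
	#include <linux/printk.h>

	static void importer_walk(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		dma_addr_t addr;
		unsigned int len;
		int i;

		for_each_sgtable_dma_sg(sgt, sg, i) {
			/* only sg_dma_address()/sg_dma_len(), never sg_page() */
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			pr_debug("entry %d: addr %pad len %u\n", i, &addr, len);
		}
	}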
This patch is a first step towards replacing the use of sg_tables with something else, like an xarray of DMA addresses or similar.
Regards, Christian.
-----Original Message-----
From: Christian König <christian.koenig@amd.com>
Sent: Monday, October 13, 2025 11:02 AM
To: Ruhl, Michael J <michael.j.ruhl@intel.com>; sumit.semwal@linaro.org; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch
Subject: Re: [PATCH 2/2] dma-buf: improve sg_table debugging hack
On 13.10.25 16:48, Ruhl, Michael J wrote:
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Monday, October 6, 2025 9:47 AM
To: sumit.semwal@linaro.org; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch
Subject: [PATCH 2/2] dma-buf: improve sg_table debugging hack
Instead of just mangling the page link, create a copy of the sg_table but only copy over the DMA addresses and not the pages.
This made the issue obvious. If I abuse this now, how will I know I am doing the wrong thing?
You get a crash when you try to convert a page link into a struct page pointer and then access fields in that struct page (the pointer is NULL for most entries now).
Still quite a hack, but this at least allows the exporter to properly keep its sg_table intact.
This is important, for example, for the system DMA-heap, which needs its sg_table to sync CPU writes with device accesses.
So the mangling actually breaks a usage? I thought that the usage was incorrect...is the dma-heap using this incorrectly?
No, dma-heap as an exporter is using it perfectly correctly.
The problem was (or rather is) that some importers turned the page link into a struct page again and then tried to modify that struct page, e.g. grabbing references to it or similar.
That turned into tons of problems when the exporter used device-private pages and didn't expect that somebody was messing with them.
The only properties importers are allowed to access in the sg_tables they get from dma_buf are the DMA addresses.
This patch is a first step towards replacing the use of sg_tables with something else, like an xarray of DMA addresses or similar.
Ok got it.
Some further comments below...
Regards, Christian.
[...]

+	/*
+	 * Store the original sg_table in the first page_link of the copy so
+	 * that we can revert everything back again on unmap.
+	 */
+	to->sgl[0].page_link = (unsigned long)from;
If the concern is the importer touching the table when it shouldn't...why is this ok?
I understand that you are only copying the dma info...but if I do something like for_each_sg()... will that take me to the original?
I kinda think that keeping this info inaccessible to the importer would make more sense? (however I see the problem that we can't make it part of the dma-buf struct because there could be multiple importers...)
Hmm. You do say that this is "still a hack"... so doing this is for the debug purposes and will go away with the future steps?
If that is the plan (and with the indentation issue fixed):

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Mike
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Monday, October 6, 2025 9:47 AM
To: sumit.semwal@linaro.org; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch
Subject: [PATCH 1/2] dma-buf: replace "#if" with just "if"
No need to conditionally compile that code; let the compiler's dead code elimination handle it instead.
Makes sense to me.
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>