Am 07.01.25 um 15:27 schrieb Xu Yilun:
Introduce a new API for dma-buf importers, and add a dma_buf_ops callback for dma-buf exporters. This API is for subsystem importers who map the dma-buf to some user-defined address space, e.g. for IOMMUFD to map the dma-buf to userspace IOVA via the IOMMU page table, or for KVM to map the dma-buf to GPA via the KVM MMU (e.g. EPT).
Currently dma-buf is only used to get DMA address for device's default domain by using kernel DMA APIs. But for these new use-cases, importers only need the pfn of the dma-buf resource to build their own mapping tables.
As far as I can see I have to fundamentally reject this whole approach.
It's intentional DMA-buf design that we don't expose struct pages nor PFNs to the importer. Essentially DMA-buf only transports DMA addresses.
In other words the mapping is done by the exporter and *not* the importer.
What we certainly can do is to annotate those DMA addresses to better specify in which domain they are applicable, e.g. whether they are PCIe bus addresses or some inter-device bus addresses, etc...
But moving the functionality to map the pages/PFNs to DMA addresses into the importer is an absolutely clear NO-GO.
Regards, Christian.
So the map_dma_buf() callback is not mandatory for exporters anymore. Also the importers could choose not to provide struct device *dev on dma_buf_attach() if they don't call dma_buf_map_attachment().
Like dma_buf_map_attachment(), the importer should first call dma_buf_attach/dynamic_attach() and then call dma_buf_get_pfn_unlocked(). If the importer chooses to do dynamic attach, it should also handle the dma-buf move notification.
Only the unlocked version of dma_buf_get_pfn is implemented for now, just because no locked version is used for now.
Signed-off-by: Xu Yilun yilun.xu@linux.intel.com
IIUC, only get_pfn() is needed but no put_pfn(). The whole dma-buf is de/referenced at dma-buf attach/detach time.
Specifically, for static attachment, the exporter should always make memory resource available/pinned on first dma_buf_attach(), and release/unpin memory resource on last dma_buf_detach(). For dynamic attachment, the exporter could populate & invalidate the memory resource at any time, it's OK as long as the importers follow dma-buf move notification. So no pinning is needed for get_pfn() and no put_pfn() is needed.
drivers/dma-buf/dma-buf.c | 90 +++++++++++++++++++++++++++++++-------- include/linux/dma-buf.h | 13 ++++++ 2 files changed, 86 insertions(+), 17 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 7eeee3a38202..83d1448b6dcc 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -630,10 +630,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) size_t alloc_size = sizeof(struct dma_buf); int ret;
- if (WARN_ON(!exp_info->priv || !exp_info->ops
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release))
- if (WARN_ON(!exp_info->priv || !exp_info->ops ||
(!!exp_info->ops->map_dma_buf != !!exp_info->ops->unmap_dma_buf) ||
(!exp_info->ops->map_dma_buf && !exp_info->ops->get_pfn) ||
		    !exp_info->ops->release))
		return ERR_PTR(-EINVAL);
if (WARN_ON(exp_info->ops->cache_sgt_mapping && @@ -909,7 +909,10 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, struct dma_buf_attachment *attach; int ret;
- if (WARN_ON(!dmabuf || !dev))
- if (WARN_ON(!dmabuf))
return ERR_PTR(-EINVAL);
	if (WARN_ON(dmabuf->ops->map_dma_buf && !dev))
		return ERR_PTR(-EINVAL);
if (WARN_ON(importer_ops && !importer_ops->move_notify)) @@ -941,7 +944,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, */ if (dma_buf_attachment_is_dynamic(attach) != dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
struct sg_table *sgt = NULL;
dma_resv_lock(attach->dmabuf->resv, NULL); if (dma_buf_is_dynamic(attach->dmabuf)) { @@ -950,13 +953,16 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, goto err_unlock; }
sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_unpin;
if (dmabuf->ops->map_dma_buf) {
sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_unpin;
}}
- dma_resv_unlock(attach->dmabuf->resv); attach->sgt = sgt; attach->dir = DMA_BIDIRECTIONAL;
@@ -1119,7 +1125,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, might_sleep();
- if (WARN_ON(!attach || !attach->dmabuf))
- if (WARN_ON(!attach || !attach->dmabuf ||
		    !attach->dmabuf->ops->map_dma_buf))
		return ERR_PTR(-EINVAL);
dma_resv_assert_held(attach->dmabuf->resv); @@ -1195,7 +1202,8 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, might_sleep();
- if (WARN_ON(!attach || !attach->dmabuf))
- if (WARN_ON(!attach || !attach->dmabuf ||
		    !attach->dmabuf->ops->map_dma_buf))
		return ERR_PTR(-EINVAL);
dma_resv_lock(attach->dmabuf->resv, NULL); @@ -1222,7 +1230,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, { might_sleep();
- if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
- if (WARN_ON(!attach || !attach->dmabuf ||
		    !attach->dmabuf->ops->unmap_dma_buf || !sg_table))
		return;
dma_resv_assert_held(attach->dmabuf->resv); @@ -1254,7 +1263,8 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, { might_sleep();
- if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
- if (WARN_ON(!attach || !attach->dmabuf ||
		    !attach->dmabuf->ops->unmap_dma_buf || !sg_table))
		return;
dma_resv_lock(attach->dmabuf->resv, NULL); @@ -1263,6 +1273,52 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF"); +/**
- dma_buf_get_pfn_unlocked -
- @attach: [in] attachment to get pfn from
- @pgoff: [in] page offset of the buffer against the start of dma_buf
- @pfn: [out] returns the pfn of the buffer
- @max_order [out] returns the max mapping order of the buffer
- */
+int dma_buf_get_pfn_unlocked(struct dma_buf_attachment *attach,
pgoff_t pgoff, u64 *pfn, int *max_order)
+{
- struct dma_buf *dmabuf = attach->dmabuf;
- int ret;
- if (WARN_ON(!attach || !attach->dmabuf ||
!attach->dmabuf->ops->get_pfn))
return -EINVAL;
- /*
* Open:
*
* When dma_buf is dynamic but dma_buf move is disabled, the buffer
* should be pinned before use, See dma_buf_map_attachment() for
* reference.
*
* But for now no pin is intended inside dma_buf_get_pfn(), otherwise
* need another API to unpin the dma_buf. So just fail out this case.
*/
- if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
return -ENOENT;
- dma_resv_lock(attach->dmabuf->resv, NULL);
- ret = dmabuf->ops->get_pfn(attach, pgoff, pfn, max_order);
- /*
* Open:
*
* Is dma_resv_wait_timeout() needed? I assume no. The DMA buffer
* content synchronization could be done when the buffer is to be
* mapped by importer.
*/
- dma_resv_unlock(attach->dmabuf->resv);
- return ret;
+} +EXPORT_SYMBOL_NS_GPL(dma_buf_get_pfn_unlocked, "DMA_BUF");
- /**
- dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1662,7 +1718,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) attach_count = 0; list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
		seq_printf(s, "\t%s\n", attach_obj->dev ? dev_name(attach_obj->dev) : NULL);
		attach_count++;
	}
	dma_resv_unlock(buf_obj->resv);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 36216d28d8bd..b16183edfb3a 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -194,6 +194,17 @@ struct dma_buf_ops { * if the call would block. */
- /**
* @get_pfn:
*
* This is called by dma_buf_get_pfn(). It is used to get the pfn
* of the buffer positioned by the page offset against the start of
* the dma_buf. It can only be called if @attach has been called
* successfully.
*/
- int (*get_pfn)(struct dma_buf_attachment *attach, pgoff_t pgoff,
u64 *pfn, int *max_order);
- /**
- @release:
@@ -629,6 +640,8 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, struct sg_table *sg_table, enum dma_data_direction direction); +int dma_buf_get_pfn_unlocked(struct dma_buf_attachment *attach,
pgoff_t pgoff, u64 *pfn, int *max_order);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);