This map function only works with SGT importers, so rename
dma_buf_map_attachment() and dma_buf_map_attachment_unlocked() to
dma_buf_sgt_map_attachment() and dma_buf_sgt_map_attachment_unlocked()
to make that explicit at every call site. The unmap side keeps its
existing names. This is a mechanical rename with no functional change.
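For reference, a minimal sketch of an SGT importer's map/unmap sequence
after the rename (error handling elided; "dmabuf" and "importer_dev" are
placeholder variables, not part of this patch):

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Attach the importing device to the buffer. */
	attach = dma_buf_attach(dmabuf, importer_dev);

	/* Map for DMA; this is the renamed call, usable only by
	 * importers that consume an sg_table.
	 */
	sgt = dma_buf_sgt_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	/* Teardown uses the existing, unrenamed unmap/detach calls. */
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);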
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/accel/amdxdna/amdxdna_gem.c           |  2 +-
 drivers/accel/ivpu/ivpu_gem.c                 |  3 +-
 drivers/accel/qaic/qaic_data.c                |  4 +--
 drivers/dma-buf/dma-buf.c                     | 28 +++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  3 +-
 drivers/gpu/drm/armada/armada_gem.c           | 14 ++++++----
 drivers/gpu/drm/drm_prime.c                   |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    |  4 +--
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  |  3 +-
 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c     |  2 +-
 drivers/gpu/drm/tegra/gem.c                   |  6 ++--
 drivers/gpu/drm/virtio/virtgpu_prime.c        |  2 +-
 drivers/gpu/drm/xe/xe_bo.c                    |  2 +-
 drivers/iio/industrialio-buffer.c             |  2 +-
 drivers/infiniband/core/umem_dmabuf.c         |  4 +--
 .../media/common/videobuf2/videobuf2-core.c   |  2 +-
 .../common/videobuf2/videobuf2-dma-contig.c   |  2 +-
 .../media/common/videobuf2/videobuf2-dma-sg.c |  2 +-
 .../platform/nvidia/tegra-vde/dmabuf-cache.c  |  2 +-
 drivers/misc/fastrpc.c                        |  3 +-
 drivers/usb/gadget/function/f_fs.c            |  2 +-
 drivers/xen/gntdev-dmabuf.c                   |  2 +-
 include/linux/dma-buf-mapping.h               |  4 +--
 include/linux/dma-buf.h                       | 10 +++---
 io_uring/zcrx.c                               |  3 +-
 net/core/devmem.c                             |  4 +--
 26 files changed, 63 insertions(+), 54 deletions(-)
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index fb7c8de960cd2a..ab7610375ad761 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -610,7 +610,7 @@ amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 		goto put_buf;
 	}

-	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
 		goto fail_detach;
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index ece68f570b7ead..850dc82c7857e2 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -54,7 +54,8 @@ static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct
 	sgt = bo->base.sgt;
 	if (!sgt) {
-		sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+		sgt = dma_buf_sgt_map_attachment(bo->base.base.import_attach,
+						 DMA_BIDIRECTIONAL);
 		if (IS_ERR(sgt))
 			ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
 		else
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 60cb4d65d48ee7..0a7b8b9620bf9a 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -844,7 +844,7 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_
 	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);

 	/*
-	 * skipping dma_buf_map_attachment() as we do not know the direction
+	 * skipping dma_buf_sgt_map_attachment() as we do not know the direction
 	 * just yet. Once the direction is known in the subsequent IOCTL to
 	 * attach slicing, we can do it then.
 	 */
@@ -870,7 +870,7 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
 	struct sg_table *sgt;
 	int ret;

-	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
+	sgt = dma_buf_sgt_map_attachment(obj->import_attach, hdr->dir);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
 		return ret;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e773441abab65d..73c599f84e121a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -653,7 +653,7 @@ static struct file *dma_buf_getfile(size_t size, int flags)
  *
  * 3. Once the buffer is attached to all devices userspace can initiate DMA
  *    access to the shared buffer. In the kernel this is done by calling
- *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
+ *    dma_buf_sgt_map_attachment() and dma_buf_unmap_attachment().
  *
  * 4. Once a driver is done with a shared buffer it needs to call
  *    dma_buf_detach() (after cleaning up any mappings) and then release the
@@ -867,7 +867,7 @@ dma_buf_pin_on_map(struct dma_buf_attachment *attach)
  *
  * - dma_buf_pin()
  * - dma_buf_unpin()
- * - dma_buf_map_attachment()
+ * - dma_buf_sgt_map_attachment()
  * - dma_buf_unmap_attachment()
  * - dma_buf_vmap()
  * - dma_buf_vunmap()
@@ -885,7 +885,7 @@ dma_buf_pin_on_map(struct dma_buf_attachment *attach)
  * - dma_buf_mmap()
  * - dma_buf_begin_cpu_access()
  * - dma_buf_end_cpu_access()
- * - dma_buf_map_attachment_unlocked()
+ * - dma_buf_sgt_map_attachment_unlocked()
  * - dma_buf_unmap_attachment_unlocked()
  * - dma_buf_vmap_unlocked()
  * - dma_buf_vunmap_unlocked()
@@ -1120,7 +1120,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

 /**
- * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * dma_buf_sgt_map_attachment - Returns the scatterlist table of the attachment;
  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
  * dma_buf_ops.
  * @attach:	[in]	attachment whose scatterlist is to be returned
@@ -1140,8 +1140,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
  * Important: Dynamic importers must wait for the exclusive fence of the struct
  * dma_resv attached to the DMA-BUF first.
  */
-struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
-					enum dma_data_direction direction)
+struct sg_table *dma_buf_sgt_map_attachment(struct dma_buf_attachment *attach,
+					    enum dma_data_direction direction)
 {
 	const struct dma_buf_mapping_sgt_exp_ops *sgt_exp_ops =
 		dma_buf_get_sgt_ops(attach);
@@ -1213,20 +1213,20 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	return sg_table;
 }
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
+EXPORT_SYMBOL_NS_GPL(dma_buf_sgt_map_attachment, "DMA_BUF");

 /**
- * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * dma_buf_sgt_map_attachment_unlocked - Returns the scatterlist table of the attachment;
  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
  * dma_buf_ops.
  * @attach:	[in]	attachment whose scatterlist is to be returned
  * @direction:	[in]	direction of DMA transfer
  *
- * Unlocked variant of dma_buf_map_attachment().
+ * Unlocked variant of dma_buf_sgt_map_attachment().
  */
 struct sg_table *
-dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
-				enum dma_data_direction direction)
+dma_buf_sgt_map_attachment_unlocked(struct dma_buf_attachment *attach,
+				    enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
@@ -1236,12 +1236,12 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
 		return ERR_PTR(-EINVAL);

 	dma_resv_lock(attach->dmabuf->resv, NULL);
-	sg_table = dma_buf_map_attachment(attach, direction);
+	sg_table = dma_buf_sgt_map_attachment(attach, direction);
 	dma_resv_unlock(attach->dmabuf->resv);

 	return sg_table;
 }
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
+EXPORT_SYMBOL_NS_GPL(dma_buf_sgt_map_attachment_unlocked, "DMA_BUF");

 /**
  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1251,7 +1251,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
  * @sg_table:	[in]	scatterlist info of the buffer to unmap
  * @direction:	[in]	direction of DMA transfer
  *
- * This unmaps a DMA mapping for @attached obtained by dma_buf_map_attachment().
+ * This unmaps a DMA mapping for @attached obtained by dma_buf_sgt_map_attachment().
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 			      struct sg_table *sg_table,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2b931e855abd9d..6c8b2a3dde1f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -914,7 +914,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 		struct sg_table *sgt;

 		attach = gtt->gobj->import_attach;
-		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+		sgt = dma_buf_sgt_map_attachment(attach,
+						 DMA_BIDIRECTIONAL);
 		if (IS_ERR(sgt))
 			return PTR_ERR(sgt);

diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index bf6968b1f22511..21b83b00b68254 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -531,7 +531,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
 	get_dma_buf(buf);

 	/*
-	 * Don't call dma_buf_map_attachment() here - it maps the
+	 * Don't call dma_buf_sgt_map_attachment() here - it maps the
 	 * scatterlist immediately for DMA, and this is not always
 	 * an appropriate thing to do.
 	 */
@@ -542,20 +542,22 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
 {
 	int ret;

-	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
-						    DMA_TO_DEVICE);
+	dobj->sgt = dma_buf_sgt_map_attachment_unlocked(dobj->obj.import_attach,
+							DMA_TO_DEVICE);
 	if (IS_ERR(dobj->sgt)) {
 		ret = PTR_ERR(dobj->sgt);
 		dobj->sgt = NULL;
-		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+		DRM_ERROR("dma_buf_sgt_map_attachment() error: %d\n", ret);
 		return ret;
 	}
 	if (dobj->sgt->nents > 1) {
-		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
+		DRM_ERROR(
+			"dma_buf_sgt_map_attachment() returned an (unsupported) scattered list\n");
 		return -EINVAL;
 	}
 	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
-		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
+		DRM_ERROR(
+			"dma_buf_sgt_map_attachment() returned a small buffer\n");
 		return -EINVAL;
 	}
 	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0852c60a722b67..c1afb9e0886c4f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -1005,7 +1005,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
 	get_dma_buf(dma_buf);

-	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
 		goto fail_detach;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index a119623aed254b..92e2677eb5a33b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -242,8 +242,8 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 	assert_object_held(obj);

-	sgt = dma_buf_map_attachment(obj->base.import_attach,
-				     DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment(obj->base.import_attach,
+					 DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 2fda549dd82d2b..fcfa819caa389f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -293,7 +293,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
 		goto out_import;
 	}

-	st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
+	st = dma_buf_sgt_map_attachment_unlocked(import_attach,
+						 DMA_BIDIRECTIONAL);
 	if (IS_ERR(st)) {
 		err = PTR_ERR(st);
 		goto out_detach;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 23beaeefab67d7..569ee2d3ab6f84 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -121,7 +121,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
 	get_dma_buf(dma_buf);

-	sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
+	sgt = dma_buf_sgt_map_attachment_unlocked(attach, DMA_TO_DEVICE);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
 		goto fail_detach;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 244c01819d56b5..4866d639bbb026 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -86,7 +86,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
 		goto free;
 	}

-	map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
+	map->sgt = dma_buf_sgt_map_attachment_unlocked(map->attach,
+						       direction);
 	if (IS_ERR(map->sgt)) {
 		dma_buf_detach(buf, map->attach);
 		err = PTR_ERR(map->sgt);
@@ -477,7 +478,8 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 		goto free;
 	}

-	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
+	bo->sgt = dma_buf_sgt_map_attachment_unlocked(attach,
+						      DMA_TO_DEVICE);
 	if (IS_ERR(bo->sgt)) {
 		err = PTR_ERR(bo->sgt);
 		goto detach;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index d7e1f741f941a3..3dbc1b41052068 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -163,7 +163,7 @@ int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
 	if (ret <= 0)
 		return ret < 0 ? ret : -ETIMEDOUT;

-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 71acd45aa33b00..e5e716c5f33fa8 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -764,7 +764,7 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
 		ttm_bo->sg = NULL;
 	}

-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	sg = dma_buf_sgt_map_attachment(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sg))
 		return PTR_ERR(sg);

diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 7daac53c502e50..7556c3c7675c2c 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1701,7 +1701,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
 	priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN
 		? DMA_FROM_DEVICE : DMA_TO_DEVICE;

-	priv->sgt = dma_buf_map_attachment(attach, priv->dir);
+	priv->sgt = dma_buf_sgt_map_attachment(attach, priv->dir);
 	if (IS_ERR(priv->sgt)) {
 		err = PTR_ERR(priv->sgt);
 		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 0ec2e4120cc94b..aac9f9d12f0f8f 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -29,8 +29,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	if (umem_dmabuf->sgt)
 		goto wait_fence;

-	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
-				     DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment(umem_dmabuf->attach,
+					 DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 2df566f409b65e..4fe30a21e1e687 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1470,7 +1470,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
 	vb->planes[plane].mem_priv = mem_priv;

 	/*
-	 * This pins the buffer(s) with dma_buf_map_attachment()). It's done
+	 * This pins the buffer(s) with dma_buf_sgt_map_attachment()). It's done
 	 * here instead just before the DMA, while queueing the buffer(s) so
 	 * userspace knows sooner rather than later if the dma-buf map fails.
 	 */
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 7a3bc31699bb90..de3eb4121aadb0 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -706,7 +706,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
 	}

 	/* get the associated scatterlist for this buffer */
-	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
+	sgt = dma_buf_sgt_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
 	if (IS_ERR(sgt)) {
 		pr_err("Error getting dmabuf scatterlist\n");
 		return -EINVAL;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 03a836dce44f90..ed968d7e326449 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -568,7 +568,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
 	}

 	/* get the associated scatterlist for this buffer */
-	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
+	sgt = dma_buf_sgt_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
 	if (IS_ERR(sgt)) {
 		pr_err("Error getting dmabuf scatterlist\n");
 		return -EINVAL;
diff --git a/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c b/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
index b34244ea14dd06..595b759de4f939 100644
--- a/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
+++ b/drivers/media/platform/nvidia/tegra-vde/dmabuf-cache.c
@@ -102,7 +102,7 @@ int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
 		goto err_unlock;
 	}

-	sgt = dma_buf_map_attachment_unlocked(attachment, dma_dir);
+	sgt = dma_buf_sgt_map_attachment_unlocked(attachment, dma_dir);
 	if (IS_ERR(sgt)) {
 		dev_err(dev, "Failed to get dmabufs sg_table\n");
 		err = PTR_ERR(sgt);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 2ea57170e56b3e..52abf3290a580f 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -779,7 +779,8 @@ static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
 		goto attach_err;
 	}

-	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
+	table = dma_buf_sgt_map_attachment_unlocked(map->attach,
+						    DMA_BIDIRECTIONAL);
 	if (IS_ERR(table)) {
 		err = PTR_ERR(table);
 		goto map_err;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5c81ea9afa1249..d5d4bfc390ebc6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1520,7 +1520,7 @@ static int ffs_dmabuf_attach(struct file *file, int fd)
 	if (err)
 		goto err_free_priv;

-	sg_table = dma_buf_map_attachment(attach, dir);
+	sg_table = dma_buf_sgt_map_attachment(attach, dir);
 	dma_resv_unlock(dmabuf->resv);

 	if (IS_ERR(sg_table)) {
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 91a31a22ba98aa..78125cc1aee322 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -590,7 +590,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	gntdev_dmabuf->u.imp.attach = attach;

-	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+	sgt = dma_buf_sgt_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt)) {
 		ret = ERR_CAST(sgt);
 		goto fail_detach;
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
index f81e215401b49d..daddf30d0eceae 100644
--- a/include/linux/dma-buf-mapping.h
+++ b/include/linux/dma-buf-mapping.h
@@ -101,7 +101,7 @@ int dma_buf_match_mapping(struct dma_buf_match_args *args,
  *
  * When this type is matched the map/unmap functions are:
  *
- * dma_buf_map_attachment()
+ * dma_buf_sgt_map_attachment()
  * dma_buf_unmap_attachment()
  *
  * The struct sg_table returned by those functions has only the DMA portions
@@ -117,7 +117,7 @@ struct dma_buf_mapping_sgt_exp_ops {
 	/**
 	 * @map_dma_buf:
 	 *
-	 * This is called by dma_buf_map_attachment() and is used to map a
+	 * This is called by dma_buf_sgt_map_attachment() and is used to map a
 	 * shared &dma_buf into device address space, and it is mandatory. It
 	 * can only be called if @attach has been called successfully.
 	 *
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 5feab8b8b5d517..1ed50ec261022e 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -475,7 +475,7 @@ struct dma_buf_attach_ops {
  *
  * An attachment is created by calling dma_buf_attach(), and released again by
  * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
- * transfer is created by dma_buf_map_attachment() and freed again by calling
+ * transfer is created by dma_buf_sgt_map_attachment() and freed again by calling
  * dma_buf_unmap_attachment().
  */
 struct dma_buf_attachment {
@@ -580,8 +580,8 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);

-struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
-					enum dma_data_direction);
+struct sg_table *dma_buf_sgt_map_attachment(struct dma_buf_attachment *,
+					    enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 			      enum dma_data_direction);
 void dma_buf_move_notify(struct dma_buf *dma_buf);
@@ -590,8 +590,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
 struct sg_table *
-dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
-				enum dma_data_direction direction);
+dma_buf_sgt_map_attachment_unlocked(struct dma_buf_attachment *attach,
+				    enum dma_data_direction direction);
 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
 				       struct sg_table *sg_table,
 				       enum dma_data_direction direction);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index b99cf2c6670aa8..3b8c9752208bdf 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -137,7 +137,8 @@ static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
 		goto err;
 	}

-	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
+	mem->sgt = dma_buf_sgt_map_attachment_unlocked(mem->attach,
+						       DMA_FROM_DEVICE);
 	if (IS_ERR(mem->sgt)) {
 		ret = PTR_ERR(mem->sgt);
 		mem->sgt = NULL;
diff --git a/net/core/devmem.c b/net/core/devmem.c
index ec4217d6c0b4fd..ccdf3f70a4de9b 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -223,8 +223,8 @@ net_devmem_bind_dmabuf(struct net_device *dev,
 		goto err_free_binding;
 	}

-	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
-						       direction);
+	binding->sgt = dma_buf_sgt_map_attachment_unlocked(binding->attachment,
+							   direction);
 	if (IS_ERR(binding->sgt)) {
 		err = PTR_ERR(binding->sgt);
 		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");