Now that all exporters are converted, the remaining compatibility glue can be removed (a sketch of the converted exporter side follows the list):
 - dma_buf_ops: map_dma_buf/unmap_dma_buf
   Moved to dma_buf_mapping_sgt_exp_ops
 - dma_buf_attachment: dev
   Moved to attach->map_type.importing_dma_device
 - dma_buf_attachment: peer2peer
   Moved to attach->map_type.exporter_requires_p2p, accessed via
   dma_buf_sgt_p2p_allowed()
 - dma_buf_sgt_exp_compat_match
   No compatibility exporters anymore
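
For illustration only, a converted exporter now looks roughly like the
sketch below. The my_*() names are invented for the example; only the
dma_buf_mapping_sgt_exp_ops, DMA_BUF_EMAPPING_SGT() and
single_exporter_match wiring is the real API touched by this patch:

	#include <linux/dma-buf.h>
	#include <linux/dma-buf-mapping.h>

	/* Assumed driver internals, declared only to keep the sketch self-contained */
	struct sg_table *my_build_and_map_sgt(struct dma_buf_attachment *attach,
					      enum dma_data_direction dir);
	void my_unmap_and_free_sgt(struct dma_buf_attachment *attach,
				   struct sg_table *sgt,
				   enum dma_data_direction dir);
	void my_release(struct dma_buf *dmabuf);

	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
					       enum dma_data_direction dir)
	{
		/* Build and DMA map the sg_table for this attachment */
		return my_build_and_map_sgt(attach, dir);
	}

	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
	{
		my_unmap_and_free_sgt(attach, sgt, dir);
	}

	static const struct dma_buf_mapping_sgt_exp_ops my_sgt_exp_ops = {
		.map_dma_buf = my_map_dma_buf,
		.unmap_dma_buf = my_unmap_dma_buf,
	};

	/* Single SGT mapping, no P2P, the same pattern the compat match used to wrap */
	static const struct dma_buf_mapping_match my_exporter_match =
		DMA_BUF_EMAPPING_SGT(&my_sgt_exp_ops);

	static const struct dma_buf_ops my_dmabuf_ops = {
		/* dma_buf_export() now requires one of match_mapping /
		 * single_exporter_match instead of map_dma_buf */
		.single_exporter_match = &my_exporter_match,
		.release = my_release,
	};

On the importer side attach->dev and attach->peer2peer are likewise gone;
the importing device comes from the match data and P2P capability is
queried through dma_buf_sgt_p2p_allowed() instead.
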
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/dma-buf/dma-buf-mapping.c | 40 -----------------
 drivers/dma-buf/dma-buf.c         | 24 +++-------
 drivers/gpu/drm/drm_prime.c       |  2 -
 include/linux/dma-buf-mapping.h   | 67 +++++++++++++++++++++++++++-
 include/linux/dma-buf.h           | 73 -------------------------------
 5 files changed, 70 insertions(+), 136 deletions(-)
diff --git a/drivers/dma-buf/dma-buf-mapping.c b/drivers/dma-buf/dma-buf-mapping.c
index b5f320be0f24bf..baa96b37e2c6bd 100644
--- a/drivers/dma-buf/dma-buf-mapping.c
+++ b/drivers/dma-buf/dma-buf-mapping.c
@@ -334,16 +334,6 @@ dma_buf_sgt_finish_match(struct dma_buf_match_args *args,
 			.exporter_requires_p2p = exp->sgt_data.exporter_requires_p2p,
 		},
 	};
-
-	/*
-	 * Setup the SGT type variables stored in attach because importers and
-	 * exporters that do not natively use mappings expect them to be there.
-	 * When converting to use mappings users should use the match versions
-	 * of these instead.
-	 */
-	attach->dev = imp->sgt_data.importing_dma_device;
-	attach->peer2peer = attach->map_type.sgt_data.importer_accepts_p2p ==
-			    DMA_SGT_IMPORTER_ACCEPTS_P2P;
 }
 
 static void dma_buf_sgt_debugfs_dump(struct seq_file *s,
@@ -359,33 +349,3 @@ struct dma_buf_mapping_type dma_buf_mapping_sgt_type = {
 	.debugfs_dump = dma_buf_sgt_debugfs_dump,
 };
 EXPORT_SYMBOL_NS_GPL(dma_buf_mapping_sgt_type, "DMA_BUF");
-
-static struct sg_table *
-dma_buf_sgt_compat_map_dma_buf(struct dma_buf_attachment *attach,
-			       enum dma_data_direction dir)
-{
-	return attach->dmabuf->ops->map_dma_buf(attach, dir);
-}
-
-static void dma_buf_sgt_compat_unmap_dma_buf(struct dma_buf_attachment *attach,
-					      struct sg_table *sgt,
-					      enum dma_data_direction dir)
-{
-	attach->dmabuf->ops->unmap_dma_buf(attach, sgt, dir);
-}
-
-/* Route the classic map/unmap ops through the exp ops for old importers */
-static const struct dma_buf_mapping_sgt_exp_ops dma_buf_sgt_compat_exp_ops = {
-	.map_dma_buf = dma_buf_sgt_compat_map_dma_buf,
-	.unmap_dma_buf = dma_buf_sgt_compat_unmap_dma_buf,
-};
-
-/*
- * This mapping type is used for unaware exporters that do not support
- * match_mapping(). It wraps the dma_buf ops for SGT mappings into a mapping
- * type so aware importers can transparently work with unaware exporters. This
- * does not require p2p because old exporters will check it through the
- * attach->peer2peer mechanism.
- */
-const struct dma_buf_mapping_match dma_buf_sgt_exp_compat_match =
-	DMA_BUF_EMAPPING_SGT(&dma_buf_sgt_compat_exp_ops);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ac755f358dc7b3..e773441abab65d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -693,19 +693,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 		    || !exp_info->ops->release))
 		return ERR_PTR(-EINVAL);
 
-	if (exp_info->ops->match_mapping ||
-	    exp_info->ops->single_exporter_match) {
-		if (WARN_ON(exp_info->ops->map_dma_buf ||
-			    exp_info->ops->unmap_dma_buf))
-			return ERR_PTR(-EINVAL);
-		if (WARN_ON(exp_info->ops->match_mapping &&
-			    exp_info->ops->single_exporter_match))
-			return ERR_PTR(-EINVAL);
-	} else {
-		if (WARN_ON(!exp_info->ops->map_dma_buf ||
-			    !exp_info->ops->unmap_dma_buf))
-			return ERR_PTR(-EINVAL);
-	}
+	if (WARN_ON(!exp_info->ops->match_mapping &&
+		    !exp_info->ops->single_exporter_match))
+		return ERR_PTR(-EINVAL);
 
 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 		return ERR_PTR(-EINVAL);
@@ -981,12 +971,8 @@ struct dma_buf_attachment *dma_buf_mapping_attach(
 		if (ret)
 			goto err_attach;
 	} else {
-		const struct dma_buf_mapping_match *exp_match =
-			dmabuf->ops->single_exporter_match;
-
-		if (!exp_match)
-			exp_match = &dma_buf_sgt_exp_compat_match;
-		ret = dma_buf_match_mapping(&match_args, exp_match, 1);
+		ret = dma_buf_match_mapping(
+			&match_args, dmabuf->ops->single_exporter_match, 1);
 		if (ret)
 			goto err_attach;
 	}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 94ec2483e40107..0852c60a722b67 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,8 +593,6 @@ static bool is_gem_map_dma_buf(struct dma_buf_attachment *attach)
 	const struct dma_buf_mapping_sgt_exp_ops *sgt_exp_ops =
 		dma_buf_get_sgt_ops(attach);
 
-	if (attach->dmabuf->ops->map_dma_buf == drm_gem_map_dma_buf)
-		return true;
 	if (sgt_exp_ops && sgt_exp_ops->map_dma_buf == drm_gem_map_dma_buf)
 		return true;
 	return false;
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
index c11e32ef2a684f..f81e215401b49d 100644
--- a/include/linux/dma-buf-mapping.h
+++ b/include/linux/dma-buf-mapping.h
@@ -113,8 +113,73 @@ extern struct dma_buf_mapping_type dma_buf_mapping_sgt_type;
 
 struct dma_buf_mapping_sgt_exp_ops {
 	struct dma_buf_mapping_exp_ops ops;
+
+	/**
+	 * @map_dma_buf:
+	 *
+	 * This is called by dma_buf_map_attachment() and is used to map a
+	 * shared &dma_buf into device address space, and it is mandatory. It
+	 * can only be called if @attach has been called successfully.
+	 *
+	 * This call may sleep, e.g. when the backing storage first needs to be
+	 * allocated, or moved to a location suitable for all currently attached
+	 * devices.
+	 *
+	 * Note that any specific buffer attributes required for this function
+	 * should get added to device_dma_parameters accessible via
+	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
+	 * should also check these constraints.
+	 *
+	 * If this is being called for the first time, the exporter can now
+	 * choose to scan through the list of attachments for this buffer,
+	 * collate the requirements of the attached devices, and choose an
+	 * appropriate backing storage for the buffer.
+	 *
+	 * Based on enum dma_data_direction, it might be possible to have
+	 * multiple users accessing at the same time (for reading, maybe), or
+	 * any other kind of sharing that the exporter might wish to make
+	 * available to buffer-users.
+	 *
+	 * This is always called with the dmabuf->resv object locked when
+	 * the dynamic_mapping flag is true.
+	 *
+	 * Note that for non-dynamic exporters the driver must guarantee that
+	 * that the memory is available for use and cleared of any old data by
+	 * the time this function returns. Drivers which pipeline their buffer
+	 * moves internally must wait for all moves and clears to complete.
+	 * Dynamic exporters do not need to follow this rule: For non-dynamic
+	 * importers the buffer is already pinned through @pin, which has the
+	 * same requirements. Dynamic importers otoh are required to obey the
+	 * dma_resv fences.
+	 *
+	 * Returns:
+	 *
+	 * A &sg_table scatter list of the backing storage of the DMA buffer,
+	 * already mapped into the device address space of the &device attached
+	 * with the provided &dma_buf_attachment. The addresses and lengths in
+	 * the scatter list are PAGE_SIZE aligned.
+	 *
+	 * On failure, returns a negative error value wrapped into a pointer.
+	 * May also return -EINTR when a signal was received while being
+	 * blocked.
+	 *
+	 * Note that exporters should not try to cache the scatter list, or
+	 * return the same one for multiple calls. Caching is done either by the
+	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+	 * of the scatter list is transferred to the caller, and returned by
+	 * @unmap_dma_buf.
+	 */
 	struct sg_table *(*map_dma_buf)(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir);
+
+	/**
+	 * @unmap_dma_buf:
+	 *
+	 * This is called by dma_buf_unmap_attachment() and should unmap and
+	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
+	 * For static dma_buf handling this might also unpin the backing
+	 * storage if this is the last mapping of the DMA buffer.
+	 */
 	void (*unmap_dma_buf)(struct dma_buf_attachment *attach,
 			      struct sg_table *sgt,
 			      enum dma_data_direction dir);
@@ -189,8 +254,6 @@ DMA_BUF_EMAPPING_SGT_P2P(const struct dma_buf_mapping_sgt_exp_ops *exp_ops,
 	return match;
 }
-extern const struct dma_buf_mapping_match dma_buf_sgt_exp_compat_match;
-
 /*
  * dma_buf_ops initializer helper for simple drivers that use a single
  * SGT map/unmap operation without P2P.
  */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index a8cfbbafbe31fe..5feab8b8b5d517 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -145,75 +145,6 @@ struct dma_buf_ops {
 	 */
 	void (*unpin)(struct dma_buf_attachment *attach);
 
-	/**
-	 * @map_dma_buf:
-	 *
-	 * This is called by dma_buf_map_attachment() and is used to map a
-	 * shared &dma_buf into device address space, and it is mandatory. It
-	 * can only be called if @attach has been called successfully.
-	 *
-	 * This call may sleep, e.g. when the backing storage first needs to be
-	 * allocated, or moved to a location suitable for all currently attached
-	 * devices.
-	 *
-	 * Note that any specific buffer attributes required for this function
-	 * should get added to device_dma_parameters accessible via
-	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
-	 * should also check these constraints.
-	 *
-	 * If this is being called for the first time, the exporter can now
-	 * choose to scan through the list of attachments for this buffer,
-	 * collate the requirements of the attached devices, and choose an
-	 * appropriate backing storage for the buffer.
-	 *
-	 * Based on enum dma_data_direction, it might be possible to have
-	 * multiple users accessing at the same time (for reading, maybe), or
-	 * any other kind of sharing that the exporter might wish to make
-	 * available to buffer-users.
-	 *
-	 * This is always called with the dmabuf->resv object locked when
-	 * the dynamic_mapping flag is true.
-	 *
-	 * Note that for non-dynamic exporters the driver must guarantee that
-	 * that the memory is available for use and cleared of any old data by
-	 * the time this function returns. Drivers which pipeline their buffer
-	 * moves internally must wait for all moves and clears to complete.
-	 * Dynamic exporters do not need to follow this rule: For non-dynamic
-	 * importers the buffer is already pinned through @pin, which has the
-	 * same requirements. Dynamic importers otoh are required to obey the
-	 * dma_resv fences.
-	 *
-	 * Returns:
-	 *
-	 * A &sg_table scatter list of the backing storage of the DMA buffer,
-	 * already mapped into the device address space of the &device attached
-	 * with the provided &dma_buf_attachment. The addresses and lengths in
-	 * the scatter list are PAGE_SIZE aligned.
-	 *
-	 * On failure, returns a negative error value wrapped into a pointer.
-	 * May also return -EINTR when a signal was received while being
-	 * blocked.
-	 *
-	 * Note that exporters should not try to cache the scatter list, or
-	 * return the same one for multiple calls. Caching is done either by the
-	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
-	 * of the scatter list is transferred to the caller, and returned by
-	 * @unmap_dma_buf.
-	 */
-	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
-					 enum dma_data_direction);
-	/**
-	 * @unmap_dma_buf:
-	 *
-	 * This is called by dma_buf_unmap_attachment() and should unmap and
-	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
-	 * For static dma_buf handling this might also unpin the backing
-	 * storage if this is the last mapping of the DMA buffer.
-	 */
-	void (*unmap_dma_buf)(struct dma_buf_attachment *,
-			      struct sg_table *,
-			      enum dma_data_direction);
-
 	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
 	 * if the call would block.
 	 */
@@ -530,9 +461,7 @@ struct dma_buf_attach_ops {
 /**
  * struct dma_buf_attachment - holds device-buffer attachment data
  * @dmabuf: buffer for this attachment.
- * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
  * @importer_ops: importer operations for this attachment, if provided
  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
@@ -551,9 +480,7 @@ struct dma_buf_attach_ops {
  */
 struct dma_buf_attachment {
 	struct dma_buf *dmabuf;
-	struct device *dev;
 	struct list_head node;
-	bool peer2peer;
 	const struct dma_buf_attach_ops *importer_ops;
 	void *importer_priv;
 	void *priv;