From: Leon Romanovsky leonro@nvidia.com
The .invalidate_mappings() callback is documented as optional, yet it effectively became mandatory whenever importer_ops were provided. This led to cases where RDMA non-ODP code had to supply a warning-only stub just to be able to set allow_peer2peer.
Codify this behavior by providing a dedicated exported function, dma_buf_unsupported_invalidate_mappings(). This function is intended solely for the RDMA non-ODP case and must not be used by any other dma-buf importer.
This makes it possible to rely on a valid .invalidate_mappings() callback to determine whether an importer supports revocation.
Signed-off-by: Leon Romanovsky leonro@nvidia.com --- drivers/dma-buf/dma-buf.c | 14 ++++++++++++++ drivers/infiniband/core/umem_dmabuf.c | 11 +---------- include/linux/dma-buf.h | 4 +++- 3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index cd3b60ce4863..c4fa35034b92 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1238,6 +1238,20 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
+/* + * This function shouldn't be used by anyone except the RDMA non-ODP case. + * The reason for it is a UAPI mistake where dma-buf was exported to + * userspace without knowing that .invalidate_mappings() can be called + * for pinned memory too. + * + * This warning shouldn't be seen in a real production scenario. + */ +void dma_buf_unsupported_invalidate_mappings(struct dma_buf_attachment *attach) +{ + pr_warn("Invalidate callback should not be called when memory is pinned\n"); +} +EXPORT_SYMBOL_FOR_MODULES(dma_buf_unsupported_invalidate_mappings, "ib_uverbs"); + /** * dma_buf_move_notify - notify attachments that DMA-buf is moving * diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index d77a739cfe7a..81442a887b48 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -184,18 +184,9 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, } EXPORT_SYMBOL(ib_umem_dmabuf_get);
-static void -ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach) -{ - struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; - - ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev, - "Invalidate callback should not be called when memory is pinned\n"); -} - static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { .allow_peer2peer = true, - .invalidate_mappings = ib_umem_dmabuf_unsupported_move_notify, + .invalidate_mappings = dma_buf_unsupported_invalidate_mappings, };
struct ib_umem_dmabuf * diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 1b397635c793..7d7d0a4fb762 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -458,7 +458,7 @@ struct dma_buf_attach_ops { bool allow_peer2peer;
/** - * @invalidate_mappings: [optional] notification that the DMA-buf is moving + * @invalidate_mappings: notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the * backing store while mappings exists. @@ -601,6 +601,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); void dma_buf_move_notify(struct dma_buf *dma_buf); +void dma_buf_unsupported_invalidate_mappings(struct dma_buf_attachment *attach); + int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); int dma_buf_end_cpu_access(struct dma_buf *dma_buf,