These helpers only work when the attachment is matched to an SGT mapping type; call dma_buf_sgt_dma_device() to get the DMA device instead of using attach->dev directly.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/dma-buf/dma-buf-mapping.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
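For review context, a minimal sketch of the pattern this patch establishes
(not part of the change itself; it only uses calls that appear in the diff
below, with paddr/len/dir standing in for the caller's values):

	/*
	 * Sketch only: resolve the DMA device once via
	 * dma_buf_sgt_dma_device() and use it for every map/unmap
	 * call, so mapping and unmapping always target the same
	 * device even when it is not attach->dev.
	 */
	struct device *dma_dev = dma_buf_sgt_dma_device(attach);
	dma_addr_t addr;
	int ret;

	addr = dma_map_phys(dma_dev, paddr, len, dir, DMA_ATTR_MMIO);
	ret = dma_mapping_error(dma_dev, addr);
	if (ret)
		return ret;
	...
	dma_unmap_phys(dma_dev, addr, len, dir, DMA_ATTR_MMIO);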
diff --git a/drivers/dma-buf/dma-buf-mapping.c b/drivers/dma-buf/dma-buf-mapping.c
index 02f5cf8b3def40..b5f320be0f24bf 100644
--- a/drivers/dma-buf/dma-buf-mapping.c
+++ b/drivers/dma-buf/dma-buf-mapping.c
@@ -97,6 +97,7 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 					 size_t nr_ranges, size_t size,
 					 enum dma_data_direction dir)
 {
+	struct device *dma_dev = dma_buf_sgt_dma_device(attach);
 	unsigned int nents, mapped_len = 0;
 	struct dma_buf_dma *dma;
 	struct scatterlist *sgl;
@@ -114,7 +115,7 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 	if (!dma)
 		return ERR_PTR(-ENOMEM);
 
-	switch (pci_p2pdma_map_type(provider, attach->dev)) {
+	switch (pci_p2pdma_map_type(provider, dma_dev)) {
 	case PCI_P2PDMA_MAP_BUS_ADDR:
 		/*
 		 * There is no need in IOVA at all for this flow.
@@ -127,7 +128,7 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 			goto err_free_dma;
 		}
 
-		dma_iova_try_alloc(attach->dev, dma->state, 0, size);
+		dma_iova_try_alloc(dma_dev, dma->state, 0, size);
 		break;
 	default:
 		ret = -EINVAL;
@@ -146,7 +147,7 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 			addr = pci_p2pdma_bus_addr_map(provider,
 						       phys_vec[i].paddr);
 		} else if (dma_use_iova(dma->state)) {
-			ret = dma_iova_link(attach->dev, dma->state,
+			ret = dma_iova_link(dma_dev, dma->state,
 					    phys_vec[i].paddr, 0,
 					    phys_vec[i].len, dir,
 					    DMA_ATTR_MMIO);
@@ -155,10 +156,10 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 
 			mapped_len += phys_vec[i].len;
 		} else {
-			addr = dma_map_phys(attach->dev, phys_vec[i].paddr,
+			addr = dma_map_phys(dma_dev, phys_vec[i].paddr,
 					    phys_vec[i].len, dir,
 					    DMA_ATTR_MMIO);
-			ret = dma_mapping_error(attach->dev, addr);
+			ret = dma_mapping_error(dma_dev, addr);
 			if (ret)
 				goto err_unmap_dma;
 		}
@@ -169,7 +170,7 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 
 	if (dma->state && dma_use_iova(dma->state)) {
 		WARN_ON_ONCE(mapped_len != size);
-		ret = dma_iova_sync(attach->dev, dma->state, 0, mapped_len);
+		ret = dma_iova_sync(dma_dev, dma->state, 0, mapped_len);
 		if (ret)
 			goto err_unmap_dma;
 
@@ -196,11 +197,11 @@ struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
 	if (!i || !dma->state) {
 		; /* Do nothing */
 	} else if (dma_use_iova(dma->state)) {
-		dma_iova_destroy(attach->dev, dma->state, mapped_len, dir,
+		dma_iova_destroy(dma_dev, dma->state, mapped_len, dir,
 				 DMA_ATTR_MMIO);
 	} else {
 		for_each_sgtable_dma_sg(&dma->sgt, sgl, i)
-			dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+			dma_unmap_phys(dma_dev, sg_dma_address(sgl),
 				       sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
 	}
 	sg_free_table(&dma->sgt);
@@ -225,6 +226,7 @@ void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
 			enum dma_data_direction dir)
 {
 	struct dma_buf_dma *dma = container_of(sgt, struct dma_buf_dma, sgt);
+	struct device *dma_dev = dma_buf_sgt_dma_device(attach);
 	int i;
 
 	dma_resv_assert_held(attach->dmabuf->resv);
@@ -232,13 +234,13 @@ void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
 	if (!dma->state) {
 		; /* Do nothing */
 	} else if (dma_use_iova(dma->state)) {
-		dma_iova_destroy(attach->dev, dma->state, dma->size, dir,
+		dma_iova_destroy(dma_dev, dma->state, dma->size, dir,
 				 DMA_ATTR_MMIO);
	} else {
 		struct scatterlist *sgl;
 
 		for_each_sgtable_dma_sg(sgt, sgl, i)
-			dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+			dma_unmap_phys(dma_dev, sg_dma_address(sgl),
 				       sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
 	}
 
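As a usage illustration, a hypothetical importer flow (not taken from this
series; the provider and phys_vec parameters of dma_buf_phys_vec_to_sgt()
are assumed from the function body above, and the direction is arbitrary):

	/*
	 * Hypothetical caller. Because the helpers now resolve the DMA
	 * device internally via dma_buf_sgt_dma_device(attach), the map
	 * and unmap sides stay consistent without the caller ever
	 * touching attach->dev.
	 */
	struct sg_table *sgt;

	sgt = dma_buf_phys_vec_to_sgt(attach, provider, phys_vec,
				      nr_ranges, size, DMA_TO_DEVICE);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_buf_free_sgt(attach, sgt, DMA_TO_DEVICE);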