From: David Stevens <stevensd@chromium.org>
commit ee9d4097cc145dcaebedf6b113d17c91c21333a0 upstream.
Calling the iommu_dma_sync_*_for_cpu functions during unmap can cause two copies out of the swiotlb buffer. Do the arch sync directly in __iommu_dma_unmap_swiotlb instead to avoid this. This makes the call to iommu_dma_sync_sg_for_cpu for untrusted devices in iommu_dma_unmap_sg no longer necessary, so move that invocation later in the function.
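To illustrate the double copy: for a non-coherent device whose mapping was
bounced through swiotlb, the pre-patch DMA_FROM_DEVICE unmap path looked
roughly like this (call chain simplified, helper bodies elided — a sketch
for illustration, not verbatim source):

    iommu_dma_unmap_page()
        iommu_dma_sync_single_for_cpu()
            arch_sync_dma_for_cpu(phys, size, dir)
            swiotlb_sync_single_for_cpu()   /* copy #1: bounce -> original */
        __iommu_dma_unmap_swiotlb()
            __iommu_dma_unmap(dev, dma_addr, size)
            swiotlb_tbl_unmap_single()      /* copy #2: bounce -> original */

With the arch sync done directly in __iommu_dma_unmap_swiotlb(), the copy
out of the bounce buffer happens exactly once, in swiotlb_tbl_unmap_single().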
Signed-off-by: David Stevens <stevensd@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20210929023300.335969-4-stevensd@google.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Cc: Mario Limonciello <Mario.Limonciello@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/iommu/dma-iommu.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -521,6 +521,9 @@ static void __iommu_dma_unmap_swiotlb(st
 	if (WARN_ON(!phys))
 		return;
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
 	__iommu_dma_unmap(dev, dma_addr, size);
 
 	if (unlikely(is_swiotlb_buffer(dev, phys)))
@@ -871,8 +874,6 @@ static dma_addr_t iommu_dma_map_page(str
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
 	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
 
@@ -1089,14 +1090,14 @@ static void iommu_dma_unmap_sg(struct de
 	struct scatterlist *tmp;
 	int i;
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
 	if (dev_is_untrusted(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
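
For context, with this patch applied __iommu_dma_unmap_swiotlb() reads
approximately as below (reconstructed from the hunk context above for
illustration; not part of the patch itself):

    static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
    		size_t size, enum dma_data_direction dir,
    		unsigned long attrs)
    {
    	struct iommu_domain *domain = iommu_get_dma_domain(dev);
    	phys_addr_t phys;
    
    	phys = iommu_iova_to_phys(domain, dma_addr);
    	if (WARN_ON(!phys))
    		return;
    
    	/* Arch sync only; no swiotlb copy here anymore. */
    	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
    		arch_sync_dma_for_cpu(phys, size, dir);
    
    	__iommu_dma_unmap(dev, dma_addr, size);
    
    	/* The single copy out of the bounce buffer happens here. */
    	if (unlikely(is_swiotlb_buffer(dev, phys)))
    		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
    }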