Documentation/DMA-API-HOWTO.txt states that dma_map_sg() returns the number of entries created in the DMA address space. However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg() must be made with the original number of entries passed to dma_map_sg().
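For illustration, a minimal sketch of the correct open-coded pattern (the surrounding driver code and variable names here are only illustrative, not taken from any particular driver):

	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (nents == 0)
		return -ENOMEM;
	sgt->nents = nents;	/* number of DMA segments actually created */

	/* ... use only the first 'nents' DMA segments ... */

	/* sync/unmap take the original number of CPU entries, not 'nents' */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);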
struct sg_table is a common structure used to describe a non-contiguous memory buffer, frequently used in the DRM and graphics subsystems. It consists of a scatterlist with memory pages and DMA addresses (the sgl entry), as well as the number of scatterlist entries: CPU pages (the orig_nents entry) and DMA-mapped pages (the nents entry).
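For reference, the relevant fields of struct sg_table (include/linux/scatterlist.h):

	struct sg_table {
		struct scatterlist *sgl;	/* the list */
		unsigned int nents;		/* number of mapped entries */
		unsigned int orig_nents;	/* original size of list */
	};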
It turned out that it was a common mistake to misuse the nents and orig_nents entries, calling DMA-mapping functions with the wrong number of entries or ignoring the number of mapped entries returned by dma_map_sg().
To avoid such issues, let's use the common dma-mapping wrappers that operate directly on struct sg_table objects, and use scatterlist page iterators where possible. This almost always hides references to the nents and orig_nents entries, making the code robust, easier to follow and copy/paste safe.
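A sketch of the resulting pattern with the wrappers (again only illustrative; consume_segment() stands in for whatever the driver does with each DMA segment):

	struct scatterlist *sg;
	unsigned int i;
	int err;

	err = dma_map_sgtable(dev, sgt, dir, 0);
	if (err)
		return err;	/* 0 on success, negative errno on failure */

	/* iterate only over the DMA-mapped segments */
	for_each_sgtable_dma_sg(sgt, sg, i)
		consume_segment(sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sgtable(dev, sgt, dir, 0);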
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 drivers/gpu/host1x/job.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 89b6c14b7392..82d0a60ba3f7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-		if (!err) {
-			err = -ENOMEM;
+		err = dma_map_sgtable(dev, sgt, dir, 0);
+		if (err)
 			goto unpin;
-		}
 
 		job->unpins[job->num_unpins].dev = dev;
 		job->unpins[job->num_unpins].dir = dir;
@@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		}
 
 		if (host->domain) {
-			for_each_sg(sgt->sgl, sg, sgt->nents, j)
+			for_each_sgtable_sg(sgt, sg, j)
 				gather_size += sg->length;
 			gather_size = iova_align(&host->iova, gather_size);
 
@@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 				goto put;
 			}
 
-			err = iommu_map_sg(host->domain,
+			err = iommu_map_sgtable(host->domain,
 					iova_dma_addr(&host->iova, alloc),
-					sgt->sgl, sgt->nents, IOMMU_READ);
+					sgt, IOMMU_READ);
 			if (err == 0) {
 				__free_iova(&host->iova, alloc);
 				err = -EINVAL;
@@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			job->unpins[job->num_unpins].size = gather_size;
 			phys_addr = iova_dma_addr(&host->iova, alloc);
 		} else if (sgt) {
-			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
-					 DMA_TO_DEVICE);
-			if (!err) {
-				err = -ENOMEM;
+			err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
+			if (err)
 				goto put;
-			}
 
 			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
 			job->unpins[job->num_unpins].dev = host->dev;
@@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job)
 		}
 
 		if (unpin->dev && sgt)
-			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
-				     unpin->dir);
+			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
 
 		host1x_bo_unpin(dev, unpin->bo, sgt);
 		host1x_bo_put(unpin->bo);