Hi
Am 18.08.25 um 14:40 schrieb Christian König: [...]
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
- struct drm_gem_object *obj = dma_buf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret;
- /*
* Pin to keep buffer in place while it's vmap'ed. The actual
* location is not important as long as it's mapable.
Yeah, exactly that won't work here. Most of the locations are not CPU accessible.
You could use AMDGPU_GEM_DOMAIN_GTT, that should most likely work in all cases but isn't necessarily the most optimal solution.
No problem about that, but why not a bit more flexibility? When udl copies from the buffer, it is likely pinned to VRAM.
A bit mask of _CPU, _GTT, and _VRAM should work fine. The other domains are probably irrelevant for our use case.
Best regards Thomas
Regards, Christian.
*
* This code is required for exporting to GEM-SHMEM without S/G table.
* Once GEM-SHMEM supports dynamic imports, it should be dropped.
*/
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_MASK);
- if (ret)
return ret;
- ret = drm_gem_dmabuf_vmap(dma_buf, map);
- if (ret)
amdgpu_bo_unpin(bo);
- return ret;
+}
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
- struct drm_gem_object *obj = dma_buf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- drm_gem_dmabuf_vunmap(dma_buf, map);
- amdgpu_bo_unpin(bo);
+}
- const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_attach,
- .pin = amdgpu_dma_buf_pin,
@@ -294,8 +326,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
- .vmap = amdgpu_dma_buf_vmap,
- .vunmap = amdgpu_dma_buf_vunmap,
};
/**