On Tue, May 05, 2026 at 04:05:09PM +0200, Ketil Johnsen wrote:
Handle the sync to device of FW memory sections inside panthor_fw_init_section_mem() so that the callers do not have to.
This small improvement is also critical for protected FW sections, so we avoid issuing memory transactions to protected memory from CPU running in normal mode.
Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Best regards, Liviu
 drivers/gpu/drm/panthor/panthor_fw.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index be0da5b1f3abf..0d07a133dc3af 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -446,6 +446,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
 					struct panthor_fw_section *section)
 {
 	bool was_mapped = !!section->mem->kmap;
+	struct sg_table *sgt;
 	int ret;
 
 	if (!section->data.size &&
@@ -464,6 +465,11 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
 
 	if (!was_mapped)
 		panthor_kernel_bo_vunmap(section->mem);
+
+	/* An sgt should have been requested when the kernel BO was GPU-mapped. */
+	sgt = to_panthor_bo(section->mem->obj)->dmap.sgt;
+	if (!drm_WARN_ON_ONCE(&ptdev->base, !sgt))
+		dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
 }
 
 /**
@@ -626,7 +632,6 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 	section_size = hdr.va.end - hdr.va.start;
 	if (section_size) {
 		u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
 		u32 vm_map_flags = 0;
 		u64 va = hdr.va.start;
-		struct panthor_gem_object *bo;
 
@@ -663,14 +668,6 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 		}
 
 		panthor_fw_init_section_mem(ptdev, section);
 
-		bo = to_panthor_bo(section->mem->obj);
-		/* An sgt should have been requested when the kernel BO was GPU-mapped. */
-		if (drm_WARN_ON_ONCE(&ptdev->base, !bo->dmap.sgt))
-			return -EINVAL;
-
-		dma_sync_sgtable_for_device(ptdev->base.dev, bo->dmap.sgt,
-					    DMA_TO_DEVICE);
-
 		if (hdr.va.start == CSF_MCU_SHARED_REGION_START)
@@ -724,17 +721,10 @@ panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
 	struct panthor_fw_section *section;
 
 	list_for_each_entry(section, &ptdev->fw->sections, node) {
-		struct sg_table *sgt;
-
 		if (!full_reload &&
 		    !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
 			continue;
 
 		panthor_fw_init_section_mem(ptdev, section);
-
-		/* An sgt should have been requested when the kernel BO was GPU-mapped. */
-		sgt = to_panthor_bo(section->mem->obj)->dmap.sgt;
-		if (!drm_WARN_ON_ONCE(&ptdev->base, !sgt))
-			dma_sync_sgtable_for_device(ptdev->base.dev, sgt, DMA_TO_DEVICE);
 	}
 }
-- 
2.43.0
linaro-mm-sig@lists.linaro.org