It simplifies the code in a few places, allowing direct returns instead of gotos. It also helps identify the sections under the dev_access guard when scoped_cond_guard() is used.
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com> --- drivers/gpu/drm/panthor/panthor_device.c | 127 ++++++++++++++----------------- drivers/gpu/drm/panthor/panthor_drv.c | 58 ++++++-------- drivers/gpu/drm/panthor/panthor_mmu.c | 29 +++---- drivers/gpu/drm/panthor/panthor_sched.c | 10 +-- 4 files changed, 95 insertions(+), 129 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index cb9cd8d0448b..988a9a34f753 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -123,7 +123,7 @@ static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data) static void panthor_device_reset_work(struct work_struct *work) { struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work); - int ret = 0, cookie; + int ret = 0;
/* If the device is entering suspend, we don't reset. A slow reset will * be forced at resume time instead. @@ -131,19 +131,17 @@ static void panthor_device_reset_work(struct work_struct *work) if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) return;
- if (!drm_dev_enter(&ptdev->base, &cookie)) - return; - - panthor_sched_pre_reset(ptdev); - panthor_fw_pre_reset(ptdev, true); - panthor_mmu_pre_reset(ptdev); - panthor_hw_soft_reset(ptdev); - panthor_hw_l2_power_on(ptdev); - panthor_mmu_post_reset(ptdev); - ret = panthor_fw_post_reset(ptdev); - atomic_set(&ptdev->reset.pending, 0); - panthor_sched_post_reset(ptdev, ret != 0); - drm_dev_exit(cookie); + scoped_cond_guard(drm_dev_access, return, &ptdev->base) { + panthor_sched_pre_reset(ptdev); + panthor_fw_pre_reset(ptdev, true); + panthor_mmu_pre_reset(ptdev); + panthor_hw_soft_reset(ptdev); + panthor_hw_l2_power_on(ptdev); + panthor_mmu_post_reset(ptdev); + ret = panthor_fw_post_reset(ptdev); + atomic_set(&ptdev->reset.pending, 0); + panthor_sched_post_reset(ptdev, ret != 0); + }
if (ret) { panthor_device_unplug(ptdev); @@ -394,38 +392,31 @@ static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf) u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; unsigned long pfn; pgprot_t pgprot; - vm_fault_t ret; bool active; - int cookie;
- if (!drm_dev_enter(&ptdev->base, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(&ptdev->base); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return VM_FAULT_SIGBUS;
- scoped_guard(mutex, &ptdev->pm.mmio_lock) { - active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE; + guard(mutex)(&ptdev->pm.mmio_lock); + active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;
- switch (offset) { - case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: - if (active) - pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID); - else - pfn = page_to_pfn(ptdev->pm.dummy_latest_flush); + switch (offset) { + case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: + if (active) + pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID); + else + pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
- pgprot = vma->vm_page_prot; - if (active) - pgprot = pgprot_noncached(pgprot); + pgprot = vma->vm_page_prot; + if (active) + pgprot = pgprot_noncached(pgprot);
- ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot); - break; + return vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
- default: - ret = VM_FAULT_SIGBUS; - break; - } + default: + return VM_FAULT_SIGBUS; } - - drm_dev_exit(cookie); - return ret; }
static const struct vm_operations_struct panthor_mmio_vm_ops = { @@ -482,7 +473,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev) int panthor_device_resume(struct device *dev) { struct panthor_device *ptdev = dev_get_drvdata(dev); - int ret, cookie; + int ret;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED) return -EINVAL; @@ -503,28 +494,27 @@ int panthor_device_resume(struct device *dev)
panthor_devfreq_resume(ptdev);
- if (panthor_device_is_initialized(ptdev) && - drm_dev_enter(&ptdev->base, &cookie)) { - /* If there was a reset pending at the time we suspended the - * device, we force a slow reset. - */ - if (atomic_read(&ptdev->reset.pending)) { - ptdev->reset.fast = false; - atomic_set(&ptdev->reset.pending, 0); - } + if (panthor_device_is_initialized(ptdev)) { + scoped_cond_guard(drm_dev_access, ret = 0, &ptdev->base) { + /* If there was a reset pending at the time we suspended the + * device, we force a slow reset. + */ + if (atomic_read(&ptdev->reset.pending)) { + ptdev->reset.fast = false; + atomic_set(&ptdev->reset.pending, 0); + }
- ret = panthor_device_resume_hw_components(ptdev); - if (ret && ptdev->reset.fast) { - drm_err(&ptdev->base, "Fast reset failed, trying a slow reset"); - ptdev->reset.fast = false; ret = panthor_device_resume_hw_components(ptdev); + if (ret && ptdev->reset.fast) { + drm_err(&ptdev->base, "Fast reset failed, trying a slow reset"); + ptdev->reset.fast = false; + ret = panthor_device_resume_hw_components(ptdev); + } + + if (!ret) + panthor_sched_resume(ptdev); }
- if (!ret) - panthor_sched_resume(ptdev); - - drm_dev_exit(cookie); - if (ret) goto err_suspend_devfreq; } @@ -559,7 +549,6 @@ int panthor_device_resume(struct device *dev) int panthor_device_suspend(struct device *dev) { struct panthor_device *ptdev = dev_get_drvdata(dev); - int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) return -EINVAL; @@ -577,19 +566,19 @@ int panthor_device_suspend(struct device *dev) DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1); }
- if (panthor_device_is_initialized(ptdev) && - drm_dev_enter(&ptdev->base, &cookie)) { - cancel_work_sync(&ptdev->reset.work); + if (panthor_device_is_initialized(ptdev)) { + scoped_guard(drm_dev_access, &ptdev->base) { + cancel_work_sync(&ptdev->reset.work);
- /* We prepare everything as if we were resetting the GPU. - * The end of the reset will happen in the resume path though. - */ - panthor_sched_suspend(ptdev); - panthor_fw_suspend(ptdev); - panthor_mmu_suspend(ptdev); - panthor_gpu_suspend(ptdev); - panthor_pwr_suspend(ptdev); - drm_dev_exit(cookie); + /* We prepare everything as if we were resetting the GPU. + * The end of the reset will happen in the resume path though. + */ + panthor_sched_suspend(ptdev); + panthor_fw_suspend(ptdev); + panthor_mmu_suspend(ptdev); + panthor_gpu_suspend(ptdev); + panthor_pwr_suspend(ptdev); + } }
panthor_devfreq_suspend(ptdev); diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index e8dc4096c1d2..789ddc0ff7ef 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -997,9 +997,10 @@ static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data, struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); struct panthor_file *pfile = file->driver_priv; struct drm_panthor_vm_create *args = data; - int cookie, ret; + int ret;
- if (!drm_dev_enter(ddev, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(ddev); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args); @@ -1008,7 +1009,6 @@ static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data, ret = 0; }
- drm_dev_exit(cookie); return ret; }
@@ -1033,38 +1033,30 @@ static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data, struct panthor_file *pfile = file->driver_priv; struct drm_panthor_bo_create *args = data; struct panthor_vm *vm = NULL; - int cookie, ret; + int ret;
- if (!drm_dev_enter(ddev, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(ddev); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
if (!args->size || args->pad || - (args->flags & ~PANTHOR_BO_FLAGS)) { - ret = -EINVAL; - goto out_dev_exit; - } + (args->flags & ~PANTHOR_BO_FLAGS)) + return -EINVAL;
if ((args->flags & DRM_PANTHOR_BO_NO_MMAP) && - (args->flags & DRM_PANTHOR_BO_WB_MMAP)) { - ret = -EINVAL; - goto out_dev_exit; - } + (args->flags & DRM_PANTHOR_BO_WB_MMAP)) + return -EINVAL;
if (args->exclusive_vm_id) { vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id); - if (!vm) { - ret = -EINVAL; - goto out_dev_exit; - } + if (!vm) + return -EINVAL; }
ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size, args->flags, &args->handle);
panthor_vm_put(vm); - -out_dev_exit: - drm_dev_exit(cookie); return ret; }
@@ -1107,17 +1099,18 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data, struct drm_panthor_group_submit *args = data; struct drm_panthor_queue_submit *jobs_args; struct panthor_submit_ctx ctx; - int ret = 0, cookie; + int ret = 0;
if (args->pad) return -EINVAL;
- if (!drm_dev_enter(ddev, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(ddev); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits); if (ret) - goto out_dev_exit; + return ret;
ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count); if (ret) @@ -1201,8 +1194,6 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data, out_free_jobs_args: kvfree(jobs_args);
-out_dev_exit: - drm_dev_exit(cookie); return ret; }
@@ -1438,18 +1429,15 @@ static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panthor_vm_bind *args = data; - int cookie, ret;
- if (!drm_dev_enter(ddev, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(ddev); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC) - ret = panthor_ioctl_vm_bind_async(ddev, args, file); - else - ret = panthor_ioctl_vm_bind_sync(ddev, args, file); + return panthor_ioctl_vm_bind_async(ddev, args, file);
- drm_dev_exit(cookie); - return ret; + return panthor_ioctl_vm_bind_sync(ddev, args, file); }
static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data, @@ -1671,9 +1659,10 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma) struct panthor_device *ptdev = pfile->ptdev; u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; u64 user_mmio_offset; - int ret, cookie; + int ret;
- if (!drm_dev_enter(file->minor->dev, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(file->minor->dev); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
/* Adjust the user MMIO offset to match the offset used kernel side. @@ -1691,7 +1680,6 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma) ret = drm_gem_mmap(filp, vma); }
- drm_dev_exit(cookie); return ret; }
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index ab9a77e6a145..ff3beb9147e8 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -788,13 +788,13 @@ static int panthor_vm_active_locked(struct panthor_vm *vm) int panthor_vm_active(struct panthor_vm *vm) { struct panthor_device *ptdev = vm->ptdev; - int ret = 0, cookie;
- if (!drm_dev_enter(&ptdev->base, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(&ptdev->base); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return -ENODEV;
if (refcount_inc_not_zero(&vm->as.active_cnt)) - goto out_dev_exit; + return 0;
/* As soon as active is called, we place the VM at the end of the VM LRU. * If something fails after that, the only downside is that this VM that @@ -809,14 +809,10 @@ int panthor_vm_active(struct panthor_vm *vm) /* Make sure we don't race with lock/unlock_region() calls * happening around VM bind operations. */ - scoped_guard(mutex, &vm->op_lock) { - guard(mutex)(&ptdev->mmu->as.slots_lock); - ret = panthor_vm_active_locked(vm); - } + guard(mutex)(&vm->op_lock); + guard(mutex)(&ptdev->mmu->as.slots_lock);
-out_dev_exit: - drm_dev_exit(cookie); - return ret; + return panthor_vm_active_locked(vm); }
/** @@ -902,16 +898,15 @@ static size_t get_pgsize(u64 addr, size_t size, size_t *count) static void panthor_vm_declare_unusable(struct panthor_vm *vm) { struct panthor_device *ptdev = vm->ptdev; - int cookie;
if (vm->unusable) return;
vm->unusable = true; guard(mutex)(&ptdev->mmu->as.slots_lock); - if (vm->as.id >= 0 && drm_dev_enter(&ptdev->base, &cookie)) { - panthor_mmu_as_disable(ptdev, vm->as.id, false); - drm_dev_exit(cookie); + if (vm->as.id >= 0) { + scoped_guard(drm_dev_access, &ptdev->base) + panthor_mmu_as_disable(ptdev, vm->as.id, false); } }
@@ -1983,12 +1978,8 @@ static void panthor_vm_free(struct drm_gpuvm *gpuvm) scoped_guard(mutex, &vm->op_lock) { guard(mutex)(&ptdev->mmu->as.slots_lock); if (vm->as.id >= 0) { - int cookie; - - if (drm_dev_enter(&ptdev->base, &cookie)) { + scoped_guard(drm_dev_access, &ptdev->base) panthor_mmu_as_disable(ptdev, vm->as.id, false); - drm_dev_exit(cookie); - }
ptdev->mmu->as.slots[vm->as.id].vm = NULL; clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index d8cadd393fbe..9aa9941d2309 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -2624,21 +2624,19 @@ static void tick_work(struct work_struct *work) struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, tick_work.work); struct panthor_device *ptdev = sched->ptdev; - int ret, cookie; + int ret;
- if (!drm_dev_enter(&ptdev->base, &cookie)) + ACQUIRE(drm_dev_access, dev_guard)(&ptdev->base); + if (ACQUIRE_ERR(drm_dev_access, &dev_guard)) return;
ret = panthor_device_resume_and_get(ptdev); if (drm_WARN_ON(&ptdev->base, ret)) - goto out_dev_exit; + return;
tick(sched); pm_runtime_mark_last_busy(ptdev->base.dev); pm_runtime_put_autosuspend(ptdev->base.dev); - -out_dev_exit: - drm_dev_exit(cookie); }
static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)