The patch below does not apply to the 4.18-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b2b599fb54f90ae395ddc51f0d49e4f28244a8f8 Mon Sep 17 00:00:00 2001
From: Hang Yuan <hang.yuan@linux.intel.com>
Date: Wed, 29 Aug 2018 17:15:56 +0800
Subject: [PATCH] drm/i915/gvt: move intel_runtime_pm_get out of spin_lock in stop_schedule
pm_runtime_get_sync in intel_runtime_pm_get might sleep if the i915 device is not active. When vgpu scheduling is stopped, the device may be inactive, so the intel_runtime_pm_get call needs to be moved out of the spin_lock/unlock section.
Fixes: b24881e0b0b6 ("drm/i915/gvt: Add runtime_pm_get/put into gvt_switch_mmio")
Cc: stable@vger.kernel.org
Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..e872f4847fbe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
-	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff..985fe81794dd 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -426,6 +426,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		&vgpu->gvt->scheduler;
 	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	if (!vgpu_data->active)
 		return;
@@ -444,6 +445,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->current_vgpu = NULL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
 	spin_lock_bh(&scheduler->mmio_context_lock);
 	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
 		if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +454,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
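For anyone preparing the backport by hand: the commit message above hinges on the fact that pm_runtime_get_sync(), which intel_runtime_pm_get() ends up calling, may sleep, while spin_lock_bh()/spin_unlock_bh() delimit an atomic section where sleeping is not allowed. Below is a rough sketch of the ordering the patch establishes in intel_vgpu_stop_schedule(); it is an illustration only, not the actual tree, and the scheduler type name and helper name here are assumed rather than taken from the diff.

/*
 * Illustrative sketch only, not the kernel source: the ordering the
 * patch establishes in intel_vgpu_stop_schedule(). The runtime PM
 * reference is taken before the spinlock because pm_runtime_get_sync()
 * may sleep, which is forbidden inside spin_lock_bh()/spin_unlock_bh().
 */
static void stop_schedule_sketch(struct intel_vgpu *vgpu)
{
	/* type name assumed from the i915 GVT headers */
	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id;

	intel_runtime_pm_get(dev_priv);	/* may sleep: take it before the lock */

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			/* atomic context: no runtime PM get/put in here */
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);

	intel_runtime_pm_put(dev_priv);	/* drop the reference after unlocking */
}

This is also why the first hunk drops intel_runtime_pm_get/put from intel_gvt_switch_mmio(): that function is now reached under the spinlock with the runtime PM reference already held by the caller.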