When creating a vGPU workload, the guest context head pointer should be
updated correctly by comparing it with the existing workloads in the
guest workload queue, including the currently running context.

In some situations there is a running context A, and then two new vGPU
workloads for context B and context A are received. In the new workload
for context A, the head pointer should be updated with the running
context A's tail.
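For illustration only (not part of the patch), a minimal userspace
sketch of that lookup rule, with made-up simplified types in place of
the kernel ones:

	#include <stdio.h>

	struct workload {
		unsigned int context_id;
		unsigned long rb_tail;
	};

	/*
	 * Pick the ring head for a new workload of context @ctx: scan
	 * every queued workload (q[0] models the currently running one)
	 * and, on a match, use its tail instead of the guest head.
	 */
	static unsigned long pick_head(const struct workload *q, int n,
				       unsigned int ctx,
				       unsigned long guest_head)
	{
		unsigned long head = guest_head;
		int i;

		for (i = 0; i < n; i++)
			if (q[i].context_id == ctx)
				head = q[i].rb_tail; /* guest head may be stale */
		return head;
	}

	int main(void)
	{
		/* running context A, plus a queued workload for context B */
		const struct workload q[] = {
			{ .context_id = 0xA, .rb_tail = 0x40 },
			{ .context_id = 0xB, .rb_tail = 0x20 },
		};

		/* a new workload for context A arrives with a stale guest head */
		printf("new A head = %#lx\n", pick_head(q, 2, 0xA, 0x00));
		return 0;
	}

With a running context A whose tail is 0x40, the new A workload's head
becomes 0x40 rather than the stale guest-provided value.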
Fixes: 09975b861aa0 ("drm/i915/execlists: Disable preemption under GVT")
Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy")
Cc: stable@vger.kernel.org
Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8940fa8..89057c6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1438,9 +1438,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1460,7 +1457,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct list_head *pos;
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1486,15 +1484,22 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each(pos, q) {
+		last_workload = container_of(pos, struct intel_vgpu_workload,
+					     list);
+		if (!last_workload)
+			continue;
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
On 2019.08.27 13:38:58 +0800, Xiaolin Zhang wrote:
> When creating a vGPU workload, the guest context head pointer should be
> updated correctly by comparing it with the existing workloads in the
> guest workload queue, including the currently running context.
>
> In some situations there is a running context A, and then two new vGPU
> workloads for context B and context A are received. In the new workload
> for context A, the head pointer should be updated with the running
> context A's tail.
>
> Fixes: 09975b861aa0 ("drm/i915/execlists: Disable preemption under GVT")
> Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy")
> Cc: stable@vger.kernel.org
> Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/scheduler.c | 31 ++++++++++++++++++-------------
>  1 file changed, 18 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 8940fa8..89057c6 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -1438,9 +1438,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
>  #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
>  		((a)->lrca == (b)->lrca))
>  
> -#define get_last_workload(q) \
> -	(list_empty(q) ? NULL : container_of(q->prev, \
> -	struct intel_vgpu_workload, list))
>  /**
>   * intel_vgpu_create_workload - create a vGPU workload
>   * @vgpu: a vGPU
> @@ -1460,7 +1457,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
>  {
>  	struct intel_vgpu_submission *s = &vgpu->submission;
>  	struct list_head *q = workload_q_head(vgpu, ring_id);
> -	struct intel_vgpu_workload *last_workload = get_last_workload(q);
> +	struct list_head *pos;
> +	struct intel_vgpu_workload *last_workload = NULL;
>  	struct intel_vgpu_workload *workload = NULL;
>  	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
>  	u64 ring_context_gpa;
> @@ -1486,15 +1484,22 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
>  	head &= RB_HEAD_OFF_MASK;
>  	tail &= RB_TAIL_OFF_MASK;
>  
> -	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
> -		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
> -		gvt_dbg_el("ctx head %x real head %lx\n", head,
> -				last_workload->rb_tail);
> -		/*
> -		 * cannot use guest context head pointer here,
> -		 * as it might not be updated at this time
> -		 */
> -		head = last_workload->rb_tail;
> +	list_for_each(pos, q) {
> +		last_workload = container_of(pos, struct intel_vgpu_workload,
> +					     list);
> +		if (!last_workload)
> +			continue;
Just use list_for_each_entry(), and the lookup of the last uncompleted
workload should search backward through the queue, so hopefully we can
fix this once and for all.
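Roughly something like this untested sketch (variable names as in the
patch context); list_for_each_entry_reverse() walks newest-first, so the
most recently queued match wins and the walk can stop early:

	/* search backward: the newest queued match is the one we want */
	list_for_each_entry_reverse(last_workload, q, list) {
		if (same_context(&last_workload->ctx_desc, desc)) {
			/*
			 * cannot use guest context head pointer here,
			 * as it might not be updated at this time
			 */
			head = last_workload->rb_tail;
			break;	/* newest match found, done */
		}
	}

That also drops the !last_workload check, which can never be true:
container_of() on a non-NULL list node never yields NULL.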
> +		if (same_context(&last_workload->ctx_desc, desc)) {
> +			gvt_dbg_el("ring id %d cur workload == last\n",
> +					ring_id);
> +			gvt_dbg_el("ctx head %x real head %lx\n", head,
> +					last_workload->rb_tail);
> +			/*
> +			 * cannot use guest context head pointer here,
> +			 * as it might not be updated at this time
> +			 */
> +			head = last_workload->rb_tail;
> +		}
>  	}
>  
>  	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
> --
> 2.7.4