From: Laurent Pinchart <laurent.pinchart+renesas(a)ideasonboard.com>
commit 641307df71fe77d7b38a477067495ede05d47295 upstream.
When stopping the CRTC the driver must disable all planes and wait for
the change to take effect at the next vblank. Merely calling
drm_crtc_wait_one_vblank() is not enough, as the function doesn't
include any mechanism to handle the race with vblank interrupts.
Replace the drm_crtc_wait_one_vblank() call with a manual mechanism that
handles the vblank interrupt race.
Cc: stable(a)vger.kernel.org
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas(a)ideasonboard.com>
Reviewed-by: Kieran Bingham <kieran.bingham+renesas(a)ideasonboard.com>
Signed-off-by: thongsyho <thong.ho.px(a)rvc.renesas.com>
Signed-off-by: Nhan Nguyen <nhan.nguyen.yb(a)renesas.com>
---
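A minimal userspace sketch of the counter-based wait this patch introduces, for context only; the pthread emulation of wait_event()/wake_up(), the missing timeout, and all names here are illustrative, not the driver code. The stop path decides under a lock whether one or two vblanks must elapse (two if an interrupt is already pending), and the interrupt path decrements the counter and wakes the waiter once it reaches zero.

/* Illustrative only: emulates the vblank_count handshake with pthreads. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;
static unsigned int vblank_count;
static bool vblank_pending = true;	/* pretend an unprocessed vblank exists */

/* Stands in for the vblank interrupt handler. */
static void *vblank_irq(void *arg)
{
	for (int i = 0; i < 2; i++) {
		usleep(10000);		/* one "frame" */
		pthread_mutex_lock(&lock);
		if (vblank_count && --vblank_count == 0)
			pthread_cond_signal(&wait_cond);	/* wake_up() */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_mutex_lock(&lock);
	/* "Disable planes" here, then decide how many vblanks to wait for. */
	vblank_count = vblank_pending ? 2 : 1;
	pthread_mutex_unlock(&lock);

	pthread_create(&irq, NULL, vblank_irq, NULL);

	pthread_mutex_lock(&lock);
	while (vblank_count)		/* wait_event(): re-check under the lock */
		pthread_cond_wait(&wait_cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(irq, NULL);
	printf("planes disabled, change latched\n");
	return 0;
}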
drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 53 ++++++++++++++++++++++++++++++----
drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 8 +++++
2 files changed, 55 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 848f7f2..3322b15 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -392,6 +392,31 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcrtc->started = true;
}
+static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct drm_crtc *crtc = &rcrtc->crtc;
+ u32 status;
+ /* Make sure vblank interrupts are enabled. */
+ drm_crtc_vblank_get(crtc);
+ /*
+ * Disable planes and calculate how many vertical blanking interrupts we
+ * have to wait for. If a vertical blanking interrupt has been triggered
+ * but not processed yet, we don't know whether it occurred before or
+ * after the planes got disabled. We thus have to wait for two vblank
+ * interrupts in that case.
+ */
+ spin_lock_irq(&rcrtc->vblank_lock);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
+ status = rcar_du_crtc_read(rcrtc, DSSR);
+ rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
+ spin_unlock_irq(&rcrtc->vblank_lock);
+ if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
+ msecs_to_jiffies(100)))
+ dev_warn(rcdu->dev, "vertical blanking timeout\n");
+ drm_crtc_vblank_put(crtc);
+}
+
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
@@ -400,17 +425,16 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
return;
/* Disable all planes and wait for the change to take effect. This is
- * required as the DSnPR registers are updated on vblank, and no vblank
- * will occur once the CRTC is stopped. Disabling planes when starting
- * the CRTC thus wouldn't be enough as it would start scanning out
- * immediately from old frame buffers until the next vblank.
+ * required as the plane enable registers are updated on vblank, and no
+ * vblank will occur once the CRTC is stopped. Disabling planes when
+ * starting the CRTC thus wouldn't be enough as it would start scanning
+ * out immediately from old frame buffers until the next vblank.
*
* This increases the CRTC stop delay, especially when multiple CRTCs
* are stopped in one operation as we now wait for one vblank per CRTC.
* Whether this can be improved needs to be researched.
*/
- rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
- drm_crtc_wait_one_vblank(crtc);
+ rcar_du_crtc_disable_planes(rcrtc);
/* Disable vertical blanking interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
@@ -548,10 +572,25 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
u32 status;
+ spin_lock(&rcrtc->vblank_lock);
+
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_VBK) {
+ /*
+ * Wake up the vblank wait if the counter reaches 0. This must
+ * be protected by the vblank_lock to avoid races in
+ * rcar_du_crtc_disable_planes().
+ */
+ if (rcrtc->vblank_count) {
+ if (--rcrtc->vblank_count == 0)
+ wake_up(&rcrtc->vblank_wait);
+ }
+ }
+ spin_unlock(&rcrtc->vblank_lock);
+
+ if (status & DSSR_VBK) {
drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
@@ -606,6 +645,8 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
}
init_waitqueue_head(&rcrtc->flip_wait);
+ init_waitqueue_head(&rcrtc->vblank_wait);
+ spin_lock_init(&rcrtc->vblank_lock);
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 6f08b7e..48bef05 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
#include <drm/drmP.h>
@@ -33,6 +34,9 @@
* @started: whether the CRTC has been started and is running
* @event: event to post when the pending page flip completes
* @flip_wait: wait queue used to signal page flip completion
+ * @vblank_lock: protects vblank_wait and vblank_count
+ * @vblank_wait: wait queue used to signal vertical blanking
+ * @vblank_count: number of vertical blanking interrupts to wait for
* @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
* @group: CRTC group this CRTC belongs to
*/
@@ -48,6 +52,10 @@ struct rcar_du_crtc {
struct drm_pending_vblank_event *event;
wait_queue_head_t flip_wait;
+ spinlock_t vblank_lock;
+ wait_queue_head_t vblank_wait;
+ unsigned int vblank_count;
+
unsigned int outputs;
struct rcar_du_group *group;
--
1.9.1
From: Kuninori Morimoto <kuninori.morimoto.gx(a)renesas.com>
commit 1f8754d4daea5f257370a52a30fcb22798c54516 upstream.
When an SSI uses a shared pin, one of the SSIs is also used as the parent SSI.
The normal SSI's remove and the parent SSI's remove (which refer to the same
SSI) are then both called at unbind or remove time, so free_irq() ends up
being called twice.
This patch solves the issue.
Cc: stable(a)vger.kernel.org
Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx(a)renesas.com>
Tested-by: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx(a)renesas.com>
Reported-by: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx(a)renesas.com>
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: thongsyho <thong.ho.px(a)rvc.renesas.com>
Signed-off-by: Nhan Nguyen <nhan.nguyen.yb(a)renesas.com>
---
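A small self-contained sketch of the guard this patch adds, for context only; the struct, the plain free(), and the call sites stand in for the rsnd mod and devm_free_irq() and are illustrative, not the driver code. The idea is simply to return early when remove is called for the parent role of an SSI, so the IRQ is released exactly once through the normal path.

/* Illustrative only: release a shared resource exactly once. */
#include <stdio.h>
#include <stdlib.h>

struct mod { int *irq_cookie; };

/* 'parent' is the SSI acting as shared-pin parent for this stream, if any. */
static int ssi_remove(struct mod *m, struct mod *parent)
{
	/* Do nothing for the parent mod: the same SSI is also removed
	 * through its normal (non-parent) path, which frees the IRQ. */
	if (parent == m)
		return 0;

	free(m->irq_cookie);		/* stands in for devm_free_irq() */
	m->irq_cookie = NULL;
	return 0;
}

int main(void)
{
	struct mod ssi = { .irq_cookie = malloc(sizeof(int)) };

	ssi_remove(&ssi, &ssi);		/* parent-path remove: skipped */
	ssi_remove(&ssi, NULL);		/* normal remove: frees exactly once */
	printf("IRQ released once\n");
	return 0;
}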
sound/soc/sh/rcar/ssi.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6cb6db0..9472d99 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -694,9 +694,14 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
struct device *dev = rsnd_priv_to_dev(priv);
int irq = ssi->irq;
+ /* Do nothing for SSI parent mod */
+ if (ssi_parent_mod == mod)
+ return 0;
+
/* PIO will request IRQ again */
devm_free_irq(dev, irq, mod);
--
1.9.1
I see this was just applied to Linus's tree. This too probably should be
tagged for stable.
-- Steve
On Tue, 6 Feb 2018 03:54:42 -0800
"tip-bot for Steven Rostedt (VMware)" <tipbot(a)zytor.com> wrote:
> Commit-ID: 364f56653708ba8bcdefd4f0da2a42904baa8eeb
> Gitweb: https://git.kernel.org/tip/364f56653708ba8bcdefd4f0da2a42904baa8eeb
> Author: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
> AuthorDate: Tue, 23 Jan 2018 20:45:38 -0500
> Committer: Ingo Molnar <mingo(a)kernel.org>
> CommitDate: Tue, 6 Feb 2018 10:20:33 +0100
>
> sched/rt: Up the root domain ref count when passing it around via IPIs
>
> When issuing an IPI RT push, where an IPI is sent to each CPU that has more
> than one RT task scheduled on it, it references the root domain's rto_mask,
> which contains all the CPUs within the root domain that have more than one RT
> task in the runnable state.
> rq->lock is released. This means that the root domain that is associated to
> the run queue could be freed while the IPIs are going around.
>
> Add a sched_get_rd() and a sched_put_rd() that will increment and decrement
> the root domain's ref count respectively. This way when initiating the IPIs,
> the scheduler will up the root domain's ref count before releasing the
> rq->lock, ensuring that the root domain does not go away until the IPI round
> is complete.
>
> Reported-by: Pavan Kondeti <pkondeti(a)codeaurora.org>
> Signed-off-by: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
> Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
> Cc: Andrew Morton <akpm(a)linux-foundation.org>
> Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
> Cc: Mike Galbraith <efault(a)gmx.de>
> Cc: Peter Zijlstra <peterz(a)infradead.org>
> Cc: Thomas Gleixner <tglx(a)linutronix.de>
> Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic")
> Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtN…
> Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
> ---
> kernel/sched/rt.c | 9 +++++++--
> kernel/sched/sched.h | 2 ++
> kernel/sched/topology.c | 13 +++++++++++++
> 3 files changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 2fb627d..89a086e 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -1990,8 +1990,11 @@ static void tell_cpu_to_push(struct rq *rq)
>
> rto_start_unlock(&rq->rd->rto_loop_start);
>
> - if (cpu >= 0)
> + if (cpu >= 0) {
> + /* Make sure the rd does not get freed while pushing */
> + sched_get_rd(rq->rd);
> irq_work_queue_on(&rq->rd->rto_push_work, cpu);
> + }
> }
>
> /* Called from hardirq context */
> @@ -2021,8 +2024,10 @@ void rto_push_irq_work_func(struct irq_work *work)
>
> raw_spin_unlock(&rd->rto_lock);
>
> - if (cpu < 0)
> + if (cpu < 0) {
> + sched_put_rd(rd);
> return;
> + }
>
> /* Try the next RT overloaded CPU */
> irq_work_queue_on(&rd->rto_push_work, cpu);
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 2e95505..fb5fc45 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -691,6 +691,8 @@ extern struct mutex sched_domains_mutex;
> extern void init_defrootdomain(void);
> extern int sched_init_domains(const struct cpumask *cpu_map);
> extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
> +extern void sched_get_rd(struct root_domain *rd);
> +extern void sched_put_rd(struct root_domain *rd);
>
> #ifdef HAVE_RT_PUSH_IPI
> extern void rto_push_irq_work_func(struct irq_work *work);
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 034cbed..519b024 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -259,6 +259,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
> call_rcu_sched(&old_rd->rcu, free_rootdomain);
> }
>
> +void sched_get_rd(struct root_domain *rd)
> +{
> + atomic_inc(&rd->refcount);
> +}
> +
> +void sched_put_rd(struct root_domain *rd)
> +{
> + if (!atomic_dec_and_test(&rd->refcount))
> + return;
> +
> + call_rcu_sched(&rd->rcu, free_rootdomain);
> +}
> +
> static int init_rootdomain(struct root_domain *rd)
> {
> if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
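The sched_get_rd()/sched_put_rd() pair above follows the usual pattern of pinning an object before handing it to asynchronous work. A minimal userspace sketch, with illustrative names and C11 atomics standing in for the kernel's atomic_t and call_rcu_sched():

/* Illustrative only: pin an object across asynchronous work with a refcount. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct root_domain_like {
	atomic_int refcount;
};

static void get_rd(struct root_domain_like *rd)
{
	atomic_fetch_add(&rd->refcount, 1);
}

static void put_rd(struct root_domain_like *rd)
{
	if (atomic_fetch_sub(&rd->refcount, 1) == 1)
		free(rd);	/* stands in for call_rcu_sched(..., free_rootdomain) */
}

int main(void)
{
	struct root_domain_like *rd = calloc(1, sizeof(*rd));

	atomic_init(&rd->refcount, 1);	/* owner's reference */

	get_rd(rd);			/* taken before queueing the IPI work */
	put_rd(rd);			/* owner drops its reference meanwhile */
	put_rd(rd);			/* work finishes: last reference frees */
	printf("freed exactly once, after the work completed\n");
	return 0;
}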
I see this was just applied to Linus's tree. It probably should be
tagged for stable as well.
-- Steve
On Tue, 6 Feb 2018 03:54:16 -0800
"tip-bot for Steven Rostedt (VMware)" <tipbot(a)zytor.com> wrote:
> Commit-ID: ad0f1d9d65938aec72a698116cd73a980916895e
> Gitweb: https://git.kernel.org/tip/ad0f1d9d65938aec72a698116cd73a980916895e
> Author: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
> AuthorDate: Tue, 23 Jan 2018 20:45:37 -0500
> Committer: Ingo Molnar <mingo(a)kernel.org>
> CommitDate: Tue, 6 Feb 2018 10:20:33 +0100
>
> sched/rt: Use container_of() to get root domain in rto_push_irq_work_func()
>
> When the rto_push_irq_work_func() is called, it looks at the RT overloaded
> bitmask in the root domain via the runqueue (rq->rd). The problem is that
> during CPU up and down, nothing here stops rq->rd from changing between
> taking the rq->rd->rto_lock and releasing it. That means the lock that is
> released is not the same lock that was taken.
>
> Instead of using this_rq()->rd to get the root domain, as the irq work is
> part of the root domain, we can simply get the root domain from the irq work
> that is passed to the routine:
>
> container_of(work, struct root_domain, rto_push_work)
>
> This keeps the root domain consistent.
>
> Reported-by: Pavan Kondeti <pkondeti(a)codeaurora.org>
> Signed-off-by: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
> Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
> Cc: Andrew Morton <akpm(a)linux-foundation.org>
> Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
> Cc: Mike Galbraith <efault(a)gmx.de>
> Cc: Peter Zijlstra <peterz(a)infradead.org>
> Cc: Thomas Gleixner <tglx(a)linutronix.de>
> Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic")
> Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtN…
> Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
> ---
> kernel/sched/rt.c | 15 ++++++++-------
> 1 file changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 862a513..2fb627d 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
> * the rt_loop_next will cause the iterator to perform another scan.
> *
> */
> -static int rto_next_cpu(struct rq *rq)
> +static int rto_next_cpu(struct root_domain *rd)
> {
> - struct root_domain *rd = rq->rd;
> int next;
> int cpu;
>
> @@ -1985,7 +1984,7 @@ static void tell_cpu_to_push(struct rq *rq)
> * Otherwise it is finishing up and an ipi needs to be sent.
> */
> if (rq->rd->rto_cpu < 0)
> - cpu = rto_next_cpu(rq);
> + cpu = rto_next_cpu(rq->rd);
>
> raw_spin_unlock(&rq->rd->rto_lock);
>
> @@ -1998,6 +1997,8 @@ static void tell_cpu_to_push(struct rq *rq)
> /* Called from hardirq context */
> void rto_push_irq_work_func(struct irq_work *work)
> {
> + struct root_domain *rd =
> + container_of(work, struct root_domain, rto_push_work);
> struct rq *rq;
> int cpu;
>
> @@ -2013,18 +2014,18 @@ void rto_push_irq_work_func(struct irq_work *work)
> raw_spin_unlock(&rq->lock);
> }
>
> - raw_spin_lock(&rq->rd->rto_lock);
> + raw_spin_lock(&rd->rto_lock);
>
> /* Pass the IPI to the next rt overloaded queue */
> - cpu = rto_next_cpu(rq);
> + cpu = rto_next_cpu(rd);
>
> - raw_spin_unlock(&rq->rd->rto_lock);
> + raw_spin_unlock(&rd->rto_lock);
>
> if (cpu < 0)
> return;
>
> /* Try the next RT overloaded CPU */
> - irq_work_queue_on(&rq->rd->rto_push_work, cpu);
> + irq_work_queue_on(&rd->rto_push_work, cpu);
> }
> #endif /* HAVE_RT_PUSH_IPI */
>
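The container_of() use above is the standard way to recover the enclosing object from a pointer to one of its embedded members, so the handler never needs to dereference rq->rd at all. A minimal userspace sketch with illustrative names, not the scheduler code:

/* Illustrative only: recover the enclosing struct from an embedded member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work_like { int pending; };

struct root_domain_like {
	int id;
	struct irq_work_like rto_push_work;	/* embedded, as in root_domain */
};

/* The handler only receives a pointer to the embedded work item. */
static void push_work_func(struct irq_work_like *work)
{
	struct root_domain_like *rd =
		container_of(work, struct root_domain_like, rto_push_work);

	printf("handler runs for root domain %d\n", rd->id);
}

int main(void)
{
	struct root_domain_like rd = { .id = 42 };

	push_work_func(&rd.rto_push_work);
	return 0;
}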