This reverts commit 191cf872190de28a92e1bd2b56d8860e37e07443.
That commit should never have been backported since it relies on a change in locking semantics that was introduced in v4.8 and not backported. Because of this, the backported commit to sch_fq leads to lockups because of the double locking.
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
 net/sched/sch_fq.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index f4aa2ab4713a..eb814ffc0902 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -830,24 +830,20 @@ nla_put_failure:
 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
         struct fq_sched_data *q = qdisc_priv(sch);
-        struct tc_fq_qd_stats st;
-
-        sch_tree_lock(sch);
-
-        st.gc_flows = q->stat_gc_flows;
-        st.highprio_packets = q->stat_internal_packets;
-        st.tcp_retrans = q->stat_tcp_retrans;
-        st.throttled = q->stat_throttled;
-        st.flows_plimit = q->stat_flows_plimit;
-        st.pkts_too_long = q->stat_pkts_too_long;
-        st.allocation_errors = q->stat_allocation_errors;
-        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
-        st.flows = q->flows;
-        st.inactive_flows = q->inactive_flows;
-        st.throttled_flows = q->throttled_flows;
-        st.pad = 0;
-
-        sch_tree_unlock(sch);
+        u64 now = ktime_get_ns();
+        struct tc_fq_qd_stats st = {
+                .gc_flows = q->stat_gc_flows,
+                .highprio_packets = q->stat_internal_packets,
+                .tcp_retrans = q->stat_tcp_retrans,
+                .throttled = q->stat_throttled,
+                .flows_plimit = q->stat_flows_plimit,
+                .pkts_too_long = q->stat_pkts_too_long,
+                .allocation_errors = q->stat_allocation_errors,
+                .flows = q->flows,
+                .inactive_flows = q->inactive_flows,
+                .throttled_flows = q->throttled_flows,
+                .time_next_delayed_flow = q->time_next_delayed_flow - now,
+        };
 
         return gnet_stats_copy_app(d, &st, sizeof(st));
 }

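A note on why the backported hunk locks up: sch_tree_lock() takes the qdisc root lock, and on the pre-4.8 stable kernels this revert targets, the stats-dump path already holds that lock when fq_dump_stats() runs, so the callback re-acquires a lock its caller owns. Kernel spinlocks are not recursive, so the CPU spins on its own lock. The sketch below is only a userspace analogy of that pattern, not kernel code: it uses an error-checking POSIX mutex (which reports EDEADLK instead of hanging) and invented names, nothing from net/sched.

/*
 * Userspace analogy of the double-locking bug (all names made up for
 * illustration).  The "dump path" takes a lock and then calls a stats
 * callback that takes the same lock again.  An error-checking pthread
 * mutex reports the self-deadlock instead of spinning the way a
 * non-recursive kernel spinlock would.
 *
 * Build with: cc -pthread demo.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t tree_lock;       /* stand-in for the qdisc root lock */

static void dump_stats(void)
{
        /* The backported callback re-takes the lock its caller holds. */
        int err = pthread_mutex_lock(&tree_lock);

        if (err == EDEADLK) {
                printf("relock of a held lock: %s\n", strerror(err));
                return;
        }
        pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        /* Detect the self-deadlock instead of hanging forever. */
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&tree_lock, &attr);

        pthread_mutex_lock(&tree_lock); /* dump path already holds the lock */
        dump_stats();                   /* callback tries to take it again  */
        pthread_mutex_unlock(&tree_lock);
        return 0;
}

In the kernel the equivalent re-acquisition never returns an error; the CPU simply spins on the qdisc lock it already holds, which is the lockup this revert removes.
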
On Tue, Jun 23, 2020 at 05:00:53PM +0200, Toke Høiland-Jørgensen wrote:
> This reverts commit 191cf872190de28a92e1bd2b56d8860e37e07443.
>
> That commit should never have been backported since it relies on a
> change in locking semantics that was introduced in v4.8 and not
> backported. Because of this, the backported commit to sch_fq leads to
> lockups because of the double locking.
>
> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
> ---
>  net/sched/sch_fq.c | 32 ++++++++++++++------------------
>  1 file changed, 14 insertions(+), 18 deletions(-)

Thanks, now applied.
greg k-h