From: Jan Kara jack@suse.cz
[ Upstream commit e3fce68cdbed297d927e993b3ea7b8b1cee545da ]
Currently dax_iomap_rw() takes care of invalidating page tables and evicting hole pages from the radix tree when write(2) to the file happens. This invalidation is only necessary when there is some block allocation resulting from write(2). Furthermore, in its current place the invalidation is racy wrt a page fault instantiating a hole page just after we have invalidated it.
So perform the page invalidation inside dax_iomap_actor() where we can do it only when really necessary and after blocks have been allocated so nobody will be instantiating new hole pages anymore.
Reviewed-by: Christoph Hellwig hch@lst.de Reviewed-by: Ross Zwisler ross.zwisler@linux.intel.com Signed-off-by: Jan Kara jack@suse.cz Signed-off-by: Dan Williams dan.j.williams@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- fs/dax.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c index bf6218da7928..800748f10b3d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1265,6 +1265,17 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data, if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) return -EIO;
+ /* + * Write can allocate block for an area which has a hole page mapped + * into page tables. We have to tear down these mappings so that data + * written by write(2) is visible in mmap. + */ + if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) { + invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_SHIFT, + (end - 1) >> PAGE_SHIFT); + } + while (pos < end) { unsigned offset = pos & (PAGE_SIZE - 1); struct blk_dax_ctl dax = { 0 }; @@ -1329,23 +1340,6 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, if (iov_iter_rw(iter) == WRITE) flags |= IOMAP_WRITE;
- /* - * Yes, even DAX files can have page cache attached to them: A zeroed - * page is inserted into the pagecache when we have to serve a write - * fault on a hole. It should never be dirtied and can simply be - * dropped from the pagecache once we get real data for the page. - * - * XXX: This is racy against mmap, and there's nothing we can do about - * it. We'll eventually need to shift this down even further so that - * we can check if we allocated blocks over a hole first. - */ - if (mapping->nrpages) { - ret = invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, - (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT); - WARN_ON_ONCE(ret); - } - while (iov_iter_count(iter)) { ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, iter, iomap_dax_actor);
From: Slava Shwartsman slavash@mellanox.com
[ Upstream commit 61b6034c6cfdcb265bb453505c3d688e7567727a ]
is_power_of_2 expects an unsigned long and we pass the u64 max_val_cycles; this will be truncated on 32-bit systems, and the result is not what we were expecting. div_u64 expects a u32 as its second argument and we pass max_val_cycles_rounded, which is u64, hence it will always be truncated. The fix was tested on both 64-bit and 32-bit systems and gave the same results for max_val_cycles and max_val_cycles_rounded.
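For illustration only, here is a small userspace sketch (the values and helper names below are made up for the example, not taken from the driver) of why the old checks went wrong on 32-bit and how fls64-based rounding avoids them:

  /* Demonstrates the 32-bit truncation problem and the replacement rounding. */
  #include <stdint.h>
  #include <stdio.h>

  /* stand-in for the kernel's fls64(): 1-based index of the highest set bit */
  static int fls64_demo(uint64_t x)
  {
      return x ? 64 - __builtin_clzll(x) : 0;
  }

  static int is_power_of_2_u32(uint32_t n)
  {
      return n != 0 && (n & (n - 1)) == 0;
  }

  int main(void)
  {
      /* hypothetical cycle wrap-around value that needs more than 32 bits */
      uint64_t max_val_cycles = 5000000000ULL;

      /* model of the 32-bit bug: the implicit conversion to unsigned long
       * keeps only the low 32 bits, so the power-of-two test is answered
       * for the wrong number */
      uint32_t truncated = (uint32_t)(max_val_cycles + 1);
      printf("truncated test sees %u -> %d\n", truncated,
             is_power_of_2_u32(truncated));

      /* the fix stays in u64 the whole way: round up to a power of two */
      uint64_t rounded = 1ULL << fls64_demo(max_val_cycles - 1);
      printf("rounded up to %llu\n", (unsigned long long)rounded);
      return 0;
  }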
Fixes: 4850cf458157 ("net/mlx4_en: Resolve dividing by zero in 32-bit system") Signed-off-by: Slava Shwartsman slavash@mellanox.com Signed-off-by: Tariq Toukan tariqt@mellanox.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index d4d97ca12e83..f9897d17f01d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -251,13 +251,9 @@ static u32 freq_to_shift(u16 freq) { u32 freq_khz = freq * 1000; u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; - u64 tmp_rounded = - roundup_pow_of_two(max_val_cycles) > max_val_cycles ? - roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX; - u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? - max_val_cycles : tmp_rounded; + u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1); /* calculate max possible multiplier in order to fit in 64bit */ - u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); + u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
/* This comes from the reverse of clocksource_khz2mult */ return ilog2(div_u64(max_mul * freq_khz, 1000000));
From: Guillaume Nault g.nault@alphalink.fr
[ Upstream commit a9b2dff80be979432484afaf7f8d8e73f9e8838a ]
For connected sockets, __l2tp_ip{,6}_bind_lookup() needs to check the remote IP when looking for a matching socket. Otherwise a connected socket can receive traffic not originating from its peer.
Drop l2tp_ip_bind_lookup() and l2tp_ip6_bind_lookup() instead of updating their prototype, as these functions aren't used.
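As a rough illustration of the strengthened lookup rule (plain integers stand in for the kernel's socket fields; all names below are invented for the example):

  #include <stdint.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative model: a bound socket matches a packet only if, besides the
   * local-address check, its connected peer address (if any) equals the
   * packet's source address. */
  struct demo_sock {
      uint32_t rcv_saddr;   /* local address the socket is bound to (0 = any) */
      uint32_t daddr;       /* peer address if connect()ed, 0 otherwise */
  };

  static bool demo_match(const struct demo_sock *sk, uint32_t laddr, uint32_t raddr)
  {
      if (sk->rcv_saddr && sk->rcv_saddr != laddr)
          return false;
      /* the new check: connected sockets only accept their peer's traffic */
      if (sk->daddr && raddr && sk->daddr != raddr)
          return false;
      return true;
  }

  int main(void)
  {
      struct demo_sock connected = { .rcv_saddr = 0x0a000001, .daddr = 0x0a000002 };

      /* traffic from the peer still matches ... */
      printf("%d\n", demo_match(&connected, 0x0a000001, 0x0a000002)); /* 1 */
      /* ... but traffic from a third host no longer does */
      printf("%d\n", demo_match(&connected, 0x0a000001, 0x0a000003)); /* 0 */
      return 0;
  }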
Signed-off-by: Guillaume Nault g.nault@alphalink.fr Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/l2tp/l2tp_ip.c | 19 ++++++------------- net/l2tp/l2tp_ip6.c | 20 ++++++-------------- 2 files changed, 12 insertions(+), 27 deletions(-)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 3468d5635d0a..9d77a54e8854 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -48,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) return (struct l2tp_ip_sock *)sk; }
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) +static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, + __be32 raddr, int dif, u32 tunnel_id) { struct sock *sk;
@@ -62,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && + (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) && (!sk->sk_bound_dev_if || !dif || sk->sk_bound_dev_if == dif)) goto found; @@ -72,15 +74,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif return sk; }
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) -{ - struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); - if (sk) - sock_hold(sk); - - return sk; -} - /* When processing receive frames, there are two cases to * consider. Data frames consist of a non-zero session-id and an * optional cookie. Control frames consist of a regular L2TP header @@ -186,8 +179,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock); - sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb), - tunnel_id); + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, + inet_iif(skb), tunnel_id); if (!sk) { read_unlock_bh(&l2tp_ip_lock); goto discard; @@ -289,7 +282,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; /* Use device */
write_lock_bh(&l2tp_ip_lock); - if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, + if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0, sk->sk_bound_dev_if, addr->l2tp_conn_id)) { write_unlock_bh(&l2tp_ip_lock); ret = -EADDRINUSE; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 1d522ce833e6..247097289fd0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
static struct sock *__l2tp_ip6_bind_lookup(struct net *net, struct in6_addr *laddr, + const struct in6_addr *raddr, int dif, u32 tunnel_id) { struct sock *sk;
sk_for_each_bound(sk, &l2tp_ip6_bind_table) { const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk); + const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL) @@ -73,6 +75,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) && + (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) && (!sk->sk_bound_dev_if || !dif || sk->sk_bound_dev_if == dif)) goto found; @@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, return sk; }
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, - struct in6_addr *laddr, - int dif, u32 tunnel_id) -{ - struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); - if (sk) - sock_hold(sk); - - return sk; -} - /* When processing receive frames, there are two cases to * consider. Data frames consist of a non-zero session-id and an * optional cookie. Control frames consist of a regular L2TP header @@ -200,8 +192,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock); - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb), - tunnel_id); + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, + inet6_iif(skb), tunnel_id); if (!sk) { read_unlock_bh(&l2tp_ip6_lock); goto discard; @@ -339,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) rcu_read_unlock();
write_lock_bh(&l2tp_ip6_lock); - if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if, addr->l2tp_conn_id)) { write_unlock_bh(&l2tp_ip6_lock); err = -EADDRINUSE;
From: M'boumba Cedric Madianga cedric.madianga@gmail.com
[ Upstream commit 7e96304d99477de1f70db42035071e56439da817 ]
This patch sets the right number of arguments to be used for DMA clients which request channels from DT.
Signed-off-by: M'boumba Cedric Madianga cedric.madianga@gmail.com Reviewed-by: Ludovic BARRE ludovic.barre@st.com Signed-off-by: Vinod Koul vinod.koul@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/dma/stm32-dma.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 307547f4848d..bd758e67843c 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -976,21 +976,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, struct stm32_dma_chan *chan; struct dma_chan *c;
- if (dma_spec->args_count < 3) + if (dma_spec->args_count < 4) return NULL;
cfg.channel_id = dma_spec->args[0]; cfg.request_line = dma_spec->args[1]; cfg.stream_config = dma_spec->args[2]; - cfg.threshold = 0; + cfg.threshold = dma_spec->args[3];
if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) return NULL;
- if (dma_spec->args_count > 3) - cfg.threshold = dma_spec->args[3]; - chan = &dmadev->chan[cfg.channel_id];
c = dma_get_slave_channel(&chan->vchan.chan);
From: Vincent Pelletier plr.vincent@gmail.com
[ Upstream commit 354bc45bf329494ef6051f3229ef50b9e2a7ea2a ]
Reserved1 is documented as expected to be set to 0, but this test fails when it is set to 0. Reverse the condition.
Signed-off-by: Vincent Pelletier plr.vincent@gmail.com Signed-off-by: Felipe Balbi felipe.balbi@linux.intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/usb/gadget/function/f_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 273320fa30ae..4fce83266926 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -2263,7 +2263,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) || d->bFirstInterfaceNumber >= ffs->interfaces_count || - !d->Reserved1) + d->Reserved1) return -EINVAL; for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i])
From: Florian Fainelli f.fainelli@gmail.com
[ Upstream commit bb7da333d0a9f3bddc08f84187b7579a3f68fd24 ]
Since we need to pad our packets, utilize skb_put_padto() which increases skb->len by how much we need to pad, allowing us to eliminate the test on skb->len right below.
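A toy model of the difference between the two helpers (this is not the kernel implementation, just an illustration of why the skb->len test becomes unnecessary):

  #include <stdio.h>
  #include <string.h>

  struct toy_skb {
      unsigned char data[128];
      unsigned int len;
  };

  /* like skb_padto(): zero-fill up to 'size' but leave len untouched */
  static int toy_padto(struct toy_skb *skb, unsigned int size)
  {
      if (skb->len >= size)
          return 0;
      memset(skb->data + skb->len, 0, size - skb->len);
      return 0;
  }

  /* like skb_put_padto(): zero-fill and account the padding in len */
  static int toy_put_padto(struct toy_skb *skb, unsigned int size)
  {
      if (skb->len >= size)
          return 0;
      memset(skb->data + skb->len, 0, size - skb->len);
      skb->len = size;
      return 0;
  }

  int main(void)
  {
      struct toy_skb a = { .len = 42 }, b = { .len = 42 };

      toy_padto(&a, 64);      /* caller must still use max(len, 64) for DMA */
      toy_put_padto(&b, 64);  /* len already reflects the padded size */
      printf("padto len=%u, put_padto len=%u\n", a.len, b.len); /* 42 vs 64 */
      return 0;
  }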
Signed-off-by: Florian Fainelli f.fainelli@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/broadcom/bcmsysport.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index be7ec5a76a54..c89e89979267 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1039,13 +1039,12 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, * (including FCS and tag) because the length verification is done after * the Broadcom tag is stripped off the ingress packet. */ - if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { + if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { ret = NETDEV_TX_OK; goto out; }
- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? - ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; + skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); if (dma_mapping_error(kdev, mapping)) {
From: M'boumba Cedric Madianga cedric.madianga@gmail.com
[ Upstream commit 57b5a32135c813f2ab669039fb4ec16b30cb3305 ]
chan->desc is always set to NULL when a DMA transfer is complete. As a DMA transfer could complete during the call to stm32_dma_tx_status, we need to be sure that chan->desc is not NULL before using this variable, to avoid a null pointer dereference.
Signed-off-by: M'boumba Cedric Madianga cedric.madianga@gmail.com Reviewed-by: Ludovic BARRE ludovic.barre@st.com Signed-off-by: Vinod Koul vinod.koul@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/dma/stm32-dma.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index bd758e67843c..ae3f60be7759 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -884,7 +884,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, struct virt_dma_desc *vdesc; enum dma_status status; unsigned long flags; - u32 residue; + u32 residue = 0;
status = dma_cookie_status(c, cookie, state); if ((status == DMA_COMPLETE) || (!state)) @@ -892,16 +892,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
spin_lock_irqsave(&chan->vchan.lock, flags); vdesc = vchan_find_desc(&chan->vchan, cookie); - if (cookie == chan->desc->vdesc.tx.cookie) { + if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) residue = stm32_dma_desc_residue(chan, chan->desc, chan->next_sg); - } else if (vdesc) { + else if (vdesc) residue = stm32_dma_desc_residue(chan, to_stm32_dma_desc(vdesc), 0); - } else { - residue = 0; - } - dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
From: Varun Prakash varun@chelsio.com
[ Upstream commit a9a8cdb368d99bb655b5cdabea560446db0527cc ]
ip6_route_output() never returns NULL so check dst->error instead of !dst.
Signed-off-by: Varun Prakash varun@chelsio.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c index 0f0de5b63622..d04a6c163445 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c @@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi, if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = sin6_scope_id; dst = ip6_route_output(&init_net, NULL, &fl6); - if (!dst) - goto out; - if (!cxgb_our_interface(lldi, get_real_dev, - ip6_dst_idev(dst)->dev) && - !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { + if (dst->error || + (!cxgb_our_interface(lldi, get_real_dev, + ip6_dst_idev(dst)->dev) && + !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) { dst_release(dst); - dst = NULL; + return NULL; } }
-out: return dst; } EXPORT_SYMBOL(cxgb_find_route6);
From: Florian Fainelli f.fainelli@gmail.com
[ Upstream commit 38e5a85562a6cd911fc26d951d576551a688574c ]
Inserting the TSB means adding an extra 8 bytes in front of the packet that are going to be used as metadata by the TDMA engine, but are stripped off, so it does not really help with the packet padding.

For some odd packet sizes that fall below the 60-byte payload (e.g. ARP) we can end up padding them after the TSB insertion, thus making them 64 bytes, but with the TDMA stripping off the first 8 bytes, they could still be smaller than the 64 bytes required to ingress the switch.
Fix this by swapping the padding and TSB insertion, guaranteeing that the packets have the right sizes.
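For illustration only, with approximate sizes: a 42-byte ARP frame handled in the old order becomes 42 + 8 = 50 bytes after TSB insertion and 64 after padding, but the TDMA strips the 8-byte TSB again, so only 56 bytes reach the switch - still short of the minimum. With the swapped order the frame is padded to 64 bytes first, grows to 72 with the TSB, and arrives at the switch as the required 64 bytes.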
Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver") Signed-off-by: Florian Fainelli f.fainelli@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/broadcom/bcmsysport.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c89e89979267..744ed6ddaf37 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1023,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, goto out; }
- /* Insert TSB and checksum infos */ - if (priv->tsb_en) { - skb = bcm_sysport_insert_tsb(skb, dev); - if (!skb) { - ret = NETDEV_TX_OK; - goto out; - } - } - /* The Ethernet switch we are interfaced with needs packets to be at * least 64 bytes (including FCS) otherwise they will be discarded when * they enter the switch port logic. When Broadcom tags are enabled, we @@ -1044,6 +1035,15 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, goto out; }
+ /* Insert TSB and checksum infos */ + if (priv->tsb_en) { + skb = bcm_sysport_insert_tsb(skb, dev); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + } + skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
From: Adam Ford aford173@gmail.com
[ Upstream commit a3ac350793d90d1da631c8beeee9352387974ed5 ]
Commit 485fa1261f78 ("ARM: OMAP2+: LogicPD Torpedo + Wireless: Add Bluetooth") set the wrong baud rate for the UART. The Baud rate was 300,000 and it should be 3,000,000 for WL1283.
Signed-off-by: Adam Ford aford173@gmail.com Signed-off-by: Tony Lindgren tony@atomide.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- arch/arm/mach-omap2/pdata-quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 770216baa737..da310bb779b9 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -162,7 +162,7 @@ static struct ti_st_plat_data wilink7_pdata = { .nshutdown_gpio = 162, .dev_name = "/dev/ttyO1", .flow_cntrl = 1, - .baud_rate = 300000, + .baud_rate = 3000000, };
static struct platform_device wl128x_device = {
From: David Forster dforster@brocade.com
[ Upstream commit 93e246f783e6bd1bc64fdfbfe68b18161f69b28e ]
vti6 interface is registered before the rtnl_link_ops block is attached. As a result the resulting RTM_NEWLINK is missing IFLA_INFO_KIND. Re-order attachment of rtnl_link_ops block to fix.
Signed-off-by: Dave Forster dforster@brocade.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/ipv6/ip6_vti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 816f79d1a8a3..67e882d49195 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev) struct vti6_net *ip6n = net_generic(net, vti6_net_id); int err;
+ dev->rtnl_link_ops = &vti6_link_ops; err = register_netdevice(dev); if (err < 0) goto out;
strcpy(t->parms.name, dev->name); - dev->rtnl_link_ops = &vti6_link_ops;
dev_hold(dev); vti6_tnl_link(ip6n, t);
From: Peter Ujfalusi peter.ujfalusi@ti.com
[ Upstream commit 657279778af54f35e54b07b6687918f254a2992c ]
OMAP1510, OMAP5910 and OMAP310 have only 9 logical channels. OMAP1610, OMAP5912, OMAP1710, OMAP730, and OMAP850 have 16 logical channels available.
The hard-wired 17 for lch_count must have been used to cover the 16 + 1 dedicated LCD channel; in reality we can only use 9 or 16 channels.

The d->chan_count is not used by the omap-dma stack, so we can skip its setup. chan_count was configured to the number of logical channels and not the actual number of physical channels anyway.
Signed-off-by: Peter Ujfalusi peter.ujfalusi@ti.com Acked-by: Aaro Koskinen aaro.koskinen@iki.fi Signed-off-by: Tony Lindgren tony@atomide.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- arch/arm/mach-omap1/dma.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index f6ba589cd312..c821c1d5610e 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c @@ -32,7 +32,6 @@ #include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800) -#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void) goto exit_iounmap; }
- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; - /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; @@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void) d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16;
- if (cpu_is_omap15xx()) - d->chan_count = 9; - else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { - if (!(d->dev_caps & ENABLE_1510_MODE)) - d->chan_count = 16; + /* available logical channels */ + if (cpu_is_omap15xx()) { + d->lch_count = 9; + } else { + if (d->dev_caps & ENABLE_1510_MODE) + d->lch_count = 9; else - d->chan_count = 9; + d->lch_count = 16; }
p = dma_plat_info;
From: Ivan Vecera cera@cera.cz
[ Upstream commit 1d0f110a2c6c4bca3dbcc4b0e27f1e3dc2d44a2c ]
Commit 988d44b "be2net: Avoid redundant addition of mac address in HW" introduced the be_dev_mac_add and be_uc_mac_add helpers, which incorrectly access adapter->uc_list as an array of bytes instead of an array of be_eth_addr. Consequently the NIC is not filled with valid data, so unicast filtering is broken.
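The indexing mistake can be reproduced in a few lines of standalone C (the struct below is only a stand-in for be_eth_addr, not the driver's definition):

  #include <stdio.h>
  #include <string.h>

  #define ETH_ALEN 6

  /* stand-in for an entry of adapter->uc_list */
  struct demo_eth_addr {
      unsigned char mac[ETH_ALEN];
  };

  int main(void)
  {
      struct demo_eth_addr uc_list[32];
      int i = 2;

      memset(uc_list, 0, sizeof(uc_list));

      /* buggy pattern: the index is scaled as if uc_list were a flat byte
       * array, but it actually selects struct element number i * ETH_ALEN */
      unsigned char *wrong = (unsigned char *)&uc_list[i * ETH_ALEN];

      /* fixed pattern: index the array of structs and take its mac field */
      unsigned char *right = uc_list[i].mac;

      printf("wrong offset %td, right offset %td\n",
             wrong - (unsigned char *)uc_list,
             right - (unsigned char *)uc_list);   /* 72 vs 12 with this layout */
      return 0;
  }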
Cc: Sathya Perla sathya.perla@broadcom.com Cc: Ajit Khaparde ajit.khaparde@broadcom.com Cc: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com Cc: Somnath Kotur somnath.kotur@broadcom.com Fixes: 988d44b be2net: Avoid redundant addition of mac address in HW Signed-off-by: Ivan Vecera cera@cera.cz Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/emulex/benet/be_main.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5626908f3f7a..c82f22b244c1 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
/* Check if mac has already been added as part of uc-list */ for (i = 0; i < adapter->uc_macs; i++) { - if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN], - mac)) { + if (ether_addr_equal(adapter->uc_list[i].mac, mac)) { /* mac already added, skip addition */ adapter->pmac_id[0] = adapter->pmac_id[i + 1]; return 0; @@ -1679,14 +1678,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx) { - if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], - adapter->dev_mac)) { + if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) { adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0]; return 0; }
- return be_cmd_pmac_add(adapter, - (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], + return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac, adapter->if_handle, &adapter->pmac_id[uc_idx + 1], 0); }
From: Ivan Vecera cera@cera.cz
[ Upstream commit 6052cd1af86f9833b6b0b60d5d4787c4a06d65ea ]
The adapter->pmac_id[0] item is used for the primary MAC address but this is not true for adapter->uc_list[0], as is assumed in be_set_uc_list(). There are N UC addresses copied first from net_device to adapter->uc_list[1..N] and then N UC addresses from adapter->uc_list[0..N-1] are sent to HW. So the last UC address is never stored into HW and address 00:00:00:00:00:00 (from uc_list[0]) is used instead.
Cc: Sathya Perla sathya.perla@broadcom.com Cc: Ajit Khaparde ajit.khaparde@broadcom.com Cc: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com Cc: Somnath Kotur somnath.kotur@broadcom.com Fixes: b717241 be2net: replace polling with sleeping in the FW completion path Signed-off-by: Ivan Vecera cera@cera.cz Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/emulex/benet/be_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c82f22b244c1..fbb396b08495 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1719,9 +1719,8 @@ static void be_set_uc_list(struct be_adapter *adapter) }
if (adapter->update_uc_list) { - i = 1; /* First slot is claimed by the Primary MAC */ - /* cache the uc-list in adapter array */ + i = 0; netdev_for_each_uc_addr(ha, netdev) { ether_addr_copy(adapter->uc_list[i].mac, ha->addr); i++;
From: Vlad Tsyrklevich vlad@tsyrklevich.net
[ Upstream commit ce7e40c432ba84da104438f6799d460a4cad41bc ]
ipddp_route structs contain alignment padding so kernel heap memory is leaked when they are copied to user space in ipddp_ioctl(SIOCFINDIPDDPRT). Change kmalloc() to kzalloc() to clear that memory.
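A minimal userspace analogue of the leak (malloc/calloc stand in for kmalloc/kzalloc; the struct layout is invented for the example):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Illustrative only: a struct with a hole between members. kmalloc()'d
   * kernel memory (like malloc() here) leaves that hole uninitialized, so
   * copying the whole struct to user space leaks old heap contents.
   * kzalloc()/calloc() zero the object, padding included. */
  struct demo_route {
      unsigned char node;   /* 1 byte, usually followed by 3 padding bytes */
      unsigned int  ip;     /* 4 bytes */
  };

  int main(void)
  {
      struct demo_route *a = malloc(sizeof(*a));    /* padding: old heap data */
      struct demo_route *b = calloc(1, sizeof(*b)); /* padding: zeroed */

      a->node = 1; a->ip = 0x0a000001;
      b->node = 1; b->ip = 0x0a000001;

      /* comparing the full objects may differ even though every named
       * member is equal -- that difference is the uninitialized padding */
      printf("full-object compare: %s\n",
             memcmp(a, b, sizeof(*a)) ? "may differ (padding)" : "equal");

      free(a);
      free(b);
      return 0;
  }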
Signed-off-by: Vlad Tsyrklevich vlad@tsyrklevich.net Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/appletalk/ipddp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index e90c6a7333d7..2e4649655181 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) */ static int ipddp_create(struct ipddp_route *new_rt) { - struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); + struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL) return -ENOMEM;
From: Michal Hocko mhocko@suse.com
[ Upstream commit 2df26639e708a88dcc22171949da638a9998f3bc ]
Jia He has noticed that commit b9f00e147f27 ("mm, page_alloc: reduce branches in zone_statistics") has an unintentional side effect: remote node allocation requests are accounted as NUMA_MISS rather than NUMA_HIT and NUMA_OTHER if such a request doesn't use __GFP_OTHER_NODE.

There are potentially many of these because the flag is used very rarely while we have many users of __alloc_pages_node.

Fix this by simply ignoring __GFP_OTHER_NODE (it can be removed in a follow-up patch) and treating all allocations that were satisfied from the preferred zone's node as NUMA_HIT, because this is the same node we requested the allocation from in most cases. If this is not the local node then we just account it as NUMA_OTHER rather than NUMA_LOCAL.

One downside would be that an allocation request for a node which is outside of the mempolicy nodemask would be reported as a hit, which is a bit weird, but that was already the case before b9f00e147f27.
Fixes: b9f00e147f27 ("mm, page_alloc: reduce branches in zone_statistics") Link: http://lkml.kernel.org/r/20170102153057.9451-2-mhocko@kernel.org Signed-off-by: Michal Hocko mhocko@suse.com Reported-by: Jia He hejianet@gmail.com Reviewed-by: Vlastimil Babka vbabka@suse.cz # with cbmc[1] superpowers Acked-by: Mel Gorman mgorman@suse.de Cc: Johannes Weiner hannes@cmpxchg.org Cc: Joonsoo Kim iamjoonsoo.kim@lge.com Cc: Taku Izumi izumi.taku@jp.fujitsu.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@verizon.com --- mm/page_alloc.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7064aae8ded7..bed0fa6125f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2583,30 +2583,23 @@ int __isolate_free_page(struct page *page, unsigned int order) * Update NUMA hit/miss statistics * * Must be called with interrupts disabled. - * - * When __GFP_OTHER_NODE is set assume the node of the preferred - * zone is the local node. This is useful for daemons who allocate - * memory on behalf of other processes. */ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags) { #ifdef CONFIG_NUMA - int local_nid = numa_node_id(); enum zone_stat_item local_stat = NUMA_LOCAL;
- if (unlikely(flags & __GFP_OTHER_NODE)) { + if (z->node != numa_node_id()) local_stat = NUMA_OTHER; - local_nid = preferred_zone->node; - }
- if (z->node == local_nid) { + if (z->node == preferred_zone->node) __inc_zone_state(z, NUMA_HIT); - __inc_zone_state(z, local_stat); - } else { + else { __inc_zone_state(z, NUMA_MISS); __inc_zone_state(preferred_zone, NUMA_FOREIGN); } + __inc_zone_state(z, local_stat); #endif }
From: "Eric W. Biederman" ebiederm@xmission.com
[ Upstream commit 75422726b0f717d67db3283c2eb5bc14fa2619c5 ]
Add MS_KERNMOUNT to the flags that are passed. Use sget_userns and force &init_user_ns instead of calling sget so that even if called from a weird context the internal filesystem will be considered to be in the initial user namespace.

Luis Ressel reported that the failure to pass MS_KERNMOUNT into mount_pseudo broke his in-development graphics driver that uses the generic drm infrastructure. I am not certain the driver was bug-free in its usage of that infrastructure, but since mount_pseudo_xattr can never be triggered by userspace it is clearer, less error prone, and less problematic for the code to be explicit.
Reported-by: Luis Ressel aranea@aixah.de Tested-by: Luis Ressel aranea@aixah.de Acked-by: Al Viro viro@ZenIV.linux.org.uk Signed-off-by: "Eric W. Biederman" ebiederm@xmission.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- fs/libfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/libfs.c b/fs/libfs.c index 48826d4da189..9588780ad43e 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name, struct inode *root; struct qstr d_name = QSTR_INIT(name, strlen(name));
- s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL); + s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER, + &init_user_ns, NULL); if (IS_ERR(s)) return ERR_CAST(s);
From: Johannes Berg johannes.berg@intel.com
[ Upstream commit 96aa2e7cf126773b16c6c19b7474a8a38d3c707e ]
In the current minimum chandef code there's an issue in that the recalculation can happen after rate control is initialized for a station that has a wider bandwidth than the current chanctx, and then rate control can immediately start using those higher rates which could cause problems.
Observe first of all that this problem exists because we don't take non-associated and non-uploaded stations into account. The restriction to non-associated is quite pointless and is one of the causes of the problem described above, since the rate init will happen before the station is set to associated; no frames could actually be sent until associated, but the rate table can already contain higher rates and that might cause problems.
Also, rejecting non-uploaded stations is wrong, since the rate control can select higher rates for those as well.
Secondly, it's then necessary to recalculate the minimal config before initializing rate control, so that when rate control is initialized, the higher rates are already available. This can be done easily by adding the necessary function call in rate init.
Change-Id: Ib9bc02d34797078db55459d196993f39dcd43070 Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/mac80211/chan.c | 3 --- net/mac80211/rate.c | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e75cbf6ecc26..a0d901d8992e 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata) !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) continue;
- if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC)) - continue; - max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); } rcu_read_unlock(); diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 206698bc93f4..9e2641d45587 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -40,6 +40,8 @@ void rate_control_rate_init(struct sta_info *sta)
ieee80211_sta_set_rx_nss(sta);
+ ieee80211_recalc_min_chandef(sta->sdata); + if (!ref) return;
From: Stephen Boyd sboyd@codeaurora.org
[ Upstream commit 3512a1ad56174308a9fd3e10f4b1e3e152e9ec01 ]
Failure to mark this pointer as __le32 causes checkers like sparse to complain:
net/qrtr/qrtr.c:274:16: warning: incorrect type in assignment (different base types)
net/qrtr/qrtr.c:274:16:    expected unsigned int [unsigned] [usertype] <noident>
net/qrtr/qrtr.c:274:16:    got restricted __le32 [usertype] <noident>
net/qrtr/qrtr.c:275:16: warning: incorrect type in assignment (different base types)
net/qrtr/qrtr.c:275:16:    expected unsigned int [unsigned] [usertype] <noident>
net/qrtr/qrtr.c:275:16:    got restricted __le32 [usertype] <noident>
net/qrtr/qrtr.c:276:16: warning: incorrect type in assignment (different base types)
net/qrtr/qrtr.c:276:16:    expected unsigned int [unsigned] [usertype] <noident>
net/qrtr/qrtr.c:276:16:    got restricted __le32 [usertype] <noident>
Silence it.
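For reference, a userspace approximation of the annotation (the typedefs below only mimic the kernel's; the real __le32 additionally carries sparse's __bitwise attribute, which is what produces the warnings above):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  typedef uint32_t u32;
  typedef uint32_t __le32;   /* kernel version is __bitwise-annotated */

  static __le32 demo_cpu_to_le32(u32 v)
  {
  #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
      return __builtin_bswap32(v);
  #else
      return v;
  #endif
  }

  int main(void)
  {
      /* declaring the on-wire buffer as __le32 documents (and, under
       * sparse, enforces) that only cpu_to_le32() values are stored here */
      __le32 buf[5] = { 0 };
      unsigned char bytes[sizeof(buf)];

      buf[0] = demo_cpu_to_le32(1);    /* e.g. a message type */
      buf[1] = demo_cpu_to_le32(42);   /* e.g. a node id */

      memcpy(bytes, buf, sizeof(buf));
      printf("first word on the wire: %02x %02x %02x %02x\n",
             bytes[0], bytes[1], bytes[2], bytes[3]);
      return 0;
  }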
Cc: Bjorn Andersson bjorn.andersson@linaro.org Signed-off-by: Stephen Boyd sboyd@codeaurora.org Acked-by: Bjorn Andersson bjorn.andersson@linaro.org Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/qrtr/qrtr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index c985ecbe9bd6..ae5ac175b2be 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, const int pkt_len = 20; struct qrtr_hdr *hdr; struct sk_buff *skb; - u32 *buf; + __le32 *buf;
skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL); if (!skb) @@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, hdr->dst_node_id = cpu_to_le32(dst_node); hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
- buf = (u32 *)skb_put(skb, pkt_len); + buf = (__le32 *)skb_put(skb, pkt_len); memset(buf, 0, pkt_len); buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX); buf[1] = cpu_to_le32(src_node);
From: Kazuya Mizuguchi kazuya.mizuguchi.ks@renesas.com
[ Upstream commit 18a3ed59d09cf81a6447aadf6931bf0c9ffec5e0 ]
Remove the Rx overflow log messages: in an environment where logging results in network traffic, logging may cause further overflows.
Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") Signed-off-by: Kazuya Mizuguchi kazuya.mizuguchi.ks@renesas.com [simon: reworked changelog] Signed-off-by: Simon Horman horms+renesas@verge.net.au Acked-by: Sergei Shtylyov sergei.shtylyov@cogentembedded.com Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/renesas/ravb_main.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 11623aad0e8e..10d3a9f6349e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -941,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { + if (priv->rx_over_errors != ndev->stats.rx_over_errors) ndev->stats.rx_over_errors = priv->rx_over_errors; - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); - } - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); - } out: return budget - quota; }
From: Andrzej Hajda a.hajda@samsung.com
[ Upstream commit 821b40b79db7dedbfe15ab330dfd181e661a533f ]
STANDALONE_UPDATE_F should be set if something changed in the plane configurations, including plane disable. The patch fixes page-fault bugs caused by decon still using framebuffers of disabled planes.
v2: fixed clear-bit code (Thx Marek)
v3: use test_and_clear_bit (Thx Joonyoung)
Signed-off-by: Andrzej Hajda a.hajda@samsung.com Tested-by: Joonyoung Shim jy0922.shim@samsung.com Signed-off-by: Inki Dae inki.dae@samsung.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index ef7fcb5f044b..09e8cc36948e 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -46,7 +46,8 @@ enum decon_flag_bits { BIT_CLKS_ENABLED, BIT_IRQS_ENABLED, BIT_WIN_UPDATED, - BIT_SUSPENDED + BIT_SUSPENDED, + BIT_REQUEST_UPDATE };
struct decon_context { @@ -313,6 +314,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); }
static void decon_disable_plane(struct exynos_drm_crtc *crtc, @@ -325,6 +327,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, return;
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); }
static void decon_atomic_flush(struct exynos_drm_crtc *crtc) @@ -338,8 +341,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false);
- /* update iff there are active windows */ - if (crtc->base.state->plane_mask) + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (ctx->out_type & IFTYPE_I80)
From: Andrzej Hajda a.hajda@samsung.com
[ Upstream commit f65a7c9cb3770ed4d3e7c57c66d7032689081b5e ]
Improper usage of the DECON_UPDATE register leads to subtle errors. If it is set in decon_commit when there are no active windows, it results in slow register updates - all subsequent shadow register updates take more than a full vblank. On the other hand, if it is not set when there are active windows, it results in garbage on the screen after suspend/resume of the FB console.
The patch hopefully fixes it.
Signed-off-by: Andrzej Hajda a.hajda@samsung.com Signed-off-by: Inki Dae inki.dae@samsung.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f3117fe8..ef7fcb5f044b 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -188,8 +188,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
/* enable output and display signal */ decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0); - - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); }
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, @@ -340,8 +338,9 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false);
- /* standalone update */ - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); + /* update iff there are active windows */ + if (crtc->base.state->plane_mask) + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags);
From: Benjamin Coddington bcodding@redhat.com
[ Upstream commit 4b09ec4b14a168bf2c687e1f598140c3c11e9222 ]
I have reports of a crash that look like __fput() was called twice for an NFSv4.0 file. It seems possible that the state manager could try to reclaim a lock and take a reference on the fl->fl_file at the same time the file is being released if, during the close(), a signal interrupts the wait for outstanding IO while removing locks, which then skips the removal of that lock.
Since 83bfff23e9ed ("nfs4: have do_vfs_lock take an inode pointer") has removed the need to traverse fl->fl_file->f_inode in nfs4_lock_done(), taking that reference is no longer necessary.
Signed-off-by: Benjamin Coddington bcodding@redhat.com Reviewed-by: Jeff Layton jlayton@redhat.com Signed-off-by: Trond Myklebust trond.myklebust@primarydata.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- fs/nfs/nfs4proc.c | 3 --- 1 file changed, 3 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a53b8e0c896a..8edfca5d1eb2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -38,7 +38,6 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/errno.h> -#include <linux/file.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/printk.h> @@ -6009,7 +6008,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->server = server; atomic_inc(&lsp->ls_count); p->ctx = get_nfs_open_context(ctx); - get_file(fl->fl_file); memcpy(&p->fl, fl, sizeof(p->fl)); return p; out_free_seqid: @@ -6122,7 +6120,6 @@ static void nfs4_lock_release(void *calldata) nfs_free_seqid(data->arg.lock_seqid); nfs4_put_lock_state(data->lsp); put_nfs_open_context(data->ctx); - fput(data->fl.fl_file); kfree(data); dprintk("%s: done!\n", __func__); }
From: Michal Kazior michal.kazior@tieto.com
[ Upstream commit dbef53621116474bb883f76f0ba6b7640bc42332 ]
Station structure is considered as not uploaded (to driver) until drv_sta_state() finishes. This call is however done after the structure is attached to mac80211 internal lists and hashes. This means mac80211 can lookup (and use) station structure before it is uploaded to a driver.
If this happens (the structure exists, but sta->uploaded is false) the fast_tx path can still be taken. Deep in the fastpath call, sta->uploaded is checked to derive the "pubsta" argument for ieee80211_get_txq(). If sta->uploaded is false (and sta is actually non-NULL), ieee80211_get_txq() is effectively downgraded to vif->txq.

At first glance this may look innocent, but it coerces mac80211 into a state that is almost guaranteed (codel may drop the offending skb) to crash, because a station-oriented skb gets queued up on a vif-oriented txq. ieee80211_tx_dequeue() ends up looking at info->control.flags and tries to use txq->sta, which in the failing case is NULL.
It's probably pointless to pretend one can downgrade skb from sta-txq to vif-txq.
Since downgrading unicast traffic to vif->txq must not be done there's no txq to put a frame on if sta->uploaded is false. Therefore the code is made to fall back to regular tx() op path if the described condition is hit.
Only drivers using wake_tx_queue were affected.
Example crash dump before fix:
Unable to handle kernel paging request at virtual address ffffe26c
PC is at ieee80211_tx_dequeue+0x204/0x690 [mac80211]
[<bf4252a4>] (ieee80211_tx_dequeue [mac80211]) from [<bf4b1388>] (ath10k_mac_tx_push_txq+0x54/0x1c0 [ath10k_core])
[<bf4b1388>] (ath10k_mac_tx_push_txq [ath10k_core]) from [<bf4bdfbc>] (ath10k_htt_txrx_compl_task+0xd78/0x11d0 [ath10k_core])
[<bf4bdfbc>] (ath10k_htt_txrx_compl_task [ath10k_core])
[<bf51c5a4>] (ath10k_pci_napi_poll+0x54/0xe8 [ath10k_pci])
[<bf51c5a4>] (ath10k_pci_napi_poll [ath10k_pci]) from [<c0572e90>] (net_rx_action+0xac/0x160)
Reported-by: Mohammed Shafi Shajakhan mohammed@qti.qualcomm.com Signed-off-by: Michal Kazior michal.kazior@tieto.com Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/mac80211/tx.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 274c564bd9af..1ffd1e145c13 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1244,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, struct ieee80211_vif *vif, - struct ieee80211_sta *pubsta, + struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; @@ -1258,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, if (!ieee80211_is_data(hdr->frame_control)) return NULL;
- if (pubsta) { + if (sta) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- txq = pubsta->txq[tid]; + if (!sta->uploaded) + return NULL; + + txq = sta->sta.txq[tid]; } else if (vif) { txq = vif->txq; } @@ -1499,23 +1502,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, struct fq *fq = &local->fq; struct ieee80211_vif *vif; struct txq_info *txqi; - struct ieee80211_sta *pubsta;
if (!local->ops->wake_tx_queue || sdata->vif.type == NL80211_IFTYPE_MONITOR) return false;
- if (sta && sta->uploaded) - pubsta = &sta->sta; - else - pubsta = NULL; - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
vif = &sdata->vif; - txqi = ieee80211_get_txq(local, vif, pubsta, skb); + txqi = ieee80211_get_txq(local, vif, sta, skb);
if (!txqi) return false;
From: Christoffer Dall christoffer.dall@linaro.org
[ Upstream commit 63e41226afc3f7a044b70325566fa86ac3142538 ]
When a VCPU blocks (WFI) and has programmed the vtimer, we program a soft timer to expire in the future to wake up the vcpu thread when appropriate. Because such a wake up involves a vcpu kick, and the timer expire function can get called from interrupt context, and the kick may sleep, we have to schedule the kick in the work function.
The work function currently has a warning that gets raised if it turns out that the timer shouldn't fire when it's run, which was added because the idea was that in that case the work should never have been cancelled.
However, it turns out that this whole thing is racy and we can get spurious warnings. The problem is that we clear the armed flag in the work function, which may run in parallel with the kvm_timer_unschedule->timer_disarm() call. This results in a possible situation where the timer_disarm() call does not call cancel_work_sync(), which effectively synchronizes the completion of the work function with running the VCPU. As a result, the VCPU thread proceeds before the work function completes, causing changes to the timer state such that kvm_timer_should_fire(vcpu) returns false in the work function.
All we do in the work function is to kick the VCPU, and an occasional rare extra kick never harmed anyone. Since the race above is extremely rare, we don't bother checking if the race happens but simply remove the check and the clearing of the armed flag from the work function.
Reported-by: Matthias Brugger mbrugger@suse.com Reviewed-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Christoffer Dall christoffer.dall@linaro.org Signed-off-by: Marc Zyngier marc.zyngier@arm.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- virt/kvm/arm/arch_timer.c | 3 --- 1 file changed, 3 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 27a1f6341d41..7b49a1378c90 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -89,9 +89,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work) struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); - vcpu->arch.timer_cpu.armed = false; - - WARN_ON(!kvm_timer_should_fire(vcpu));
/* * If the vcpu is blocked we want to wake it up so that it will see
From: Trond Myklebust trond.myklebust@primarydata.com
[ Upstream commit c6180a6237174f481dc856ed6e890d8196b6f0fb ]
If the server reboots multiple times, the client should rely on the server to tell it that it cannot reclaim state, as per section 9.6.3.4 in RFC7530 and section 8.4.2.1 in RFC5661. Currently, the client is being too conservative, and is assuming that if the server reboots while state recovery is in progress, then it must ignore state that was not recovered before the reboot.
Signed-off-by: Trond Myklebust trond.myklebust@primarydata.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- fs/nfs/nfs4state.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 92671914067f..71deeae6eefd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1718,7 +1718,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) break; case -NFS4ERR_STALE_CLIENTID: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED:
From: Jiri Olsa jolsa@kernel.org
[ Upstream commit 475113d937adfd150eb82b5e2c5507125a68e7af ]
It's possible to set up PEBS events to get only errors and not any data, like on SNB-X (model 45) and IVB-EP (model 62) via 2 perf commands running simultaneously:
taskset -c 1 ./perf record -c 4 -e branches:pp -j any -C 10
This leads to a soft lock up, because the error path of the intel_pmu_drain_pebs_nhm() does not account event->hw.interrupt for error PEBS interrupts, so in case you're getting ONLY errors you don't have a way to stop the event when it's over the max_samples_per_tick limit:
NMI watchdog: BUG: soft lockup - CPU#22 stuck for 22s! [perf_fuzzer:5816]
...
RIP: 0010:[<ffffffff81159232>] [<ffffffff81159232>] smp_call_function_single+0xe2/0x140
...
Call Trace:
 ? trace_hardirqs_on_caller+0xf5/0x1b0
 ? perf_cgroup_attach+0x70/0x70
 perf_install_in_context+0x199/0x1b0
 ? ctx_resched+0x90/0x90
 SYSC_perf_event_open+0x641/0xf90
 SyS_perf_event_open+0x9/0x10
 do_syscall_64+0x6c/0x1f0
 entry_SYSCALL64_slow_path+0x25/0x25
Add perf_event_account_interrupt() which does the interrupt and frequency checks and call it from intel_pmu_drain_pebs_nhm()'s error path.
We keep the pending_kill and pending_wakeup logic only in the __perf_event_overflow() path, because they make sense only if there's any data to deliver.
Signed-off-by: Jiri Olsa jolsa@kernel.org Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Arnaldo Carvalho de Melo acme@kernel.org Cc: Arnaldo Carvalho de Melo acme@redhat.com Cc: Jiri Olsa jolsa@redhat.com Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Vince Weaver vince@deater.net Cc: Vince Weaver vincent.weaver@maine.edu Link: http://lkml.kernel.org/r/1482931866-6018-2-git-send-email-jolsa@kernel.org Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin alexander.levin@verizon.com --- arch/x86/events/intel/ds.c | 6 +++++- include/linux/perf_event.h | 1 + kernel/events/core.c | 47 ++++++++++++++++++++++++++++++---------------- 3 files changed, 37 insertions(+), 17 deletions(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index be202390bbd3..9dfeeeca0ea8 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) continue;
/* log dropped samples number */ - if (error[bit]) + if (error[bit]) { perf_log_lost_samples(event, error[bit]);
+ if (perf_event_account_interrupt(event)) + x86_pmu_stop(event, 0); + } + if (counts[bit]) { __intel_pmu_pebs_event(event, iregs, base, top, bit, counts[bit]); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4741ecdb9817..78ed8105e64d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); +extern int perf_event_account_interrupt(struct perf_event *event); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, diff --git a/kernel/events/core.c b/kernel/events/core.c index 36ff2d93f222..13b9784427b0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7088,25 +7088,12 @@ static void perf_log_itrace_start(struct perf_event *event) perf_output_end(&handle); }
-/* - * Generic event overflow handling, sampling. - */ - -static int __perf_event_overflow(struct perf_event *event, - int throttle, struct perf_sample_data *data, - struct pt_regs *regs) +static int +__perf_event_account_interrupt(struct perf_event *event, int throttle) { - int events = atomic_read(&event->event_limit); struct hw_perf_event *hwc = &event->hw; - u64 seq; int ret = 0; - - /* - * Non-sampling counters might still use the PMI to fold short - * hardware counters, ignore those. - */ - if (unlikely(!is_sampling_event(event))) - return 0; + u64 seq;
seq = __this_cpu_read(perf_throttled_seq); if (seq != hwc->interrupts_seq) { @@ -7134,6 +7121,34 @@ static int __perf_event_overflow(struct perf_event *event, perf_adjust_period(event, delta, hwc->last_period, true); }
+ return ret; +} + +int perf_event_account_interrupt(struct perf_event *event) +{ + return __perf_event_account_interrupt(event, 1); +} + +/* + * Generic event overflow handling, sampling. + */ + +static int __perf_event_overflow(struct perf_event *event, + int throttle, struct perf_sample_data *data, + struct pt_regs *regs) +{ + int events = atomic_read(&event->event_limit); + int ret = 0; + + /* + * Non-sampling counters might still use the PMI to fold short + * hardware counters, ignore those. + */ + if (unlikely(!is_sampling_event(event))) + return 0; + + ret = __perf_event_account_interrupt(event, throttle); + /* * XXX event_limit might not quite work as expected on inherited * events
From: Reza Arbab arbab@linux.vnet.ibm.com
[ Upstream commit 32b53c012e0bfe20b2745962a89db0dc72ef3270 ]
Memory hotplug is leading to hash page table calls, even on radix:
arch_add_memory
  create_section_mapping
    htab_bolt_mapping
      BUG_ON(!ppc_md.hpte_insert);
To fix, refactor {create,remove}_section_mapping() into hash__ and radix__ variants. Leave the radix versions stubbed for now.
Reviewed-by: Aneesh Kumar K.V aneesh.kumar@linux.vnet.ibm.com Acked-by: Balbir Singh bsingharora@gmail.com Signed-off-by: Reza Arbab arbab@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Signed-off-by: Sasha Levin alexander.levin@verizon.com --- arch/powerpc/include/asm/book3s/64/hash.h | 4 ++++ arch/powerpc/mm/hash_utils_64.c | 4 ++-- arch/powerpc/mm/pgtable-book3s64.c | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index f61cad3de4e6..4c935f7504f7 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start, unsigned long phys); extern void hash__vmemmap_remove_mapping(unsigned long start, unsigned long page_size); + +int hash__create_section_mapping(unsigned long start, unsigned long end); +int hash__remove_section_mapping(unsigned long start, unsigned long end); + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 78dabf065ba9..bd666287c5ed 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void) }
#ifdef CONFIG_MEMORY_HOTPLUG -int create_section_mapping(unsigned long start, unsigned long end) +int hash__create_section_mapping(unsigned long start, unsigned long end) { int rc = htab_bolt_mapping(start, end, __pa(start), pgprot_val(PAGE_KERNEL), mmu_linear_psize, @@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end) return rc; }
-int remove_section_mapping(unsigned long start, unsigned long end) +int hash__remove_section_mapping(unsigned long start, unsigned long end) { int rc = htab_remove_mapping(start, end, mmu_linear_psize, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f4f437cbabf1..0fad7f6742ff 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -125,3 +125,21 @@ void mmu_cleanup_all(void) else if (mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); } + +#ifdef CONFIG_MEMORY_HOTPLUG +int create_section_mapping(unsigned long start, unsigned long end) +{ + if (radix_enabled()) + return -ENODEV; + + return hash__create_section_mapping(start, end); +} + +int remove_section_mapping(unsigned long start, unsigned long end) +{ + if (radix_enabled()) + return -ENODEV; + + return hash__remove_section_mapping(start, end); +} +#endif /* CONFIG_MEMORY_HOTPLUG */
From: Quinn Tran quinn.tran@cavium.com
[ Upstream commit bb1181c9a8b46b6f10e749d9ed94480336445d7f ]
qlt_reset() is called with an Immediate Notify IOCB only. The current code wrongly casts it as an ATIO IOCB.
Signed-off-by: Quinn Tran quinn.tran@cavium.com Signed-off-by: Himanshu Madhani himanshu.madhani@cavium.com Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Bart Van Assche bart.vanassche@sandisk.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/scsi/qla2xxx/qla_target.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 91f5f55a8a9b..59059ffbb98c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; - uint32_t unpacked_lun, lun = 0; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; - struct atio_from_isp *a = (struct atio_from_isp *)iocb; unsigned long flags;
loop_id = le16_to_cpu(n->u.isp24.nport_handle); @@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id);
- lun = a->u.isp24.fcp_cmnd.lun; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); - - return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, - iocb, QLA24XX_MGMT_SEND_NACK); + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); }
/* ha->tgt.sess_lock supposed to be held on entry */
From: Rex Zhu Rex.Zhu@amd.com
[ Upstream commit e05208ded1905e500cd5b369d624b071951c68b9 ]
Set the proper bits for clockgating setup.
Signed-off-by: Rex Zhu Rex.Zhu@amd.com Acked-by: Christian König christian.koenig@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 50f0cf2788b7..7522f796f19b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -182,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - data &= ~0xffc00000; + data &= ~0x3ff; WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
From: Andrzej Hajda a.hajda@samsung.com
[ Upstream commit 11d8bcef7a0399e1d2519f207fd575fc404306b4 ]
DECON_TV requires STANDALONE_UPDATE after output enabling, otherwise it does not start. This change is neutral for DECON.
Signed-off-by: Andrzej Hajda a.hajda@samsung.com Signed-off-by: Inki Dae inki.dae@samsung.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 09e8cc36948e..6dd09c306bc1 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -189,6 +189,8 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
/* enable output and display signal */ decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0); + + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); }
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
From: Kevin Hao haokexin@gmail.com
[ Upstream commit 4c833368f0bf748d4147bf301b1f95bc8eccb3c0 ]
I got the following calltrace on a Apollo Lake SoC with 32-bit kernel:
WARNING: CPU: 2 PID: 261 at arch/x86/include/asm/fpu/internal.h:363 fpu__restore+0x1f5/0x260
[...]
Hardware name: Intel Corp. Broxton P/NOTEBOOK, BIOS APLIRVPA.X64.0138.B35.1608091058 08/09/2016
Call Trace:
 dump_stack()
 __warn()
 ? fpu__restore()
 warn_slowpath_null()
 fpu__restore()
 __fpu__restore_sig()
 fpu__restore_sig()
 restore_sigcontext.isra.9()
 sys_sigreturn()
 do_int80_syscall_32()
 entry_INT80_32()
The reason is that a #GP occurs when executing XRSTORS. The root cause is that we forget to set the xcomp_bv when we fake up the XSAVES area in the copyin_to_xsaves() function.
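For illustration only, a minimal userspace sketch of the invariant the one-line fix restores. The struct and the XCOMP_BV_COMPACTED_FORMAT value (bit 63) mirror the kernel's definitions, but this is not the kernel code itself:

/* Sketch: for the compacted XSAVES format, xcomp_bv must carry bit 63
 * plus the feature bits actually present, otherwise XRSTORS raises #GP.
 */
#include <stdint.h>
#include <stdio.h>

#define XCOMP_BV_COMPACTED_FORMAT (1ULL << 63)

struct xstate_header_sketch {
	uint64_t xfeatures;
	uint64_t xcomp_bv;
};

int main(void)
{
	struct xstate_header_sketch hdr = { .xfeatures = 0x7 }; /* FP|SSE|AVX */

	/* The assignment copyin_to_xsaves() was missing: */
	hdr.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | hdr.xfeatures;

	printf("xcomp_bv = %#llx\n", (unsigned long long)hdr.xcomp_bv);
	return 0;
}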
Signed-off-by: Kevin Hao haokexin@gmail.com Cc: Andy Lutomirski luto@kernel.org Cc: Borislav Petkov bp@alien8.de Cc: Brian Gerst brgerst@gmail.com Cc: Dave Hansen dave.hansen@linux.intel.com Cc: Denys Vlasenko dvlasenk@redhat.com Cc: Fenghua Yu fenghua.yu@intel.com Cc: H. Peter Anvin hpa@zytor.com Cc: Josh Poimboeuf jpoimboe@redhat.com Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Oleg Nesterov oleg@redhat.com Cc: Peter Zijlstra peterz@infradead.org Cc: Quentin Casasnovas quentin.casasnovas@oracle.com Cc: Rik van Riel riel@redhat.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Yu-cheng Yu yu-cheng.yu@intel.com Link: http://lkml.kernel.org/r/1485075023-30161-1-git-send-email-haokexin@gmail.co... Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Thomas Gleixner tglx@linutronix.de Signed-off-by: Sasha Levin alexander.levin@verizon.com --- arch/x86/kernel/fpu/xstate.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 095ef7ddd6ae..abfbb61b18b8 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1077,6 +1077,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, * Add back in the features that came in from userspace: */ xsave->header.xfeatures |= xfeatures; + xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures;
return 0; }
From: Colin Ian King colin.king@canonical.com
[ Upstream commit 0e73fc9a56f22f2eec4d2b2910c649f7af67b74d ]
The comparison on the timeout can lead to an array overrun read on sctp_timer_tbl because of an off-by-one error. Fix this by using < instead of <= and by comparing against the array size rather than SCTP_EVENT_TIMEOUT_MAX.
Fixes CoverityScan CID#1397639 ("Out-of-bounds read")
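As a standalone illustration of the bounds-check idiom the fix uses (the table and names below are made up, not the SCTP code):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const timer_tbl[] = {
	"TIMEOUT_NONE", "TIMEOUT_T1_COOKIE", "TIMEOUT_T1_INIT",
};

static const char *timer_name(unsigned int id)
{
	/* '<' against the real array length: id == ARRAY_SIZE(timer_tbl)
	 * is already out of bounds, which is exactly what '<=' allowed.
	 */
	if (id < ARRAY_SIZE(timer_tbl))
		return timer_tbl[id];
	return "unknown_timer";
}

int main(void)
{
	printf("%s\n", timer_name(2)); /* TIMEOUT_T1_INIT */
	printf("%s\n", timer_name(3)); /* unknown_timer, no overrun */
	return 0;
}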
Signed-off-by: Colin Ian King colin.king@canonical.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/sctp/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 95d7b15dad21..e371a0d90068 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = { /* Lookup timer debug name. */ const char *sctp_tname(const sctp_subtype_t id) { - if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) + if (id.timeout < ARRAY_SIZE(sctp_timer_tbl)) return sctp_timer_tbl[id.timeout]; return "unknown_timer"; }
From: Xiangliang Yu Xiangliang.Yu@amd.com
[ Upstream commit 3a1d19a29670aa7eb58576a31883d0aa9fb77549 ]
The virtual display doesn't allocate an amdgpu_encoder when initializing, so we get an invalid pointer if we try to free the amdgpu_encoder when unloading the driver.
Signed-off-by: Xiangliang Yu Xiangliang.Yu@amd.com Reviewed-by: Alex Deucher alexander.deucher@amd.com Acked-by: Christian König christian.koenig@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index c2bd9f045532..6d75fd0e3105 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -565,11 +565,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - kfree(amdgpu_encoder->enc_priv); drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); + kfree(encoder); }
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
From: Johannes Berg johannes.berg@intel.com
[ Upstream commit 115865fa0826ed18ca04717cf72d0fe874c0fe7f ]
In my previous patch, I missed that rate_control_rate_init() is called from some places that cannot sleep, so it cannot call ieee80211_recalc_min_chandef(). Remove that call for now to fix the context bug; we'll have to find a different way to fix the minimum channel width issue.
Fixes: 96aa2e7cf126 ("mac80211: calculate min channel width correctly") Reported-by: Xiaolong Ye (via lkp-robot) xiaolong.ye@intel.com Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/mac80211/rate.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 9e2641d45587..206698bc93f4 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)
ieee80211_sta_set_rx_nss(sta);
- ieee80211_recalc_min_chandef(sta->sdata); - if (!ref) return;
From: Parthasarathy Bhuvaragan parthasarathy.bhuvaragan@ericsson.com
[ Upstream commit 9dc3abdd1f7ea524e8552e0a3ef01219892ed1f4 ]
Commit 333f796235a527 ("tipc: fix a race condition leading to subscriber refcnt bug") reveals a soft lockup while acquiring nametbl_lock.
Before commit 333f796235a527, we call tipc_conn_shutdown() from tipc_close_conn() in the context of tipc_topsrv_stop(). In that context, we are allowed to grab the nametbl_lock.
Commit 333f796235a527 moved tipc_conn_release (renamed from tipc_conn_shutdown) to the connection refcount cleanup. This allows either tipc_nametbl_withdraw() or tipc_topsrv_stop() to perform the cleanup.

Since tipc_exit_net() first calls tipc_topsrv_stop() and then tipc_nametbl_withdraw(), the chances increase that the latter performs the connection cleanup.
The soft lockup occurs in the call chain of tipc_nametbl_withdraw(), when it performs the tipc_conn_kref_release() as it tries to grab nametbl_lock again while holding it already:

tipc_nametbl_withdraw() grabs nametbl_lock
  tipc_nametbl_remove_publ()
    tipc_subscrp_report_overlap()
      tipc_subscrp_send_event()
        tipc_conn_sendmsg()
          << if (con->flags != CF_CONNECTED) we do conn_put(),
             triggering the cleanup as refcount=0. >>
          tipc_conn_kref_release
            tipc_sock_release
              tipc_conn_release
                tipc_subscrb_delete
                  tipc_subscrp_delete
                    tipc_nametbl_unsubscribe << Soft Lockup >>
The previous changes in this series fix the race conditions addressed by commit 333f796235a527. Hence we can now revert that commit.
Fixes: 333f796235a52727 ("tipc: fix a race condition leading to subscriber refcnt bug") Reported-and-Tested-by: John Thompson thompa.atl@gmail.com Acked-by: Ying Xue ying.xue@windriver.com Acked-by: Jon Maloy jon.maloy@ericsson.com Signed-off-by: Parthasarathy Bhuvaragan parthasarathy.bhuvaragan@ericsson.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/tipc/server.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/net/tipc/server.c b/net/tipc/server.c index f89c0c2e8c16..04ff441b8065 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -86,7 +86,6 @@ struct outqueue_entry { static void tipc_recv_work(struct work_struct *work); static void tipc_send_work(struct work_struct *work); static void tipc_clean_outqueues(struct tipc_conn *con); -static void tipc_sock_release(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref) { @@ -104,7 +103,6 @@ static void tipc_conn_kref_release(struct kref *kref) } saddr->scope = -TIPC_NODE_SCOPE; kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); - tipc_sock_release(con); sock_release(sock); con->sock = NULL;
@@ -194,19 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con) write_unlock_bh(&sk->sk_callback_lock); }
-static void tipc_sock_release(struct tipc_conn *con) +static void tipc_close_conn(struct tipc_conn *con) { struct tipc_server *s = con->server;
- if (con->conid) - s->tipc_conn_release(con->conid, con->usr_data); - - tipc_unregister_callbacks(con); -} - -static void tipc_close_conn(struct tipc_conn *con) -{ if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { + tipc_unregister_callbacks(con); + + if (con->conid) + s->tipc_conn_release(con->conid, con->usr_data);
/* We shouldn't flush pending works as we may be in the * thread. In fact the races with pending rx/tx work structs
From: Parthasarathy Bhuvaragan parthasarathy.bhuvaragan@ericsson.com
[ Upstream commit 35e22e49a5d6a741ebe7f2dd280b2052c3003ef7 ]
In tipc_server_stop(), we iterate over the connections using the server's idr_in_use as the limiting factor. We ignore the fact that this variable is decremented in tipc_close_conn(), leading to a premature exit.

In this commit, we iterate until we have no connections left.
Acked-by: Ying Xue ying.xue@windriver.com Acked-by: Jon Maloy jon.maloy@ericsson.com Tested-by: John Thompson thompa.atl@gmail.com Signed-off-by: Parthasarathy Bhuvaragan parthasarathy.bhuvaragan@ericsson.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/tipc/server.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/net/tipc/server.c b/net/tipc/server.c index 04ff441b8065..3cd6402e812c 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -619,14 +619,12 @@ int tipc_server_start(struct tipc_server *s) void tipc_server_stop(struct tipc_server *s) { struct tipc_conn *con; - int total = 0; int id;
spin_lock_bh(&s->idr_lock); - for (id = 0; total < s->idr_in_use; id++) { + for (id = 0; s->idr_in_use; id++) { con = idr_find(&s->conn_idr, id); if (con) { - total++; spin_unlock_bh(&s->idr_lock); tipc_close_conn(con); spin_lock_bh(&s->idr_lock);
From: Ram Amrani Ram.Amrani@Cavium.com
[ Upstream commit 865cea40b69741c3da2574176876463233b2b67c ]
If the user is requesting us to change the QP state to the same state that it is already in, return success instead of failure.
Signed-off-by: Ram Amrani Ram.Amrani@cavium.com Signed-off-by: Michal Kalderon Michal.Kalderon@cavium.com Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4ba019e3dc56..35d5b89decb4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1653,7 +1653,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev, int status = 0;
if (new_state == qp->state) - return 1; + return 0;
switch (qp->state) { case QED_ROCE_QP_STATE_RESET:
From: Ram Amrani Ram.Amrani@Cavium.com
[ Upstream commit af2b14b8b8ae21b0047a52c767ac8b44f435a280 ]
The loopback logic in RDMA CM packets compares Ethernet addresses and was accidentally inverted.
Signed-off-by: Ram Amrani Ram.Amrani@cavium.com Signed-off-by: Ariel Elior Ariel.Elior@cavium.com Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/infiniband/hw/qedr/qedr_cm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 63890ebb72bd..eccf7039aaca 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -404,9 +404,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev, }
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) - packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; - else packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; + else + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode; memcpy(packet->header.vaddr, ud_header_buffer, header_size);
Thanks!
Acked-by: Ram Amrani Ram.Amrani@cavium.com
From: Sagi Grimberg sagi@grimberg.me
[ Upstream commit 06406d81a2d7cfb8abcc4fa6cdfeb8e5897007c5 ]
Make sure the async event and fatal error work items are not running, so we can free the controller safely.
Signed-off-by: Roy Shterman roys@lightbitslabs.com Signed-off-by: Sagi Grimberg sagi@grimberg.me Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/nvme/target/core.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 55ce769cecee..fbd6d487103f 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref) list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock);
+ flush_work(&ctrl->async_event_work); + cancel_work_sync(&ctrl->fatal_err_work); + ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); nvmet_subsys_put(subsys);
From: Iago Abal mail@iagoabal.eu
[ Upstream commit 91539eb1fda2d530d3b268eef542c5414e54bf1a ]
The static bug finder EBA (http://www.iagoabal.eu/eba/) reported the following double-lock bug:
Double lock:
1. spin_lock_irqsave(pch->lock, flags) at pl330_free_chan_resources:2236;
2. call to function `pl330_release_channel' immediately after;
3. call to function `dma_pl330_rqcb' in line 1753;
4. spin_lock_irqsave(pch->lock, flags) at dma_pl330_rqcb:1505.
I have fixed it as suggested by Marek Szyprowski.
First, I have replaced `pch->lock' with `pl330->lock' in functions `pl330_alloc_chan_resources' and `pl330_free_chan_resources'. This avoids the double-lock by acquiring a different lock than `dma_pl330_rqcb'.
NOTE that, as a result, `pl330_free_chan_resources' executes `list_splice_tail_init' on `pch->work_list' under lock `pl330->lock', whereas in the rest of the code `pch->work_list' is protected by `pch->lock'. I don't know if this may cause race conditions. Similarly `pch->cyclic' is written by `pl330_alloc_chan_resources' under `pl330->lock' but read by `pl330_tx_submit' under `pch->lock'.
Second, I have removed locking from `pl330_request_channel' and `pl330_release_channel' functions. Function `pl330_request_channel' is only called from `pl330_alloc_chan_resources', so the lock is already held. Function `pl330_release_channel' is called from `pl330_free_chan_resources', which already holds the lock, and from `pl330_del'. Function `pl330_del' is called in an error path of `pl330_probe' and at the end of `pl330_remove', but I assume that there cannot be concurrent accesses to the protected data at those points.
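A minimal pthread-based analogue of why the change avoids the self-deadlock (not the driver code; the two mutexes only loosely stand in for pl330->lock and pch->lock): the resource-release path and the completion callback no longer contend on the same non-recursive lock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ pl330->lock */
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ pch->lock   */

static void completion_callback(void)
{
	/* Rough analogue of dma_pl330_rqcb(): takes the per-channel lock. */
	pthread_mutex_lock(&chan_lock);
	puts("callback: per-channel state updated");
	pthread_mutex_unlock(&chan_lock);
}

static void free_chan_resources(void)
{
	/* Rough analogue of pl330_free_chan_resources() after the fix: hold
	 * the controller-wide lock, not the per-channel lock, around the
	 * release path that may end up invoking the callback.
	 */
	pthread_mutex_lock(&ctrl_lock);
	completion_callback(); /* would self-deadlock if both paths used chan_lock */
	pthread_mutex_unlock(&ctrl_lock);
}

int main(void)
{
	free_chan_resources();
	return 0;
}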
Signed-off-by: Iago Abal mail@iagoabal.eu Reviewed-by: Marek Szyprowski m.szyprowski@samsung.com Signed-off-by: Vinod Koul vinod.koul@intel.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/dma/pl330.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 9f3dbc8c63d2..fb2e7476d96b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1694,7 +1694,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i) static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) { struct pl330_thread *thrd = NULL; - unsigned long flags; int chans, i;
if (pl330->state == DYING) @@ -1702,8 +1701,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
- spin_lock_irqsave(&pl330->lock, flags); - for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; if ((thrd->free) && (!_manager_ns(thrd) || @@ -1721,8 +1718,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) thrd = NULL; }
- spin_unlock_irqrestore(&pl330->lock, flags); - return thrd; }
@@ -1740,7 +1735,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev) static void pl330_release_channel(struct pl330_thread *thrd) { struct pl330_dmac *pl330; - unsigned long flags;
if (!thrd || thrd->free) return; @@ -1752,10 +1746,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
- spin_lock_irqsave(&pl330->lock, flags); _free_event(thrd, thrd->ev); thrd->free = true; - spin_unlock_irqrestore(&pl330->lock, flags); }
/* Initialize the structure for PL330 configuration, that can be used @@ -2120,20 +2112,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) struct pl330_dmac *pl330 = pch->dmac; unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags); + spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan); pch->cyclic = false;
pch->thread = pl330_request_channel(pl330); if (!pch->thread) { - spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags); return -ENOMEM; }
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
- spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags);
return 1; } @@ -2236,12 +2228,13 @@ static int pl330_pause(struct dma_chan *chan) static void pl330_free_chan_resources(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); + struct pl330_dmac *pl330 = pch->dmac; unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev); - spin_lock_irqsave(&pch->lock, flags); + spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread); pch->thread = NULL; @@ -2249,7 +2242,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) if (pch->cyclic) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
- spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags); pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pm_runtime_put_autosuspend(pch->dmac->ddma.dev); }
From: Mike Looijmans mike.looijmans@topic.nl
[ Upstream commit 0e1929dedea36781e25902118c93edd8d8f09af1 ]
The Cadence I2C driver calls cdns_i2c_writereg(..) to set up a workaround in the controller, but did so after calling i2c_add_adapter(), which starts probing devices on the bus. Change the order so that the configuration is completely finished before the adapter is used.
Signed-off-by: Mike Looijmans mike.looijmans@topic.nl Signed-off-by: Wolfram Sang wsa@the-dreams.de Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/i2c/busses/i2c-cadence.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 686971263bef..45d6771fac8c 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev) goto err_clk_dis; }
- ret = i2c_add_adapter(&id->adap); - if (ret < 0) - goto err_clk_dis; - /* * Cadence I2C controller has a bug wherein it generates * invalid read transaction after HW timeout in master receiver mode. @@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) */ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ ret = i2c_add_adapter(&id->adap); + if (ret < 0) + goto err_clk_dis; + dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
From: Andreas Schultz aschultz@tpip.net
[ Upstream commit c6ce1d08eede4c2968ed08aafa3165e8e183c5a1 ]
3GPP TS 29.281 and 3GPP TS 29.060 imply that GTP-U packets should be sent with the DF bit cleared. For example 3GPP TS 29.060, Release 8, Section 13.2.2:
Backbone router: Any router in the backbone may fragment the GTP packet if needed, according to IPv4.
Signed-off-by: Andreas Schultz aschultz@tpip.net Acked-by: Harald Welte laforge@netfilter.org Acked-by: Pablo Neira Ayuso pablo@netfilter.org Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/gtp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index cebde074d196..5b67f8a8b79a 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -612,7 +612,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) pktinfo.fl4.saddr, pktinfo.fl4.daddr, pktinfo.iph->tos, ip4_dst_hoplimit(&pktinfo.rt->dst), - htons(IP_DF), + 0, pktinfo.gtph_port, pktinfo.gtph_port, true, false); break;
From: Jason Baron jbaron@akamai.com
[ Upstream commit 56d806222ace4c3aeae516cd7a855340fb2839d8 ]
sock_reset_flag() maps to __clear_bit(), not the atomic version clear_bit(). Thus, we need smp_mb(); smp_mb__after_atomic() is not sufficient.
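A userspace C11 analogue of the barrier requirement (illustrative only; the flag names are invented and the kernel primitives are replaced by C11 atomics): because the flag clear is a plain store rather than an atomic read-modify-write, only a full fence orders it before the subsequent load of the other flag.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_shrunk = 1; /* ~ SOCK_QUEUE_SHRUNK */
static atomic_int nospace = 0;      /* ~ SOCK_NOSPACE      */

static void tcp_check_space_analogue(void)
{
	/* Plain store (the kernel's __clear_bit() is not an atomic RMW)... */
	atomic_store_explicit(&queue_shrunk, 0, memory_order_relaxed);
	/* ...so only a full fence (kernel smp_mb()) orders it before the
	 * following load; an "after_atomic" barrier would not apply here.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&nospace, memory_order_relaxed))
		puts("wake up writer");
}

int main(void)
{
	atomic_store(&nospace, 1);
	tcp_check_space_analogue();
	return 0;
}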
Fixes: 3c7151275c0c ("tcp: add memory barriers to write space paths") Cc: Eric Dumazet eric.dumazet@gmail.com Cc: Oleg Nesterov oleg@redhat.com Signed-off-by: Jason Baron jbaron@akamai.com Acked-by: Eric Dumazet edumazet@google.com Reported-by: Oleg Nesterov oleg@redhat.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- net/ipv4/tcp_input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8fcd0c642742..05255a286888 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5081,7 +5081,7 @@ static void tcp_check_space(struct sock *sk) if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ - smp_mb__after_atomic(); + smp_mb(); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk);
From: Sean Nyekjaer sean.nyekjaer@prevas.dk
[ Upstream commit cf626c3b252b2c9d131be0dd66096ec3bf729e54 ]
As per commit "net: phy: phy drivers should not set SUPPORTED_[Asym_]Pause", this PHY driver should not set these feature bits.
Signed-off-by: Sean Nyekjaer sean.nyekjaer@prevas.dk Fixes: 9d162ed69f51 ("net: phy: micrel: add support for KSZ8795") Reviewed-by: Florian Fainelli f.fainelli@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/phy/micrel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 222918828655..fbf5945ce00d 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1020,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8795, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8795", - .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg,
From: Ivan Vecera cera@cera.cz
[ Upstream commit 4993b39ab04b083ff6ee1147e7e7f120feb6bf7f ]
Recent commit 34393529163a ("be2net: fix MAC addr setting on privileged BE3 VFs") allows privileged BE3 VFs to set their MAC address during initialization. Although the initial MAC for such VFs is already programmed by the parent PF, the subsequent setting performed by the VF is normally OK; in certain cases (e.g. after a fresh boot), however, this command can fail in the VF.

The MAC should be initialized only when:
1) no MAC is programmed (always, except for BE3 VFs during first init)
2) the programmed MAC is different from the requested one (e.g. a MAC was set while the interface was down). In this case the initial MAC programmed by the PF needs to be deleted.

adapter->dev_mac contains the MAC address currently programmed in HW, so it should be zeroed when the MAC is deleted from HW, and it should not be filled in be_mac_addr_set() when the MAC is set while the interface is down, since no programming is performed in that case.
Example of failure without the fix (immediately after fresh boot):
# ip link set eth0 up   <- eth0 is BE3 PF
be2net 0000:01:00.0 eth0: Link is Up

# echo 1 > /sys/class/net/eth0/device/sriov_numvfs   <- Create 1 VF
...
be2net 0000:01:04.0: Emulex OneConnect(be3): VF port 0

# ip link set eth8 up   <- eth8 is created privileged VF
be2net 0000:01:04.0: opcode 59-1 failed:status 1-76
RTNETLINK answers: Input/output error

# echo 0 > /sys/class/net/eth0/device/sriov_numvfs   <- Delete VF
iommu: Removing device 0000:01:04.0 from group 33
...

# echo 1 > /sys/class/net/eth0/device/sriov_numvfs   <- Create it again
iommu: Removing device 0000:01:04.0 from group 33
...

# ip link set eth8 up
be2net 0000:01:04.0 eth8: Link is Up
Initialization is now OK.
v2 - Corrected the comment and condition check suggested by Suresh & Harsha
Fixes: 34393529163a ("be2net: fix MAC addr setting on privileged BE3 VFs") Cc: Sathya Perla sathya.perla@broadcom.com Cc: Ajit Khaparde ajit.khaparde@broadcom.com Cc: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com Cc: Somnath Kotur somnath.kotur@broadcom.com Signed-off-by: Ivan Vecera cera@cera.cz Acked-by: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/emulex/benet/be_main.c | 33 ++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fbb396b08495..1644896568c4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) status = -EPERM; goto err; } -done: + + /* Remember currently programmed MAC */ ether_addr_copy(adapter->dev_mac, addr->sa_data); +done: ether_addr_copy(netdev->dev_addr, addr->sa_data); dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; @@ -3635,8 +3637,10 @@ static void be_disable_if_filters(struct be_adapter *adapter) { /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ if (!BEx_chip(adapter) || !be_virtfn(adapter) || - check_privilege(adapter, BE_PRIV_FILTMGMT)) + check_privilege(adapter, BE_PRIV_FILTMGMT)) { be_dev_mac_del(adapter, adapter->pmac_id[0]); + eth_zero_addr(adapter->dev_mac); + }
be_clear_uc_list(adapter); be_clear_mc_list(adapter); @@ -3790,12 +3794,27 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status;
- /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ - if (!BEx_chip(adapter) || !be_virtfn(adapter) || - check_privilege(adapter, BE_PRIV_FILTMGMT)) { + /* Normally this condition usually true as the ->dev_mac is zeroed. + * But on BE3 VFs the initial MAC is pre-programmed by PF and + * subsequent be_dev_mac_add() can fail (after fresh boot) + */ + if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) { + int old_pmac_id = -1; + + /* Remember old programmed MAC if any - can happen on BE3 VF */ + if (!is_zero_ether_addr(adapter->dev_mac)) + old_pmac_id = adapter->pmac_id[0]; + status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; + + /* Delete the old programmed MAC as we successfully programmed + * a new MAC + */ + if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0]) + be_dev_mac_del(adapter, old_pmac_id); + ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); }
@@ -4569,6 +4588,10 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + + /* Initial MAC for BE3 VFs is already programmed by PF */ + if (BEx_chip(adapter) && be_virtfn(adapter)) + memcpy(adapter->dev_mac, mac, ETH_ALEN); }
return 0;
From: Andreas Schultz aschultz@tpip.net
[ Upstream commit 3ab1b469e847ba425af3c5ad5068cc94b55b38d0 ]
The use of the passed-through netlink src_net to check for a cross-netns operation was wrong. Using the GTP socket and the GTP netdevice is always correct (even if the netdev has been moved to a new netns after link creation).
Remove the now obsolete net field from gtp_dev.
Signed-off-by: Andreas Schultz aschultz@tpip.net Acked-by: Pablo Neira Ayuso pablo@netfilter.org Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/gtp.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 5b67f8a8b79a..cb206e5526c4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -69,7 +69,6 @@ struct gtp_dev { struct socket *sock0; struct socket *sock1u;
- struct net *net; struct net_device *dev;
unsigned int hash_size; @@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(gtp->net, dev_net(gtp->dev)); + xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
switch (udp_sk(sk)->encap_type) { case UDP_ENCAP_GTP0: @@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev) static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1, struct net *src_net); + int fd_gtp0, int fd_gtp1);
static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) @@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, fd0 = nla_get_u32(data[IFLA_GTP_FD0]); fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
- err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); + err = gtp_encap_enable(dev, gtp, fd0, fd1); if (err < 0) goto out_err;
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) }
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1, struct net *src_net) + int fd_gtp0, int fd_gtp1) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct socket *sock0, *sock1u; @@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
gtp->sock0 = sock0; gtp->sock1u = sock1u; - gtp->net = src_net;
tuncfg.sk_user_data = gtp; tuncfg.encap_rcv = gtp_encap_recv;
From: Vincent vincent.stehle@laposte.net
[ Upstream commit c73e44269369e936165f0f9b61f1f09a11dae01c ]
This fixes the following smatch and coccinelle warnings:
drivers/net/ethernet/cavium/thunder/thunder_xcv.c:119 xcv_setup_link() error: we previously assumed 'xcv' could be null (see line 118) [smatch]

drivers/net/ethernet/cavium/thunder/thunder_xcv.c:119:16-20: ERROR: xcv is NULL but dereferenced. [coccinelle]
Fixes: 6465859aba1e66a5 ("net: thunderx: Add RGMII interface type support") Signed-off-by: Vincent Stehlé vincent.stehle@laposte.net Cc: Sunil Goutham sgoutham@cavium.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c index 67befedef709..578c7f8f11bf 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c @@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed) int speed = 2;
if (!xcv) { - dev_err(&xcv->pdev->dev, - "XCV init not done, probe may have failed\n"); + pr_err("XCV init not done, probe may have failed\n"); return; }
From: Jan Kara jack@suse.cz
[ Upstream commit 0911d0041c22922228ca52a977d7b0b0159fee4b ]
Some ->page_mkwrite handlers may return VM_FAULT_RETRY as their return code (GFS2 or Lustre can definitely do this). However, VM_FAULT_RETRY from ->page_mkwrite is completely unhandled by the mm code and results in locking and writeably mapping the page, which is definitely not what the caller wanted.

Fix Lustre and block_page_mkwrite_return(), used by other filesystems (notably GFS2), to return VM_FAULT_NOPAGE instead, which results in bailing out from the fault code; the CPU then retries the access and we fault again, effectively doing what the handler wanted.
Link: http://lkml.kernel.org/r/20170203150729.15863-1-jack@suse.cz Signed-off-by: Jan Kara jack@suse.cz Reported-by: Al Viro viro@ZenIV.linux.org.uk Reviewed-by: Jinshan Xiong jinshan.xiong@intel.com Cc: Matthew Wilcox willy@infradead.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/staging/lustre/lustre/llite/llite_mmap.c | 4 +--- include/linux/buffer_head.h | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index 436691814a5e..27333d973bcd 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -401,15 +401,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) result = VM_FAULT_LOCKED; break; case -ENODATA: + case -EAGAIN: case -EFAULT: result = VM_FAULT_NOPAGE; break; case -ENOMEM: result = VM_FAULT_OOM; break; - case -EAGAIN: - result = VM_FAULT_RETRY; - break; default: result = VM_FAULT_SIGBUS; break; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 447a915db25d..4431ea2c8802 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -239,12 +239,10 @@ static inline int block_page_mkwrite_return(int err) { if (err == 0) return VM_FAULT_LOCKED; - if (err == -EFAULT) + if (err == -EFAULT || err == -EAGAIN) return VM_FAULT_NOPAGE; if (err == -ENOMEM) return VM_FAULT_OOM; - if (err == -EAGAIN) - return VM_FAULT_RETRY; /* -ENOSPC, -EDQUOT, -EIO ... */ return VM_FAULT_SIGBUS; }
From: Alexey Kardashevskiy aik@ozlabs.ru
[ Upstream commit 2da64d20a0b20046d688e44f4033efd09157e29d ]
Commit d9c728949ddc ("vfio/spapr: Postpone default window creation") added an additional exit to the VFIO_IOMMU_SPAPR_TCE_CREATE case and made it possible to return from tce_iommu_ioctl() without unlocking container->lock; this patch fixes that.
Fixes: d9c728949ddc ("vfio/spapr: Postpone default window creation") Signed-off-by: Alexey Kardashevskiy aik@ozlabs.ru Reviewed-by: David Gibson david@gibson.dropbear.id.au Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/vfio/vfio_iommu_spapr_tce.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 85d3e648bdea..59b3f62a2d64 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data, mutex_lock(&container->lock);
ret = tce_iommu_create_default_window(container); - if (ret) - return ret; - - ret = tce_iommu_create_window(container, create.page_shift, - create.window_size, create.levels, - &create.start_addr); + if (!ret) + ret = tce_iommu_create_window(container, + create.page_shift, + create.window_size, create.levels, + &create.start_addr);
mutex_unlock(&container->lock);
From: Hans Verkuil hverkuil@xs4all.nl
[ Upstream commit 42980da2eb7eb9695d8efc0c0ef145cbbb993b2c ]
Poll messages that are used to allocate a logical address should use the same initiator as the destination. Instead, the code expected the initiator to be 0xf, which is not according to the standard.
This also had consequences for the message checks in cec_transmit_msg_fh that incorrectly rejected poll messages with the same initiator and destination.
Signed-off-by: Hans Verkuil hans.verkuil@cisco.com Signed-off-by: Mauro Carvalho Chehab mchehab@s-opensource.com Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/staging/media/cec/cec-adap.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c index 057c9b5ab1e5..f6c9548d1a7f 100644 --- a/drivers/staging/media/cec/cec-adap.c +++ b/drivers/staging/media/cec/cec-adap.c @@ -608,8 +608,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, } memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); if (msg->len == 1) { - if (cec_msg_initiator(msg) != 0xf || - cec_msg_destination(msg) == 0xf) { + if (cec_msg_destination(msg) == 0xf) { dprintk(1, "cec_transmit_msg: invalid poll message\n"); return -EINVAL; } @@ -634,7 +633,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, dprintk(1, "cec_transmit_msg: destination is the adapter itself\n"); return -EINVAL; } - if (cec_msg_initiator(msg) != 0xf && + if (msg->len > 1 && adap->is_configured && !cec_has_log_addr(adap, cec_msg_initiator(msg))) { dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n", cec_msg_initiator(msg)); @@ -883,7 +882,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/* Send poll message */ msg.len = 1; - msg.msg[0] = 0xf0 | log_addr; + msg.msg[0] = (log_addr << 4) | log_addr; err = cec_transmit_msg_fh(adap, &msg, NULL, true);
/*
From: Ross Lagerwall ross.lagerwall@citrix.com
[ Upstream commit e2e004acc7cbe3c531e752a270a74e95cde3ea48 ]
This fixes a crash when running out of grant refs when creating many queues across many netdevs.
* If creating queues fails (i.e. there are no grant refs available), call xenbus_dev_fatal() to ensure that the xenbus device is set to the closed state.
* If no queues are created, don't call xennet_disconnect_backend as netdev->real_num_tx_queues will not have been set correctly.
* If setup_netfront() fails, ensure that all the queues created are cleaned up, not just those that have been set up.
* If any queues were set up and an error occurs, call xennet_destroy_queues() to clean up the napi context.
* If any fatal error occurs, unregister and destroy the netdev to avoid leaving around a half setup network device.
Signed-off-by: Ross Lagerwall ross.lagerwall@citrix.com Reviewed-by: Boris Ostrovsky boris.ostrovsky@oracle.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/xen-netfront.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cd442e46afb4..265e2095f8d9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1854,27 +1854,19 @@ static int talk_to_netback(struct xenbus_device *dev, xennet_destroy_queues(info);
err = xennet_create_queues(info, &num_queues); - if (err < 0) - goto destroy_ring; + if (err < 0) { + xenbus_dev_fatal(dev, err, "creating queues"); + kfree(info->queues); + info->queues = NULL; + goto out; + }
/* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = setup_netfront(dev, queue, feature_split_evtchn); - if (err) { - /* setup_netfront() will tidy up the current - * queue on error, but we need to clean up - * those already allocated. - */ - if (i > 0) { - rtnl_lock(); - netif_set_real_num_tx_queues(info->netdev, i); - rtnl_unlock(); - goto destroy_ring; - } else { - goto out; - } - } + if (err) + goto destroy_ring; }
again: @@ -1964,9 +1956,10 @@ static int talk_to_netback(struct xenbus_device *dev, xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); - kfree(info->queues); - info->queues = NULL; + xennet_destroy_queues(info); out: + unregister_netdev(info->netdev); + xennet_free_netdev(info->netdev); return err; }
From: Mart van Santen mart@greenhost.nl
[ Upstream commit ebf692f85ff78092cd238166d8d7ec51419f9c02 ]
This patch fixes an issue where the types of the counters in the queue(s) and the interface are not in sync (queue counters are int, interface counters are long), causing incorrect reporting of the tx/rx values of the vif interface and unclear counter overflows. This patch sets both counters to the u64 type.
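A small sketch (illustrative values, not driver code) of the truncation problem the patch removes: a 32-bit per-queue counter wraps at 4 GiB, and the wrapped value is what gets summed into the interface total.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t queue_rx_bytes = UINT32_MAX; /* 32-bit per-queue counter near wrap */
	uint64_t if_total_from_32 = 0, if_total_from_64 = 0;

	queue_rx_bytes += 1000;               /* wraps to 999 */
	if_total_from_32 += queue_rx_bytes;   /* interface total now misreported */

	/* With a u64 counter end to end, nothing is lost. */
	uint64_t queue_rx_bytes64 = (uint64_t)UINT32_MAX + 1000;
	if_total_from_64 += queue_rx_bytes64;

	printf("total via 32-bit queue counter: %" PRIu64 "\n", if_total_from_32);
	printf("total via 64-bit queue counter: %" PRIu64 "\n", if_total_from_64);
	return 0;
}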
Signed-off-by: Mart van Santen mart@greenhost.nl Reviewed-by: Paul Durrant paul.durrant@citrix.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/xen-netback/common.h | 8 ++++---- drivers/net/xen-netback/interface.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index cb7365bdf6e0..5b1d2e8402d9 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -113,10 +113,10 @@ struct xenvif_stats { * A subset of struct net_device_stats that contains only the * fields that are updated in netback.c for each queue. */ - unsigned int rx_bytes; - unsigned int rx_packets; - unsigned int tx_bytes; - unsigned int tx_packets; + u64 rx_bytes; + u64 rx_packets; + u64 tx_bytes; + u64 tx_packets;
/* Additional stats used by xenvif */ unsigned long rx_gso_checksum_fixup; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 5bfaf5578810..618013e7f87b 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -225,10 +225,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned long rx_bytes = 0; - unsigned long rx_packets = 0; - unsigned long tx_bytes = 0; - unsigned long tx_packets = 0; + u64 rx_bytes = 0; + u64 rx_packets = 0; + u64 tx_bytes = 0; + u64 tx_packets = 0; unsigned int index;
spin_lock(&vif->lock);
From: Rui Sousa rui.sousa@nxp.com
[ Upstream commit 01f8902bcf3ff124d0aeb88a774180ebcec20ace ]
Fix hardware setup of the multicast address hash:
- Never clear the hardware hash (to avoid packet loss)
- Construct the hash register values in software and then write once to hardware (see the sketch after this list)
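A compact userspace sketch of the build-in-software-then-write-once pattern; the hash values and the write_reg() helper are stand-ins, not the FEC code.

#include <stdint.h>
#include <stdio.h>

static void write_reg(const char *name, uint32_t val)
{
	printf("%s <= 0x%08x\n", name, val); /* stand-in for a single writel() */
}

int main(void)
{
	/* Pretend these are the 6-bit hash values of the multicast list. */
	const unsigned int hashes[] = { 3, 31, 32, 63 };
	uint32_t hash_high = 0, hash_low = 0;

	/* Accumulate the 64-bit filter in two local words... */
	for (unsigned int i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++) {
		unsigned int hash = hashes[i] & 0x3f;

		if (hash > 31)
			hash_high |= 1u << (hash - 32);
		else
			hash_low |= 1u << hash;
	}

	/* ...then program the hardware with one write per register, so the
	 * filter is never left cleared in between.
	 */
	write_reg("GRP_HASH_TABLE_HIGH", hash_high);
	write_reg("GRP_HASH_TABLE_LOW", hash_low);
	return 0;
}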
Signed-off-by: Rui Sousa rui.sousa@nxp.com Signed-off-by: Fugang Duan fugang.duan@nxp.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin alexander.levin@verizon.com --- drivers/net/ethernet/freescale/fec_main.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 12aef1b15356..849b8712ec81 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2923,6 +2923,7 @@ static void set_multicast_list(struct net_device *ndev) struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; + unsigned int hash_high = 0, hash_low = 0;
if (ndev->flags & IFF_PROMISC) { tmp = readl(fep->hwp + FEC_R_CNTRL); @@ -2945,11 +2946,7 @@ static void set_multicast_list(struct net_device *ndev) return; }
- /* Clear filter and add the addresses in hash register - */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - + /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ crc = 0xffffffff; @@ -2967,16 +2964,14 @@ static void set_multicast_list(struct net_device *ndev) */ hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
- if (hash > 31) { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - tmp |= 1 << (hash - 32); - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - } else { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); - tmp |= 1 << hash; - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - } + if (hash > 31) + hash_high |= 1 << (hash - 32); + else + hash_low |= 1 << hash; } + + writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); }
/* Set a MAC change in hardware. */