diff --git a/Makefile b/Makefile
index 1c565572bfb2..7cd8862d854e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 136
+SUBLEVEL = 137
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 37bd41ff8dff..151c0220047d 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -195,16 +195,15 @@
 		#size-cells = <1>;
 		ranges;
 
-		vic: intc@10140000 {
+		vic: interrupt-controller@10140000 {
 			compatible = "arm,versatile-vic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
 			reg = <0x10140000 0x1000>;
-			clear-mask = <0xffffffff>;
 			valid-mask = <0xffffffff>;
 		};
 
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			compatible = "arm,versatile-sic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 06a0fdf24026..e7e751a858d8 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@
 
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 377157656a8b..5d35b9656b67 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -475,8 +475,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
 		if (reinject) {
 			/*
 			 * On vmentry, vcpu->arch.exception.pending is only
@@ -7592,6 +7590,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+		vcpu->arch.exception.error_code = false;
+	kvm_x86_ops->queue_exception(vcpu);
+}
+
 static int inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -7599,7 +7604,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.injected)
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	/*
 	 * Do not inject an NMI or interrupt if there is a pending
 	 * exception.  Exceptions and interrupts are recognized at
@@ -7665,7 +7670,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 			}
 		}
 
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	}
 
 	/* Don't consider new event if we re-injected an event */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 7b6903bad408..48e6e2b48924 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -54,7 +54,6 @@ enum scmi_error_codes {
 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-	SCMI_ERR_MAX
 };
 
 /* List of all SCMI devices active in system */
@@ -176,8 +175,10 @@ static const int scmi_linux_errmap[] = {
 
 static inline int scmi_to_linux_errno(int errno)
 {
-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-		return scmi_linux_errmap[-errno];
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
 	return -EIO;
 }
 
@@ -693,8 +694,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
 	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+		dev_err(dev,
+			"Invalid maximum messages %d, not in range [1 - %lu]\n",
 			desc->max_msg, MSG_TOKEN_MAX);
 		return -EINVAL;
 	}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index bf6b4f71dc58..defee1d208d2 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -498,8 +498,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 	p = buf;
 	while (bytes_left >= sizeof(*p)) {
 		info->speed = le64_to_cpu(p->LinkSpeed);
-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4af318fbda77..ef9498a6e88a 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b..c0a73a6ffb28 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index dcc2aab1b2c4..25ac9a8bb57a 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE	256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index c33324686d89..bcf820ce0e02 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -421,14 +421,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -444,6 +442,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
diff --git a/fs/internal.h b/fs/internal.h
index 7651e8b8ef13..61aed95f83d1 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -52,7 +52,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
 
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index c04bad4b2b43..10c4c1e80124 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -140,23 +140,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_hole_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_hole_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
 			break;
-
-		offset += ret;
-		length -= ret;
 	}
 
 	return offset;
@@ -186,27 +183,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_data_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_data_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
-			break;
-
+			return offset;
 		offset += ret;
-		length -= ret;
 	}
 
-	if (length <= 0)
-		return -ENXIO;
-	return offset;
+	/* We've reached the end of the file without finding data */
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e5c14e2c53d3..ba8a58754340 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -134,6 +134,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
+extern void fc_drop_locked(struct fs_context *fc);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 86e028388bad..9899b9af7f22 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 06e1deeef464..8c6b04f9f6cb 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -328,8 +328,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -337,7 +336,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) ||	\
 	 ipv4_is_multicast(a) ||		\
 	 ipv4_is_zeronet(a) ||			\
-	 ipv4_is_test_198(a) ||			\
 	 ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions. */
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 9329f725d22b..2d0ef613ca07 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1228,9 +1228,7 @@ int cgroup1_get_tree(struct fs_context *fc)
 		ret = cgroup_do_get_tree(fc);
 
 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-		struct super_block *sb = fc->root->d_sb;
-		dput(fc->root);
-		deactivate_locked_super(sb);
+		fc_drop_locked(fc);
 		ret = 1;
 	}
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8f41499d8257..6aeb53b4e19f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3660,15 +3660,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
 
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
 
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
diff --git a/net/802/garp.c b/net/802/garp.c
index 400bd857e5f5..f6012f8e59f0 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
 	kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 2cfdfbfbb2ed..5b804dbe2d08 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 68f84fac63e0..452883b28aba 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1098,7 +1098,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		if (val < 0)
 			ret = -EINVAL;
 		else
-			sk->sk_ll_usec = val;
+			WRITE_ONCE(sk->sk_ll_usec, val);
 	}
 	break;
 #endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 33444d985681..fc913f09606d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -59,10 +59,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	int delta = hh_len - skb_headroom(skb);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;
 
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(delta > 0) && dev->header_ops) {
+		/* pskb_expand_head() might crash, if skb is shared */
+		if (skb_shared(skb)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			if (likely(nskb)) {
+				if (skb->sk)
+					skb_set_owner_w(nskb, skb->sk);
+				consume_skb(skb);
+			} else {
+				kfree_skb(skb);
+			}
+			skb = nskb;
+		}
+		if (skb &&
+		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+		}
+		if (!skb) {
+			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			return -ENOMEM;
+		}
+	}
+
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 7f8702abc7bf..bb370a7948f4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9f96826eb3ba..52ee3a9bb709 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1512,6 +1512,53 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 	return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+	/*
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs).  This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored.  While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd.  The following lock/unlock
+	 * pair is to ensure serialization with garbage collection.  It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate.  If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
+	 */
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
 	int err = 0;
@@ -2137,7 +2184,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);
 
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2378,7 +2425,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
 			/* It is questionable, see note in unix_dgram_recvmsg.
 			 */
 			if (UNIXCB(skb).fp)
-				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+				unix_peek_fds(&scm, skb);
 
 			sk_peek_offset_fwd(sk, chunk);
 
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 812fc97bb1a9..add0ef37ba96 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
 EXTRA_WARNINGS += -Wwrite-strings
 EXTRA_WARNINGS += -Wformat
 
-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
-
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
 # environment or command line. This is necessary for CC and AR
@@ -52,12 +50,22 @@ define allow-override
   $(eval $(1) = $(2)))
 endef
 
+ifneq ($(LLVM),)
+$(call allow-override,CC,clang)
+$(call allow-override,AR,llvm-ar)
+$(call allow-override,LD,ld.lld)
+$(call allow-override,CXX,clang++)
+$(call allow-override,STRIP,llvm-strip)
+else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
 $(call allow-override,LD,$(CROSS_COMPILE)ld)
 $(call allow-override,CXX,$(CROSS_COMPILE)g++)
 $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+endif
+
+CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
 HOSTAR ?= llvm-ar
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 17ac167823a6..9ba7feffe344 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -141,7 +141,7 @@ static void anon_allocate_area(void **alloc_area)
 {
 	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
 			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-	if (*alloc_area == MAP_FAILED)
+	if (*alloc_area == MAP_FAILED) {
 		fprintf(stderr, "mmap of anonymous memory failed");
 		*alloc_area = NULL;
 	}