From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
[ Upstream commit 9f2df09a33aa2c76ce6385d382693f98d7f2f07e ]
syzbot is reporting a too-large memory allocation at bfs_fill_super() [1]. Since the file system image is corrupted such that bfs_sb->s_start == 0, bfs_fill_super() tries to allocate 8MB of contiguous memory. Fix this by adding a sanity check on bfs_sb->s_start, and by using __GFP_NOWARN together with an explicit printf() on allocation failure.
[1] https://syzkaller.appspot.com/bug?id=16a87c236b951351374a84c8a32f40edbc034e9...
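For reference, a minimal userspace sketch of the arithmetic that yields the 8MB request, assuming BFS_BSIZE == 512, a 64-byte struct bfs_inode and BFS_ROOT_INO == 2 (the fs/bfs values of the time); this only mirrors the kernel computation, it is not the kernel code:

```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t s_start = 0;	/* corrupted superblock field */

	/* mirrors: (s_start - BFS_BSIZE) / sizeof(struct bfs_inode)
	 *          + BFS_ROOT_INO - 1, done in 32-bit unsigned math */
	uint32_t si_lasti = (s_start - 512) / 64 + 2 - 1;	/* wraps around */
	uint32_t imap_len = si_lasti / 8 + 1;

	printf("si_lasti=%u imap_len=%u bytes (%u MB)\n",
	       si_lasti, imap_len, imap_len >> 20);	/* the 8 MB kzalloc() */
	return 0;
}
```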
Link: http://lkml.kernel.org/r/1525862104-3407-1-git-send-email-penguin-kernel@I-l...
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reported-by: syzbot <syzbot+71c6b5d68e91149fc8a4@syzkaller.appspotmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Tigran Aivazian <aivazian.tigran@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/bfs/inode.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 9a69392f1fb3..d81c148682e7 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 
 	s->s_magic = BFS_MAGIC;
 
-	if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
+	if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
+	    le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
 		printf("Superblock is corrupted\n");
 		goto out1;
 	}
@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 		sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
 	imap_len = (info->si_lasti / 8) + 1;
-	info->si_imap = kzalloc(imap_len, GFP_KERNEL);
-	if (!info->si_imap)
+	info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!info->si_imap) {
+		printf("Cannot allocate %u bytes\n", imap_len);
 		goto out1;
+	}
 	for (i = 0; i < BFS_ROOT_INO; i++)
 		set_bit(i, info->si_imap);
From: Colin Ian King <colin.king@canonical.com>
[ Upstream commit 8c6c9bed8773375b1d54ccca2911ec892c59db5d ]
There is a NULL check on dst_file->private_data which suggests it can potentially be NULL. However, before this check, the pointer smb_file_target is derived from dst_file->private_data and dereferenced in the call to tlink_tcon(), hence there is a potential NULL pointer dereference.
Fix this by assigning smb_file_target and target_tcon after the null pointer sanity checks.
Detected by CoverityScan, CID#1475302 ("Dereference before null check")
Fixes: 04b38d601239 ("vfs: pull btrfs clone API to vfs layer")
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/cifs/cifsfs.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 7065426b3280..fb32f3d6925e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -981,8 +981,8 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
 	struct inode *src_inode = file_inode(src_file);
 	struct inode *target_inode = file_inode(dst_file);
 	struct cifsFileInfo *smb_file_src = src_file->private_data;
-	struct cifsFileInfo *smb_file_target = dst_file->private_data;
-	struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
+	struct cifsFileInfo *smb_file_target;
+	struct cifs_tcon *target_tcon;
 	unsigned int xid;
 	int rc;
 
@@ -996,6 +996,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
 		goto out;
 	}
 
+	smb_file_target = dst_file->private_data;
+	target_tcon = tlink_tcon(smb_file_target->tlink);
+
 	/*
 	 * Note: cifs case is easier than btrfs since server responsible for
 	 * checks for proper open modes and file type and if it wants
From: Ronnie Sahlberg <lsahlber@redhat.com>
[ Upstream commit 0c5d6cb6643f48ad3775322f3ebab6c7eb67484e ]
If the application buffer was too small to fit all the names, we would still count the number of bytes and return that count from listxattr. This would then trigger a BUG in usercopy.c.
Fix the computation of the size so that we return -ERANGE correctly when the buffer is too small.
This fixes the kernel BUG for xfstest generic/377
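For context, the fix restores the standard listxattr(2) contract that well-behaved callers rely on; a minimal sketch (hypothetical helper, error handling trimmed):

```
#include <stdlib.h>
#include <sys/xattr.h>

/* size == 0 asks only for the required buffer size; a non-zero but
 * too-small buffer must fail with ERANGE instead of reporting more
 * bytes than were actually copied. */
ssize_t list_names(const char *path, char **out)
{
	ssize_t len = listxattr(path, NULL, 0);	/* probe required size */

	if (len <= 0)
		return len;
	*out = malloc(len);
	if (!*out)
		return -1;
	return listxattr(path, *out, len);	/* ERANGE if it grew meanwhile */
}
```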
Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Reviewed-by: Aurelien Aptel <aaptel@suse.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/cifs/smb2ops.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 89985a0a6819..812da3e56a22 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -686,6 +686,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
 	int rc = 0;
 	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
 	char *name, *value;
+	size_t buf_size = dst_size;
 	size_t name_len, value_len, user_name_len;
 
 	while (src_size > 0) {
@@ -721,9 +722,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
 			/* 'user.' plus a terminating null */
 			user_name_len = 5 + 1 + name_len;
 
-			rc += user_name_len;
-
-			if (dst_size >= user_name_len) {
+			if (buf_size == 0) {
+				/* skip copy - calc size only */
+				rc += user_name_len;
+			} else if (dst_size >= user_name_len) {
 				dst_size -= user_name_len;
 				memcpy(dst, "user.", 5);
 				dst += 5;
@@ -731,8 +733,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
 				dst += name_len;
 				*dst = 0;
 				++dst;
-			} else if (dst_size == 0) {
-				/* skip copy - calc size only */
+				rc += user_name_len;
 			} else {
 				/* stop before overrun buffer */
 				rc = -ERANGE;
From: Anders Roxell <anders.roxell@linaro.org>
[ Upstream commit 966866892cf89d606544bca22d584ba2ef9ec208 ]
Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages") has successfully identified code that leaves a page with W+X permissions.
[    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
[    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
[    3.246141] Modules linked in:
[    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
[    3.247008] Hardware name: linux,dummy-virt (DT)
[    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
[    3.247623] pc : note_page+0x410/0x420
[    3.247898] lr : note_page+0x410/0x420
[    3.248071] sp : ffff00000804bcd0
[    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
[    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
[    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
[    3.249109] x23: 0000000000000004 x22: ffff000000d91000
[    3.249396] x21: ffff000000d90000 x20: 0000000000000000
[    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
[    3.249924] x17: 0000000000000000 x16: 0000000000000000
[    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
[    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
[    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
[    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
[    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
[    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
[    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
[    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
[    3.252664] Call trace:
[    3.252953]  note_page+0x410/0x420
[    3.253186]  walk_pgd+0x12c/0x238
[    3.253417]  ptdump_check_wx+0x68/0xf8
[    3.253637]  mark_rodata_ro+0x68/0x98
[    3.253847]  kernel_init+0x38/0x160
[    3.254103]  ret_from_fork+0x10/0x18
kprobes allocates a writable, executable page with module_alloc() in order to store executable code. Rework this so that the allocated page is made read-only (RO) and instructions are written through the text-patching API instead. Inspired by commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Suggested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
[catalin.marinas@arm.com: removed unnecessary casts]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index e78c3ef04d95..b5a367d4bba6 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -23,7 +23,9 @@
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/sched/debug.h>
+#include <linux/set_memory.h>
 #include <linux/stringify.h>
+#include <linux/vmalloc.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
@@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+	void *addrs[1];
+	u32 insns[1];
+
+	addrs[0] = addr;
+	insns[0] = opcode;
+
+	return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
-	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
+	patch_text(p->ainsn.api.insn, p->opcode);
 
 	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
 			   (uintptr_t) (p->ainsn.api.insn) +
@@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+void *alloc_insn_page(void)
 {
-	void *addrs[1];
-	u32 insns[1];
+	void *page;
 
-	addrs[0] = (void *)addr;
-	insns[0] = (u32)opcode;
+	page = vmalloc_exec(PAGE_SIZE);
+	if (page)
+		set_memory_ro((unsigned long)page, 1);
 
-	return aarch64_insn_patch_text(addrs, insns, 1);
+	return page;
 }
 
 /* arm kprobe: install breakpoint in text */
From: Keith Busch <keith.busch@intel.com>
[ Upstream commit 9fe5c59ff6a1e5e26a39b75489a1420e7eaaf0b1 ]
The nvme pci driver had been adding its CMB resource to the P2P DMA subsystem every time on a controller reset. This results in the following warning:
------------[ cut here ]------------
nvme 0000:00:03.0: Conflicting mapping in same section
WARNING: CPU: 7 PID: 81 at kernel/memremap.c:155 devm_memremap_pages+0xa6/0x380
...
Call Trace:
 pci_p2pdma_add_resource+0x153/0x370
 nvme_reset_work+0x28c/0x17b1 [nvme]
 ? add_timer+0x107/0x1e0
 ? dequeue_entity+0x81/0x660
 ? dequeue_entity+0x3b0/0x660
 ? pick_next_task_fair+0xaf/0x610
 ? __switch_to+0xbc/0x410
 process_one_work+0x1cf/0x350
 worker_thread+0x215/0x3d0
 ? process_one_work+0x350/0x350
 kthread+0x107/0x120
 ? kthread_park+0x80/0x80
 ret_from_fork+0x1f/0x30
---[ end trace f7ea76ac6ee72727 ]---
nvme nvme0: failed to register the CMB
This patch fixes this by registering the CMB with P2P only once.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/nvme/host/pci.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d668682f91df..da18e0ac9fa2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1647,6 +1647,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int bar;
 
+	if (dev->cmb_size)
+		return;
+
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!dev->cmbsz)
 		return;
@@ -2129,7 +2132,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-	nvme_release_cmb(dev);
 	pci_free_irq_vectors(pdev);
 
 	if (pci_is_enabled(pdev)) {
@@ -2577,6 +2579,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_stop_ctrl(&dev->ctrl);
 	nvme_remove_namespaces(&dev->ctrl);
 	nvme_dev_disable(dev, true);
+	nvme_release_cmb(dev);
 	nvme_free_host_mem(dev);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
From: Ming Lei <ming.lei@redhat.com>
[ Upstream commit 153fcd5f6d93b8e1e4040b1337f564a10f8d93af ]
brd_free() may be called in the failure path on a brd instance whose disk hasn't been added yet, so the release handler of the gendisk may free the associated request_queue early, causing the following use-after-free [1].
This patch fixes the issue by associating the gendisk with the request_queue just before adding the disk.
[1]
KASAN: use-after-free Read in del_timer_sync

Non-volatile memory driver v1.3
Linux agpgart interface v0.103
[drm] Initialized vgem 1.0.0 20120112 for virtual device on minor 0
usbcore: registered new interface driver udl
==================================================================
BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
Read of size 8 at addr ffff8801d1b6b540 by task swapper/0/1

CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0+ #88
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x244/0x39d lib/dump_stack.c:113
 print_address_description.cold.7+0x9/0x1ff mm/kasan/report.c:256
 kasan_report_error mm/kasan/report.c:354 [inline]
 kasan_report.cold.8+0x242/0x309 mm/kasan/report.c:412
 __asan_report_load8_noabort+0x14/0x20 mm/kasan/report.c:433
 __lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
 lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
 del_timer_sync+0xb7/0x270 kernel/time/timer.c:1283
 blk_cleanup_queue+0x413/0x710 block/blk-core.c:809
 brd_free+0x5d/0x71 drivers/block/brd.c:422
 brd_init+0x2eb/0x393 drivers/block/brd.c:518
 do_one_initcall+0x145/0x957 init/main.c:890
 do_initcall_level init/main.c:958 [inline]
 do_initcalls init/main.c:966 [inline]
 do_basic_setup init/main.c:984 [inline]
 kernel_init_freeable+0x5c6/0x6b9 init/main.c:1148
 kernel_init+0x11/0x1ae init/main.c:1068
 ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:350
Reported-by: syzbot <syzbot+3701447012fe951dabb2@syzkaller.appspotmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/block/brd.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index df8103dd40ac..c18586fccb6f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i)
 	disk->first_minor	= i * max_part;
 	disk->fops		= &brd_fops;
 	disk->private_data	= brd;
-	disk->queue		= brd->brd_queue;
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "ram%d", i);
 	set_capacity(disk, rd_size * 2);
-	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
+	brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
 
 	/* Tell the block layer that this is not a rotational device */
-	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
 
 	return brd;
 
@@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new)
 
 	brd = brd_alloc(i);
 	if (brd) {
+		brd->brd_disk->queue = brd->brd_queue;
 		add_disk(brd->brd_disk);
 		list_add_tail(&brd->brd_list, &brd_devices);
 	}
@@ -503,8 +503,14 @@ static int __init brd_init(void)
 
 	/* point of no return */
 
-	list_for_each_entry(brd, &brd_devices, brd_list)
+	list_for_each_entry(brd, &brd_devices, brd_list) {
+		/*
+		 * associate with queue just before adding disk for
+		 * avoiding to mess up failure path
+		 */
+		brd->brd_disk->queue = brd->brd_queue;
 		add_disk(brd->brd_disk);
+	}
 
 	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
 			    THIS_MODULE, brd_probe, NULL, NULL);
From: Daniel Borkmann <daniel@iogearbox.net>
[ Upstream commit 0962590e553331db2cc0aef2dc35c57f6300dbbe ]
ALU operations on pointers such as scalar_reg += map_value_ptr are handled in adjust_ptr_min_max_vals(). The problem, however, is that map_ptr and range in the register state share a union, so transferring state through dst_reg->range = ptr_reg->range is buggy: any new map_ptr in the dst_reg is then truncated (or nulled) for subsequent checks. Fix this by adding a raw member and using it for copying state over to dst_reg.
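The underlying hazard is plain union aliasing; a minimal userspace sketch for a 64-bit little-endian machine (field names mirror the union in struct bpf_reg_state, values are made up):

```
#include <stdio.h>
#include <string.h>

struct reg_state {
	union {
		unsigned short range;	/* valid for packet pointers */
		void *map_ptr;		/* valid for map pointers */
		unsigned long raw;	/* the fix: widest member */
	};
};

int main(void)
{
	struct reg_state src, dst;

	memset(&src, 0, sizeof(src));
	memset(&dst, 0, sizeof(dst));
	src.map_ptr = (void *)0x1122334455667788UL;

	dst.range = src.range;	/* old code: copies only 2 bytes */
	printf("via range: %p\n", dst.map_ptr);	/* 0x7788: truncated */

	dst.raw = src.raw;	/* fixed: copies the full member */
	printf("via raw:   %p\n", dst.map_ptr);	/* pointer preserved */
	return 0;
}
```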
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Edward Cree <ecree@solarflare.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/bpf_verifier.h |  3 +++
 kernel/bpf/verifier.c        | 10 ++++++----
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 38b04f559ad3..1fd6fa822d2c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -50,6 +50,9 @@ struct bpf_reg_state {
 		 *   PTR_TO_MAP_VALUE_OR_NULL
 		 */
 		struct bpf_map *map_ptr;
+
+		/* Max size from any of the above. */
+		unsigned long raw;
 	};
 	/* Fixed part of pointer offset, pointer types only */
 	s32 off;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 465952a8e465..b046564cc18d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2762,7 +2762,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 			dst_reg->umax_value = umax_ptr;
 			dst_reg->var_off = ptr_reg->var_off;
 			dst_reg->off = ptr_reg->off + smin_val;
-			dst_reg->range = ptr_reg->range;
+			dst_reg->raw = ptr_reg->raw;
 			break;
 		}
 		/* A new variable offset is created.  Note that off_reg->off
@@ -2792,10 +2792,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		}
 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
 		dst_reg->off = ptr_reg->off;
+		dst_reg->raw = ptr_reg->raw;
 		if (reg_is_pkt_pointer(ptr_reg)) {
 			dst_reg->id = ++env->id_gen;
 			/* something was added to pkt_ptr, set range to zero */
-			dst_reg->range = 0;
+			dst_reg->raw = 0;
 		}
 		break;
 	case BPF_SUB:
@@ -2824,7 +2825,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 			dst_reg->var_off = ptr_reg->var_off;
 			dst_reg->id = ptr_reg->id;
 			dst_reg->off = ptr_reg->off - smin_val;
-			dst_reg->range = ptr_reg->range;
+			dst_reg->raw = ptr_reg->raw;
 			break;
 		}
 		/* A new variable offset is created.  If the subtrahend is known
@@ -2850,11 +2851,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		}
 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
 		dst_reg->off = ptr_reg->off;
+		dst_reg->raw = ptr_reg->raw;
 		if (reg_is_pkt_pointer(ptr_reg)) {
 			dst_reg->id = ++env->id_gen;
 			/* something was added to pkt_ptr, set range to zero */
 			if (smin_val < 0)
-				dst_reg->range = 0;
+				dst_reg->raw = 0;
 		}
 		break;
 	case BPF_AND:
From: Huazhong Tan <tanhuazhong@huawei.com>
[ Upstream commit 29118ab962d5476fdc65fae312ac38db68092d78 ]
Since hclgevf_reset_wait() is used to wait for the hardware to complete the reset, it is not necessary to hold the rtnl_lock during hclgevf_reset_wait(). So this patch releases the lock for the duration of hclgevf_reset_wait().
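The shape of the fix is the generic "drop the lock around a slow wait" idiom; a hedged sketch with hypothetical helpers standing in for the hclgevf steps:

```
static int reset_device(struct my_dev *dev)
{
	int ret;

	rtnl_lock();
	stop_traffic(dev);		/* needs rtnl_lock */
	rtnl_unlock();

	ret = wait_for_hw_reset(dev);	/* slow hardware poll: no rtnl_lock */

	rtnl_lock();
	if (!ret)
		ret = rebuild_stack(dev);	/* needs rtnl_lock again */
	rtnl_unlock();
	return ret;
}
```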
Fixes: 6988eb2a9b77 ("net: hns3: Add support to reset the enet/ring mgmt layer")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9c0091f2addf..57958273945c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1062,6 +1062,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 	/* bring down the nic to stop any ongoing TX/RX */
 	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
+	rtnl_unlock();
+
 	/* check if VF could successfully fetch the hardware reset completion
 	 * status from the hardware
 	 */
@@ -1073,12 +1075,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 			ret);
 
 		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
+		rtnl_lock();
 		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 
 		rtnl_unlock();
 		return ret;
 	}
 
+	rtnl_lock();
+
 	/* now, re-initialize the nic client and ae device*/
 	ret = hclgevf_reset_stack(hdev);
 	if (ret)
From: Huazhong Tan <tanhuazhong@huawei.com>
[ Upstream commit a963052e539887df481d4d3a6ad4c92ca6461852 ]
Since hclge_reset_wait() is used to wait for the hardware to complete the reset, it is not necessary to hold the rtnl_lock during hclge_reset_wait(). So this patch releases the lock for the duration of hclge_reset_wait().
Fixes: 6d4fab39533f ("net: hns3: Reset net device with rtnl_lock")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8577dfc799ad..18056c08f62a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2799,14 +2799,17 @@ static void hclge_reset(struct hclge_dev *hdev)
 	handle = &hdev->vport[0].nic;
 	rtnl_lock();
 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	rtnl_unlock();
 
 	if (!hclge_reset_wait(hdev)) {
+		rtnl_lock();
 		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 		hclge_reset_ae_dev(hdev->ae_dev);
 		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
 
 		hclge_clear_reset_cause(hdev);
 	} else {
+		rtnl_lock();
 		/* schedule again to check pending resets later */
 		set_bit(hdev->reset_type, &hdev->reset_pending);
 		hclge_reset_task_schedule(hdev);
From: Huazhong Tan <tanhuazhong@huawei.com>
[ Upstream commit 3c88ed1d798da355859ca083d3884a16ce0841f2 ]
In a multi-core machine, the mailbox service and the reset service may be executed at the same time. The reset service re-initializes the command queue; before that happens, the mailbox handler can only fetch invalid messages.
The HCLGE_STATE_CMD_DISABLE flag means that the command queue is not available and needs to be reinitialized. Therefore, when the mailbox handler recognizes this flag, it should not process the command.
Fixes: dde1a86e93ca ("net: hns3: Add mailbox support to PF driver")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f34851c91eb3..e08e82020402 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -458,6 +458,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 
 	/* handle all the mailbox requests in the queue */
 	while (!hclge_cmd_crq_empty(&hdev->hw)) {
+		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+			dev_warn(&hdev->pdev->dev,
+				 "command queue needs re-initializing\n");
+			return;
+		}
+
 		desc = &crq->desc[crq->next_to_use];
 		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
From: Huazhong Tan <tanhuazhong@huawei.com>
[ Upstream commit b2f74dbaf12bf59ff35d451005b3cdee78232ff0 ]
The spin lock of the command queue only needs to be initialized once, when the driver initializes the command queue. It is not necessary to initialize the spin lock when resetting. At the same time, modifications of the queue members should be performed while holding the lock.
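Generically, the rule being applied here: lock objects are initialized exactly once at setup time, and per-reset state is then only touched under the (already initialized) lock. A sketch with hypothetical types:

```
struct cmd_queue {
	spinlock_t lock;
	int next_to_use;
	int next_to_clean;
};

static void queue_setup(struct cmd_queue *q)	/* once, at probe */
{
	spin_lock_init(&q->lock);	/* never re-run on reset: another
					 * CPU may be holding the lock */
}

static void queue_reinit(struct cmd_queue *q)	/* on every reset */
{
	spin_lock_bh(&q->lock);		/* serialize against other users */
	q->next_to_use = 0;
	q->next_to_clean = 0;
	spin_unlock_bh(&q->lock);
}
```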
Fixes: 3efb960f056d ("net: hns3: Refactor the initialization of command queue")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ac13cb2b168e..68026a5ad7e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev)
 {
 	int ret;
 
+	/* Setup the lock for command queue */
+	spin_lock_init(&hdev->hw.cmq.csq.lock);
+	spin_lock_init(&hdev->hw.cmq.crq.lock);
+
 	/* Setup the queue entries for use cmd queue */
 	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
 	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
@@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	u32 version;
 	int ret;
 
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
 	hdev->hw.cmq.csq.next_to_clean = 0;
 	hdev->hw.cmq.csq.next_to_use = 0;
 	hdev->hw.cmq.crq.next_to_clean = 0;
 	hdev->hw.cmq.crq.next_to_use = 0;
 
-	/* Setup the lock for command queue */
-	spin_lock_init(&hdev->hw.cmq.csq.lock);
-	spin_lock_init(&hdev->hw.cmq.crq.lock);
-
 	hclge_cmd_init_regs(&hdev->hw);
 	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
 	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
From: Radoslaw Tyl <radoslawx.tyl@intel.com>
[ Upstream commit 6702185c1ffec3421181b5e24491e3fac920cb61 ]
This change resolves a driver bug where the driver logs a "Spoofed packets detected" message. This can occur on the PF (host) when a VF with VLAN+MACVLAN enabled is restarted with a different MAC address.
MAC and VLAN anti-spoofing filters are to be enabled together.
Signed-off-by: Radoslaw Tyl <radoslawx.tyl@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Acked-by: Piotr Skajewski <piotrx.skajewski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 3c6f01c41b78..eea63a99f29c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -721,8 +721,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 		ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
 				adapter->default_up, vf);
 
-	if (vfinfo->spoofchk_enabled)
+	if (vfinfo->spoofchk_enabled) {
 		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+		hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
+	}
 }
 
 /* reset multicast table array for vf */
From: Jan Kara <jack@suse.cz>
[ Upstream commit f2c57d91b0d96aa13ccff4e3b178038f17b00658 ]
In DAX mode a write pagefault can race with write(2) in the following way:
CPU0                                            CPU1
write fault for mapped zero page (hole)
dax_iomap_rw()
  iomap_apply()
    xfs_file_iomap_begin()
      - allocates blocks
    dax_iomap_actor()
      invalidate_inode_pages2_range()
        - invalidates radix tree entries
          in given range
                                                dax_iomap_pte_fault()
                                                  grab_mapping_entry()
                                                    - no entry found, creates empty
                                                  ...
                                                  xfs_file_iomap_begin()
                                                    - finds already allocated block
                                                  ...
                                                  vmf_insert_mixed_mkwrite()
                                                    - WARNs and does nothing
                                                      because there is still zero
                                                      page mapped in PTE
        unmap_mapping_pages()
This race results in a WARN_ON from insert_pfn() and is occasionally triggered by fstest generic/344. Note that the race is otherwise harmless: before write(2) on CPU0 finishes, we will invalidate the page tables properly, and thus the mmap user will see the data modified by write(2) from that point on. So just restrict the warning to the case when the PFN in the PTE is not the zero page.
Link: http://lkml.kernel.org/r/20180824154542.26872-1-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/memory.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index c467102a5cbc..d988bae46479 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1787,10 +1787,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			 * in may not match the PFN we have mapped if the
 			 * mapped PFN is a writeable COW page.  In the mkwrite
 			 * case we are creating a writable PTE for a shared
-			 * mapping and we expect the PFNs to match.
+			 * mapping and we expect the PFNs to match. If they
+			 * don't match, we are likely racing with block
+			 * allocation and mapping invalidation so just skip the
+			 * update.
 			 */
-			if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
+			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
+				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
 				goto out_unlock;
+			}
 			entry = *pte;
 			goto out_mkwrite;
 		} else
From: David Hildenbrand <david@redhat.com>
[ Upstream commit 8df1d0e4a265f25dc1e7e7624ccdbcb4a6630c89 ]
add_memory() currently does not take the device_hotplug_lock; however, it is already called under the lock from

	arch/powerpc/platforms/pseries/hotplug-memory.c
	drivers/acpi/acpi_memhotplug.c

to synchronize against CPU hot-remove and similar.
In general, we should hold the device_hotplug_lock when adding memory to synchronize against online/offline request (e.g. from user space) - which already resulted in lock inversions due to device_lock() and mem_hotplug_lock - see 30467e0b3be ("mm, hotplug: fix concurrent memory hot-add deadlock"). add_memory()/add_memory_resource() will create memory block devices, so this really feels like the right thing to do.
Holding the device_hotplug_lock makes sure that a memory block device can really only be accessed (e.g. via .online/.state) from user space, once the memory has been fully added to the system.
The lock is not held yet in

	drivers/xen/balloon.c
	arch/powerpc/platforms/powernv/memtrace.c
	drivers/s390/char/sclp_cmd.c
	drivers/hv/hv_balloon.c

So, let's either use the locked variants or take the lock.
Don't export add_memory_resource(), as it once was exported to be used by XEN, which is never built as a module. If somebody requires it, we also have to export a locked variant (as device_hotplug_lock is never exported).
Link: http://lkml.kernel.org/r/20180925091457.28651-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Rashmica Gupta <rashmica.g@gmail.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Len Brown <lenb@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Cc: John Allen <jallen@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mathieu Malaterre <malat@debian.org>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: YASUAKI ISHIMATSU <yasu.isimatu@gmail.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../platforms/pseries/hotplug-memory.c |  2 +-
 drivers/acpi/acpi_memhotplug.c         |  2 +-
 drivers/base/memory.c                  |  9 ++++++--
 drivers/xen/balloon.c                  |  3 +++
 include/linux/memory_hotplug.h         |  1 +
 mm/memory_hotplug.c                    | 22 ++++++++++++++++---
 6 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c1578f54c626..79e074eac486 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -702,7 +702,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
 	nid = memory_add_physaddr_to_nid(lmb->base_addr);
 
 	/* Add the memory */
-	rc = add_memory(nid, lmb->base_addr, block_sz);
+	rc = __add_memory(nid, lmb->base_addr, block_sz);
 	if (rc) {
 		dlpar_remove_device_tree_lmb(lmb);
 		return rc;
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 6b0d3ef7309c..2ccfbb61ca89 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 		if (node < 0)
 			node = memory_add_physaddr_to_nid(info->start_addr);
 
-		result = add_memory(node, info->start_addr, info->length);
+		result = __add_memory(node, info->start_addr, info->length);
 
 		/*
 		 * If the memory block has been used by the kernel, add_memory()
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 817320c7c4c1..40cac122ec73 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -519,15 +519,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
 	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
 		return -EINVAL;
 
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		goto out;
+
 	nid = memory_add_physaddr_to_nid(phys_addr);
-	ret = add_memory(nid, phys_addr,
-			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+	ret = __add_memory(nid, phys_addr,
+			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);
 
 	if (ret)
 		goto out;
 
 	ret = count;
 out:
+	unlock_device_hotplug();
 	return ret;
 }
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb256036f..6bab019a82b1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -395,7 +395,10 @@ static enum bp_state reserve_additional_memory(void)
 	 * callers drop the mutex before trying again.
 	 */
 	mutex_unlock(&balloon_mutex);
+	/* add_memory_resource() requires the device_hotplug lock */
+	lock_device_hotplug();
 	rc = add_memory_resource(nid, resource, memhp_auto_online);
+	unlock_device_hotplug();
 	mutex_lock(&balloon_mutex);
 
 	if (rc) {
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 34a28227068d..16487052017d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -322,6 +322,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
 extern void __ref free_area_init_core_hotplug(int nid);
 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource, bool online);
 extern int arch_add_memory(int nid, u64 start, u64 size,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 38d94b703e9d..3e42226407c7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1111,7 +1111,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 	return device_online(&mem->dev);
 }
 
-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+/*
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations (triggered e.g. by sysfs).
+ *
+ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
+ */
 int __ref add_memory_resource(int nid, struct resource *res, bool online)
 {
 	u64 start, size;
@@ -1180,9 +1185,9 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
 	mem_hotplug_done();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory_resource);
 
-int __ref add_memory(int nid, u64 start, u64 size)
+/* requires device_hotplug_lock, see add_memory_resource() */
+int __ref __add_memory(int nid, u64 start, u64 size)
 {
 	struct resource *res;
 	int ret;
@@ -1196,6 +1201,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
 	release_memory_resource(res);
 	return ret;
 }
+
+int add_memory(int nid, u64 start, u64 size)
+{
+	int rc;
+
+	lock_device_hotplug();
+	rc = __add_memory(nid, start, size);
+	unlock_device_hotplug();
+
+	return rc;
+}
 EXPORT_SYMBOL_GPL(add_memory);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
From: Jann Horn <jannh@google.com>
[ Upstream commit b10298d56c9623f9b173f19959732d3184b35f4f ]
fill_with_dentries() failed to propagate errors up to reiserfs_for_each_xattr() properly. Plumb them through.
Note that reiserfs_for_each_xattr() is only used by reiserfs_delete_xattrs() and reiserfs_chown_xattrs(). The result of reiserfs_delete_xattrs() is discarded anyway; the only difference there is whether a warning is printed to dmesg. The result of reiserfs_chown_xattrs() does matter because it can block chowning of the file to which the xattrs belong; but either way, the resulting state can have misaligned ownership, so my patch doesn't improve things greatly.
Credit for making me look at this code goes to Al Viro, who pointed out that the ->actor calling convention is suboptimal and should be changed.
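The fix uses the standard pattern for dir_context actors: the callback's return value only stops the directory walk, so the real error is stashed in the embedding structure and re-checked by the caller. A generic sketch (hypothetical names; filldir_t signature as in this kernel generation):

```
struct my_buf {
	struct dir_context ctx;	/* embedded, so container_of() works */
	int err;		/* out-of-band error slot */
};

static int my_actor(struct dir_context *ctx, const char *name, int namelen,
		    loff_t offset, u64 ino, unsigned int d_type)
{
	struct my_buf *buf = container_of(ctx, struct my_buf, ctx);

	if (namelen <= 0) {		/* stand-in for a real failure */
		buf->err = -EIO;	/* record the reason out of band */
		return -EIO;		/* non-zero also stops the walk */
	}
	return 0;
}

/* caller side: */
	err = iterate_dir(file, &buf.ctx);
	if (!err && buf.err)
		err = buf.err;		/* propagate what the actor saw */
```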
Link: http://lkml.kernel.org/r/20180802163335.83312-1-jannh@google.com
Signed-off-by: Jann Horn <jannh@google.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jeff Mahoney <jeffm@suse.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/reiserfs/xattr.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 48cdfc81fe10..32d8986c26fb 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
 	struct dir_context ctx;
 	struct dentry *xadir;
 	int count;
+	int err;
 	struct dentry *dentries[8];
 };
 
@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
 
 	dentry = lookup_one_len(name, dbuf->xadir, namelen);
 	if (IS_ERR(dentry)) {
+		dbuf->err = PTR_ERR(dentry);
 		return PTR_ERR(dentry);
 	} else if (d_really_is_negative(dentry)) {
 		/* A directory entry exists, but no file? */
@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
 			       "not found for file %pd.\n",
 			       dentry, dbuf->xadir);
 		dput(dentry);
+		dbuf->err = -EIO;
 		return -EIO;
 	}
 
@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 		err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
 		if (err)
 			break;
+		if (buf.err) {
+			err = buf.err;
+			break;
+		}
 		if (!buf.count)
 			break;
 		for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
From: Ernesto A. Fernández <ernesto.mnd.fernandez@gmail.com>
[ Upstream commit d057c036672f33d43a5f7344acbb08cf3a8a0c09 ]
This bug is triggered whenever hfs_brec_update_parent() needs to split the root node. The height of the btree is not increased, which leaves the new node orphaned and its records lost. It is not possible for this to happen on a valid hfs filesystem because the index nodes have fixed length keys.
For reasons unknown to me, the hfs module does have support for a number of hfsplus features. A corrupt btree header may report variable-length keys and trigger this bug, so it's better to fix it.
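Conceptually, the fix is the classic B-tree "grow on root split" step; a generic sketch (hypothetical types and allocator, not the hfs code):

```
struct bnode {
	struct bnode *parent;
	/* keys/records elided */
};

struct btree {
	struct bnode *root;
	int height;
};

/* When a split produces a sibling for the root, the tree must grow:
 * re-parent both halves under a freshly allocated root. Before the
 * fix, the new sibling kept parent == NULL and its records were lost. */
static void btree_grow(struct btree *tree, struct bnode *new_sibling,
		       struct bnode *(*alloc_node)(struct btree *))
{
	struct bnode *root = alloc_node(tree);

	root->parent = NULL;
	tree->root->parent = root;
	new_sibling->parent = root;
	tree->root = root;
	tree->height++;
}
```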
Link: http://lkml.kernel.org/r/9750b1415685c4adca10766895f6d5ef12babdb0.1535682463...
Signed-off-by: Ernesto A. Fernández <ernesto.mnd.fernandez@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/hfs/brec.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 9a8772465a90..da25c49203cc 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -425,6 +425,10 @@ static int hfs_brec_update_parent(struct hfs_find_data *fd)
 	if (new_node) {
 		__be32 cnid;
 
+		if (!new_node->parent) {
+			hfs_btree_inc_height(tree);
+			new_node->parent = tree->root;
+		}
 		fd->bnode = hfs_bnode_find(tree, new_node->parent);
 		/* create index key and entry */
 		hfs_bnode_read_key(new_node, fd->search_key, 14);
From: Ernesto A. Fernández <ernesto.mnd.fernandez@gmail.com>
[ Upstream commit 0a3021d4f5295aa073c7bf5c5e4de60a2e292578 ]
Creating, renaming or deleting a file may cause catalog corruption and data loss. This bug is randomly triggered by xfstests generic/027, but here is a faster reproducer:
truncate -s 50M fs.iso
mkfs.hfsplus fs.iso
mount fs.iso /mnt
i=100
while [ $i -le 150 ]; do
  touch /mnt/$i &>/dev/null
  ((++i))
done
i=100
while [ $i -le 150 ]; do
  mv /mnt/$i /mnt/$(perl -e "print $i x82") &>/dev/null
  ((++i))
done
umount /mnt
fsck.hfsplus -n fs.iso
The bug is triggered whenever hfs_brec_update_parent() needs to split the root node. The height of the btree is not increased, which leaves the new node orphaned and its records lost.
Link: http://lkml.kernel.org/r/26d882184fc43043a810114258f45277752186c7.1535682461...
Signed-off-by: Ernesto A. Fernández <ernesto.mnd.fernandez@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/hfsplus/brec.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index ed8eacb34452..aa17a392b414 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -429,6 +429,10 @@ static int hfs_brec_update_parent(struct hfs_find_data *fd)
 	if (new_node) {
 		__be32 cnid;
 
+		if (!new_node->parent) {
+			hfs_btree_inc_height(tree);
+			new_node->parent = tree->root;
+		}
 		fd->bnode = hfs_bnode_find(tree, new_node->parent);
 		/* create index key and entry */
 		hfs_bnode_read_key(new_node, fd->search_key, 14);
From: Dan Carpenter <dan.carpenter@oracle.com>
[ Upstream commit 4b408c74ee5a0b74fc9265c2fe39b0e7dec7c056 ]
The concern here is that "gup->size" is a u64 and "nr_pages" is unsigned long. On 32 bit systems we could trick the kernel into allocating fewer pages than expected.
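The truncation is easy to demonstrate; a minimal sketch using uint32_t to stand in for a 32-bit unsigned long:

```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 1ULL << 44;	/* user-controlled gup->size */

	/* the divide happens in 64 bits, but assigning the result to a
	 * 32-bit unsigned long truncates it: 2^32 pages become 0 */
	uint32_t nr_pages = size / 4096;	/* PAGE_SIZE */

	printf("nr_pages = %u\n", nr_pages);	/* prints 0 */
	return 0;
}
```

kvcalloc() would then size the pages array from the truncated value while the rest of the ioctl trusts gup->size.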
Link: http://lkml.kernel.org/r/20181025061546.hnhkv33diogf2uis@kili.mountain
Fixes: 64c349f4ae78 ("mm: add infrastructure for get_user_pages_fast() benchmarking")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/gup_benchmark.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 7405c9d89d65..7e6f2d2dafb5 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -23,6 +23,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
 	int nr;
 	struct page **pages;
 
+	if (gup->size > ULONG_MAX)
+		return -EINVAL;
+
 	nr_pages = gup->size / PAGE_SIZE;
 	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
 	if (!pages)
From: Milian Wolff <milian.wolff@kdab.com>
[ Upstream commit 1fe627da30331024f453faef04d500079b901107 ]
libdwfl parses an ELF file itself and creates mappings for the individual sections. perf on the other hand sees raw mmap events which represent individual sections. When we encounter an address pointing into a mapping with pgoff != 0, we must take that into account and report the file at the non-offset base address.
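A sketch of the address arithmetic in question (illustrative values; start/pgoff follow perf's struct map fields):

```
#include <stdio.h>

int main(void)
{
	/* perf sees an mmap event for one ELF section: */
	unsigned long start = 0x7f38e4600000;	/* where the section is mapped */
	unsigned long pgoff = 0x3c000;		/* its offset within the file */

	/* libdwfl maps the file itself, so report it at the base the
	 * whole file would occupy, not at the section address: */
	printf("report ELF at %#lx\n", start - pgoff);
	return 0;
}
```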
This fixes unwinding with libdwfl in some cases. E.g. for a file like:
```
#include <complex>
#include <future>
#include <iostream>
#include <mutex>
#include <random>
#include <vector>

using namespace std;

mutex g_mutex;

double worker()
{
    lock_guard<mutex> guard(g_mutex);
    uniform_real_distribution<double> uniform(-1E5, 1E5);
    default_random_engine engine;
    double s = 0;
    for (int i = 0; i < 1000; ++i) {
        s += norm(complex<double>(uniform(engine), uniform(engine)));
    }
    cout << s << endl;
    return s;
}

int main()
{
    vector<std::future<double>> results;
    for (int i = 0; i < 10000; ++i) {
        results.push_back(async(launch::async, worker));
    }
    return 0;
}
```
Compile it with `g++ -g -O2 -lpthread cpp-locking.cpp -o cpp-locking`, then record it with `perf record --call-graph dwarf -e sched:sched_switch`.
When you analyze it with `perf script` and libunwind, you should see:
```
cpp-locking 20038 [005] 54830.236589: sched:sched_switch: prev_comm=cpp-locking prev_pid=20038 prev_prio=120 prev_state=T ==> next_comm=swapper/5 next_pid=0 next_prio=120
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1670208 schedule+0x28 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb16737cc rwsem_down_read_failed+0xec (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1665e04 call_rwsem_down_read_failed+0x14 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1672a03 down_read+0x13 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb106bd85 __do_page_fault+0x445 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb18015f5 page_fault+0x45 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    7f38e4252591 new_heap+0x101 (/usr/lib/libc-2.28.so)
    7f38e4252d0b arena_get2.part.4+0x2fb (/usr/lib/libc-2.28.so)
    7f38e4255b1c tcache_init.part.6+0xec (/usr/lib/libc-2.28.so)
    7f38e42569e5 __GI___libc_malloc+0x115 (inlined)
    7f38e4241790 __GI__IO_file_doallocate+0x90 (inlined)
    7f38e424fbbf __GI__IO_doallocbuf+0x4f (inlined)
    7f38e424ee47 __GI__IO_file_overflow+0x197 (inlined)
    7f38e424df36 _IO_new_file_xsputn+0x116 (inlined)
    7f38e4242bfb __GI__IO_fwrite+0xdb (inlined)
    7f38e463fa6d std::basic_streambuf<char, std::char_traits<char> >::sputn(char const*, long)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> >::_M_put(char const*, long)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> > std::__write<char>(std::ostreambuf_iterator<char, std::char_traits<char> >, char const*, int)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> > std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::_M_insert_float<double>(std::ostreambuf_iterator<c>
    7f38e464bd70 std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::put(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, double) const+0x90 (inl>
    7f38e464bd70 std::ostream& std::ostream::_M_insert<double>(double)+0x90 (/usr/lib/libstdc++.so.6.0.25)
    563b9cb502f7 std::ostream::operator<<(double)+0xb7 (inlined)
    563b9cb502f7 worker()+0xb7 (/ssd/milian/projects/kdab/rnd/hotspot/build/tests/test-clients/cpp-locking/cpp-locking)
    563b9cb506fb double std::__invoke_impl<double, double (*)()>(std::__invoke_other, double (*&&)())+0x2b (inlined)
    563b9cb506fb std::__invoke_result<double (*)()>::type std::__invoke<double (*)()>(double (*&&)())+0x2b (inlined)
    563b9cb506fb decltype (__invoke((_S_declval<0ul>)())) std::thread::_Invoker<std::tuple<double (*)()> >::_M_invoke<0ul>(std::_Index_tuple<0ul>)+0x2b (inlined)
    563b9cb506fb std::thread::_Invoker<std::tuple<double (*)()> >::operator()()+0x2b (inlined)
    563b9cb506fb std::__future_base::_Task_setter<std::unique_ptr<std::__future_base::_Result<double>, std::__future_base::_Result_base::_Deleter>, std::thread::_Invoker<std::tuple<double (*)()> >, dou>
    563b9cb506fb std::_Function_handler<std::unique_ptr<std::__future_base::_Result_base, std::__future_base::_Result_base::_Deleter> (), std::__future_base::_Task_setter<std::unique_ptrstd::__future_
    563b9cb507e8 std::function<std::unique_ptr<std::__future_base::_Result_base, std::__future_base::_Result_base::_Deleter> ()>::operator()() const+0x28 (inlined)
    563b9cb507e8 std::__future_base::_State_baseV2::_M_do_set(std::function<std::unique_ptr<std::__future_base::_Result_base, std::__future_base::_Result_base::_Deleter> ()>*, bool*)+0x28 (/ssd/milian/>
    7f38e46d24fe __pthread_once_slow+0xbe (/usr/lib/libpthread-2.28.so)
    563b9cb51149 __gthread_once+0xe9 (inlined)
    563b9cb51149 void std::call_once<void (std::__future_base::_State_baseV2::*)(std::function<std::unique_ptr<std::__future_base::_Result_base, std::__future_base::_Result_base::_Deleter> ()>*, bool*)>
    563b9cb51149 std::__future_base::_State_baseV2::_M_set_result(std::function<std::unique_ptr<std::__future_base::_Result_base, std::__future_base::_Result_base::_Deleter> ()>, bool)+0xe9 (inlined)
    563b9cb51149 std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_impl(std::thread::_Invoker<std::tuple<double (*)()> >&&)::{lambda()#1}::op>
    563b9cb51149 void std::__invoke_impl<void, std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_impl(std::thread::_Invoker<std::tuple<double>
    563b9cb51149 std::__invoke_result<std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_impl(std::thread::_Invoker<std::tuple<double (*)()> >>
    563b9cb51149 decltype (__invoke((_S_declval<0ul>)())) std::thread::_Invoker<std::tuple<std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_>
    563b9cb51149 std::thread::_Invoker<std::tuple<std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_impl(std::thread::_Invoker<std::tuple<dou>
    563b9cb51149 std::thread::_State_impl<std::thread::_Invoker<std::tuple<std::__future_base::_Async_state_impl<std::thread::_Invoker<std::tuple<double (*)()> >, double>::_Async_state_impl(std::thread>
    7f38e45f0062 execute_native_thread_routine+0x12 (/usr/lib/libstdc++.so.6.0.25)
    7f38e46caa9c start_thread+0xfc (/usr/lib/libpthread-2.28.so)
    7f38e42ccb22 __GI___clone+0x42 (inlined)
```
Before this patch, using libdwfl, you would see:
```
cpp-locking 20038 [005] 54830.236589: sched:sched_switch: prev_comm=cpp-locking prev_pid=20038 prev_prio=120 prev_state=T ==> next_comm=swapper/5 next_pid=0 next_prio=120
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1670208 schedule+0x28 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb16737cc rwsem_down_read_failed+0xec (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1665e04 call_rwsem_down_read_failed+0x14 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1672a03 down_read+0x13 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb106bd85 __do_page_fault+0x445 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb18015f5 page_fault+0x45 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    7f38e4252591 new_heap+0x101 (/usr/lib/libc-2.28.so)
    a041161e77950c5c [unknown] ([unknown])
```
With this patch applied, we get a bit further in unwinding:
```
cpp-locking 20038 [005] 54830.236589: sched:sched_switch: prev_comm=cpp-locking prev_pid=20038 prev_prio=120 prev_state=T ==> next_comm=swapper/5 next_pid=0 next_prio=120
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb166fec5 __sched_text_start+0x545 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1670208 schedule+0x28 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb16737cc rwsem_down_read_failed+0xec (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1665e04 call_rwsem_down_read_failed+0x14 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb1672a03 down_read+0x13 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb106bd85 __do_page_fault+0x445 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    ffffffffb18015f5 page_fault+0x45 (/lib/modules/4.14.78-1-lts/build/vmlinux)
    7f38e4252591 new_heap+0x101 (/usr/lib/libc-2.28.so)
    7f38e4252d0b arena_get2.part.4+0x2fb (/usr/lib/libc-2.28.so)
    7f38e4255b1c tcache_init.part.6+0xec (/usr/lib/libc-2.28.so)
    7f38e42569e5 __GI___libc_malloc+0x115 (inlined)
    7f38e4241790 __GI__IO_file_doallocate+0x90 (inlined)
    7f38e424fbbf __GI__IO_doallocbuf+0x4f (inlined)
    7f38e424ee47 __GI__IO_file_overflow+0x197 (inlined)
    7f38e424df36 _IO_new_file_xsputn+0x116 (inlined)
    7f38e4242bfb __GI__IO_fwrite+0xdb (inlined)
    7f38e463fa6d std::basic_streambuf<char, std::char_traits<char> >::sputn(char const*, long)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> >::_M_put(char const*, long)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> > std::__write<char>(std::ostreambuf_iterator<char, std::char_traits<char> >, char const*, int)+0x1cd (inlined)
    7f38e463fa6d std::ostreambuf_iterator<char, std::char_traits<char> > std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::_M_insert_float<double>(std::ostreambuf_iterator<c>
    7f38e464bd70 std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::put(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, double) const+0x90 (inl>
    7f38e464bd70 std::ostream& std::ostream::_M_insert<double>(double)+0x90 (/usr/lib/libstdc++.so.6.0.25)
    563b9cb502f7 std::ostream::operator<<(double)+0xb7 (inlined)
    563b9cb502f7 worker()+0xb7 (/ssd/milian/projects/kdab/rnd/hotspot/build/tests/test-clients/cpp-locking/cpp-locking)
    6eab825c1ee3e4ff [unknown] ([unknown])
```
Note that the backtrace is still stopping too early, when compared to the nice results obtained via libunwind. It's unclear so far what the reason for that is.
Committer note:
Further comment by Milian on the thread started on the Link: tag below:
---
The remaining issue is due to a bug in elfutils:
https://sourceware.org/ml/elfutils-devel/2018-q4/msg00089.html
With both patches applied, libunwind and elfutils produce the same output for the above scenario.
---
Signed-off-by: Milian Wolff <milian.wolff@kdab.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/20181029141644.3907-1-milian.wolff@kdab.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/perf/util/unwind-libdw.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 6f318b15950e..5eff9bfc5758 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -45,13 +45,13 @@ static int __report_module(struct addr_location *al, u64 ip,
 		Dwarf_Addr s;
 
 		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
-		if (s != al->map->start)
+		if (s != al->map->start - al->map->pgoff)
 			mod = 0;
 	}
 
 	if (!mod)
 		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
-				      (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start,
+				      (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
 				      false);
 
 	return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
From: Richard Weinberger <richard@nod.at>
[ Upstream commit 7ff1e34bbdc15acab823b1ee4240e94623d50ee8 ]
Fixes: arch/um/os-Linux/skas/process.c:613:1: warning: control reaches end of non-void function [-Wreturn-type]
longjmp() never returns but gcc still warns that the end of the function can be reached. Add a return code and debug aid to detect this impossible case.
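A minimal sketch of this warning class, assuming a longjmp flavour that is not declared noreturn (glibc's own longjmp carries the attribute and would not warn; the hypothetical host_longjmp() below does not):

```
#include <setjmp.h>

extern void host_longjmp(jmp_buf env, int val);	/* no noreturn attribute */

static jmp_buf buf;

int start_thread_sketch(void)
{
	host_longjmp(buf, 1);	/* never actually returns... */
}	/* ...but gcc -Wreturn-type warns: control reaches end of
	 * non-void function, hence the explicit return added below */
```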
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/um/os-Linux/skas/process.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index c94c3bd70ccd..df4a985716eb 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
 		fatal_sigsegv();
 	}
 	longjmp(*switch_buf, 1);
+
+	/* unreachable */
+	printk(UM_KERN_ERR "impossible long jump!");
+	fatal_sigsegv();
+	return 0;
 }
 
 void initial_thread_cb_skas(void (*proc)(void *), void *arg)
From: "Lee, Shawn C" shawn.c.lee@intel.com
[ Upstream commit 922dceff8dc1fb4dafc9af78139ba65671408103 ]
A BOE panel (ID: 0x0771) reports "DFP 1.x compliant TMDS", but it is a 6 bpc panel only, not 8 bpc.

Add the panel ID to the EDID quirk list and set 6 bpc as the default to work around this issue.
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Cooper Chiou <cooper.chiou@intel.com>
Signed-off-by: Lee, Shawn C <shawn.c.lee@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1540792173-7288-1-git-send-ema...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/drm_edid.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ff0bfc65a8c1..b506e3622b08 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -122,6 +122,9 @@ static const struct edid_quirk {
 	/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
 	{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
 
+	/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
+	{ "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+
 	/* Belinea 10 15 55 */
 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
From: David Howells dhowells@redhat.com
[ Upstream commit 4ac15ea53622272c01954461b4814892b7481b40 ]
Fix afs_deliver_to_call() to handle -EIO being returned by the operation delivery function, indicating that the call found itself in the wrong state, by printing an error and aborting the call.
Currently, an assertion failure will occur. This can happen, say, if the delivery function falls off the end without calling afs_extract_data() with the want_more parameter set to false to collect the end of the Rx phase of a call.
The assertion failure looks like:
AFS: Assertion failed
4 == 7 is false
0x4 == 0x7 is false
------------[ cut here ]------------
kernel BUG at fs/afs/rxrpc.c:462!
and is matched in the trace buffer by a line like:
kworker/7:3-3226 [007] ...1 85158.030203: afs_io_error: c=0003be0c r=-5 CM_REPLY
Fixes: 98bf40cd99fc ("afs: Protect call->state changes against signals") Reported-by: Marc Dionne marc.dionne@auristor.com Signed-off-by: David Howells dhowells@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/afs/rxrpc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 77a83790a31f..2543f24d23f8 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -500,7 +500,6 @@ static void afs_deliver_to_call(struct afs_call *call) case -EINPROGRESS: case -EAGAIN: goto out; - case -EIO: case -ECONNABORTED: ASSERTCMP(state, ==, AFS_CALL_COMPLETE); goto done; @@ -509,6 +508,10 @@ static void afs_deliver_to_call(struct afs_call *call) rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, "KIV"); goto local_abort; + case -EIO: + pr_err("kAFS: Call %u in bad state %u\n", + call->debug_id, state); + /* Fall through */ case -ENODATA: case -EBADMSG: case -EMSGSIZE:
From: Rajneesh Bhardwaj rajneesh.bhardwaj@linux.intel.com
[ Upstream commit 8d98b1ef368feeb7720b8b9b6f3bd93f2ad892bc ]
On some Goldmont based systems such as ASRock J3455M, the BIOS may not enable the IPC1 device that provides access to the PMC and PUNIT. In such scenarios, the IOSS and PSS resources from the platform device cannot be obtained, resulting in an invalid telemetry_plt_config, the internal data structure that holds the platform config and is maintained by the telemetry platform driver.
This is also applicable to the platforms where the BIOS supports IPC1 device under debug configurations but IPC1 is disabled by user or the policy.
This change lets the user know the reason for not seeing entries under /sys/kernel/debug/telemetry/* when there is no apparent failure at boot.
Cc: Matt Turner matt.turner@intel.com Cc: Len Brown len.brown@intel.com Cc: Souvik Kumar Chakravarty souvik.k.chakravarty@intel.com Cc: Kuppuswamy Sathyanarayanan sathyanarayanan.kuppuswamy@intel.com
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198779 Acked-by: Matt Turner matt.turner@intel.com Signed-off-by: Rajneesh Bhardwaj rajneesh.bhardwaj@linux.intel.com Signed-off-by: Andy Shevchenko andriy.shevchenko@linux.intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/platform/x86/intel_telemetry_debugfs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ffd0474b0531..1423fa8710fd 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -951,12 +951,16 @@ static int __init telemetry_debugfs_init(void) debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
err = telemetry_pltconfig_valid(); - if (err < 0) + if (err < 0) { + pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n"); return -ENODEV; + }
err = telemetry_debugfs_check_evts(); - if (err < 0) + if (err < 0) { + pr_info("telemetry_debugfs_check_evts failed\n"); return -EINVAL; + }
register_pm_notifier(&pm_notifier);
From: Alan Tull atull@kernel.org
[ Upstream commit 52091c256bdcad0d01e2852a63f19cd2cce6af96 ]
When the fixed rate clock is created by devicetree, of_clk_add_provider is called. Add a call to of_clk_del_provider in the remove function to balance it out.
Signed-off-by: Alan Tull atull@kernel.org Fixes: 435779fe1336 ("clk: fixed-rate: Convert into a module platform driver") Signed-off-by: Stephen Boyd sboyd@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/clk-fixed-rate.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index b5c46b3f8764..6d6475c32ee5 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_rate(clk);
return 0;
From: David Miller davem@davemloft.net
[ Upstream commit d6afa561e1471ccfdaf7191230c0c59a37e45a5b ]
Using the sh_entsize for both values isn't correct. It happens to be correct on x86...
For both 32-bit and 64-bit sparc, there are four PLT entries in the PLT section.
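As a rough sketch of what those constants buy perf: synthetic PLT symbols are placed at header + i * entry, so the header size must be the real one (on sparc32 the four reserved slots give 4 x 12 = 48 bytes, on sparc64 4 x 32 = 128), not whatever sh_entsize happens to be. The base address below is made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: perf synthesizes one symbol per PLT slot. The i-th usable
 * entry starts after the PLT header, so using sh_entsize for the
 * header size only works on architectures where the two match.
 */
static uint64_t plt_sym_addr(uint64_t plt_start, uint64_t header_size,
			     uint64_t entry_size, unsigned int i)
{
	return plt_start + header_size + (uint64_t)i * entry_size;
}

int main(void)
{
	/* sparc32 values from the patch: 48-byte header, 12-byte entries */
	for (unsigned int i = 0; i < 4; i++)
		printf("entry %u at %#llx\n", i,
		       (unsigned long long)plt_sym_addr(0x10000, 48, 12, i));
	return 0;
}
```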
Signed-off-by: David S. Miller davem@davemloft.net Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Alexis Berlemont alexis.berlemont@gmail.com Cc: David Tolnay dtolnay@gmail.com Cc: Hanjun Guo guohanjun@huawei.com Cc: Hemant Kumar hemant@linux.vnet.ibm.com Cc: Li Bin huawei.libin@huawei.com Cc: Masami Hiramatsu mhiramat@kernel.org Cc: Milian Wolff milian.wolff@kdab.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Wang Nan wangnan0@huawei.com Cc: zhangmengting@huawei.com Fixes: b2f7605076d6 ("perf symbols: Fix plt entry calculation for ARM and AARCH64") Link: http://lkml.kernel.org/r/20181017.120859.2268840244308635255.davem@davemloft... Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/symbol-elf.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 29770ea61768..6e70cc00c161 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -324,7 +324,17 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) plt_entry_size = 16; break;
- default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/sparc/xtensa need to be checked */ + case EM_SPARC: + plt_header_size = 48; + plt_entry_size = 12; + break; + + case EM_SPARCV9: + plt_header_size = 128; + plt_entry_size = 32; + break; + + default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */ plt_header_size = shdr_plt.sh_entsize; plt_entry_size = shdr_plt.sh_entsize; break;
From: Chengguang Xu cgxu519@gmx.com
[ Upstream commit 515f1867addaba49c1c6ac73abfaffbc192c1db4 ]
There are some cases that can cause a memory leak when parsing the 'osdname' option.
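A standalone sketch of the two leak sites the patch plugs (hypothetical parser, not the exofs code): the old strdup()ed value must be freed before it is overwritten when the option repeats, and the caller must free it when parsing fails part-way:

```c
#include <stdlib.h>
#include <string.h>

struct mountopt {
	char *dev_name;
};

static int parse_options(const char *options, struct mountopt *opts)
{
	/* hypothetical single-option parser, for illustration only */
	if (strncmp(options, "osdname=", 8) == 0) {
		free(opts->dev_name);	/* don't leak a previous value */
		opts->dev_name = strdup(options + 8);
		return opts->dev_name ? 0 : -1;
	}
	return -1;			/* unknown option */
}

int main(void)
{
	struct mountopt opts = { 0 };

	if (parse_options("osdname=dev0", &opts) ||
	    parse_options("bogus", &opts)) {
		free(opts.dev_name);	/* error path: mirrors the fix */
		return 1;
	}
	free(opts.dev_name);
	return 0;
}
```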
Signed-off-by: Chengguang Xu cgxu519@gmx.com Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin sashal@kernel.org --- fs/exofs/super.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 41cf2fbee50d..7d61e3fa378c 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -101,6 +101,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts) token = match_token(p, tokens, args); switch (token) { case Opt_name: + kfree(opts->dev_name); opts->dev_name = match_strdup(&args[0]); if (unlikely(!opts->dev_name)) { EXOFS_ERR("Error allocating dev_name"); @@ -866,8 +867,10 @@ static struct dentry *exofs_mount(struct file_system_type *type, int ret;
ret = parse_options(data, &opts); - if (ret) + if (ret) { + kfree(opts.dev_name); return ERR_PTR(ret); + }
if (!opts.dev_name) opts.dev_name = dev_name;
From: Marek Szyprowski m.szyprowski@samsung.com
[ Upstream commit b33228029d842269e17bba591609e83ed422005d ]
Ensure that clocks for core SoC modules (including TZPC0..9 modules) are enabled for suspend/resume cycle. This fixes suspend/resume support on Exynos5422-based Odroid XU3/XU4 boards.
Suggested-by: Joonyoung Shim jy0922.shim@samsung.com Signed-off-by: Marek Szyprowski m.szyprowski@samsung.com Signed-off-by: Sylwester Nawrocki snawrocki@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/samsung/clk-exynos5420.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 95e1bf69449b..d4f77c4eb277 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -281,6 +281,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, + { .offset = GATE_IP_PERIS, .value = 0xffffffff, }, };
static int exynos5420_clk_suspend(void)
From: Zubin Mithra zsm@chromium.org
[ Upstream commit 250f2da49cb8e582215a65c03f50e8ddf5cd119c ]
Syzkaller reported an OOB read with the stacktrace below. This occurs inside __aa_lookupn_ns as `n` is not initialized. `n` is obtained from aa_splitn_fqname. In cases where `name` is invalid, aa_splitn_fqname returns without initializing `ns_name` and `ns_len`.
Fix this by always initializing `ns_name` and `ns_len`.
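A simplified userspace sketch of the out-parameter contract the fix enforces (it ignores the real function's space-skipping and is not the apparmor code): both outputs are set before any early return, so a caller can never read stack garbage:

```c
#include <stddef.h>
#include <string.h>

static const char *splitn_fqname(const char *fqname, size_t n,
				 const char **ns_name, size_t *ns_len)
{
	*ns_name = NULL;	/* always initialized, even on failure */
	*ns_len = 0;

	if (!fqname || n == 0)
		return NULL;	/* early return is now safe for callers */

	if (fqname[0] == ':') {
		const char *split = memchr(fqname + 1, ':', n - 1);

		if (!split)
			return NULL;
		*ns_name = fqname + 1;
		*ns_len = (size_t)(split - *ns_name);
		return split + 1;
	}
	return fqname;
}

int main(void)
{
	const char *ns;
	size_t ns_len;
	const char *name = splitn_fqname(":ns:profile", 11, &ns, &ns_len);

	return (name && ns && ns_len == 2) ? 0 : 1;
}
```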
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x1c4/0x2b4 lib/dump_stack.c:113
print_address_description.cold.8+0x9/0x1ff mm/kasan/report.c:256
kasan_report_error mm/kasan/report.c:354 [inline]
kasan_report.cold.9+0x242/0x309 mm/kasan/report.c:412
__asan_report_load1_noabort+0x14/0x20 mm/kasan/report.c:430
memcmp+0xe3/0x160 lib/string.c:861
strnstr+0x4b/0x70 lib/string.c:934
__aa_lookupn_ns+0xc1/0x570 security/apparmor/policy_ns.c:209
aa_lookupn_ns+0x88/0x1e0 security/apparmor/policy_ns.c:240
aa_fqlookupn_profile+0x1b9/0x1010 security/apparmor/policy.c:468
fqlookupn_profile+0x80/0xc0 security/apparmor/label.c:1844
aa_label_strn_parse+0xa3a/0x1230 security/apparmor/label.c:1908
aa_label_parse+0x42/0x50 security/apparmor/label.c:1943
aa_change_profile+0x513/0x3510 security/apparmor/domain.c:1362
apparmor_setprocattr+0xaa4/0x1150 security/apparmor/lsm.c:658
security_setprocattr+0x66/0xc0 security/security.c:1298
proc_pid_attr_write+0x301/0x540 fs/proc/base.c:2555
__vfs_write+0x119/0x9f0 fs/read_write.c:485
vfs_write+0x1fc/0x560 fs/read_write.c:549
ksys_write+0x101/0x260 fs/read_write.c:598
__do_sys_write fs/read_write.c:610 [inline]
__se_sys_write fs/read_write.c:607 [inline]
__x64_sys_write+0x73/0xb0 fs/read_write.c:607
do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Fixes: 3b0aaf5866bf ("apparmor: add lib fn to find the "split" for fqnames") Reported-by: syzbot+61e4b490d9d2da591b50@syzkaller.appspotmail.com Signed-off-by: Zubin Mithra zsm@chromium.org Reviewed-by: Kees Cook keescook@chromium.org Signed-off-by: John Johansen john.johansen@canonical.com Signed-off-by: Sasha Levin sashal@kernel.org --- security/apparmor/lib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 974affe50531..76491e7f4177 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, const char *end = fqname + n; const char *name = skipn_spaces(fqname, n);
- if (!name) - return NULL; *ns_name = NULL; *ns_len = 0; + + if (!name) + return NULL; + if (name[0] == ':') { char *split = strnchr(&name[1], end - &name[1], ':'); *ns_name = skipn_spaces(&name[1], end - &name[1]);
From: Feng Tang feng.tang@intel.com
[ Upstream commit d2266bbfa9e3e32e3b642965088ca461bd24a94f ]
The "pciserial" earlyprintk variant helps much on many modern x86 platforms, but unfortunately there are still some platforms with PCI UART devices which have the wrong PCI class code. In that case, the current class code check does not allow for them to be used for logging.
Add a sub-option "force" which overrides the class code check and thus the use of such device can be enforced.
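A small sketch of the suboption handling this adds, mirroring the strncmp-based prefix parse in the patch (the surrounding parameter strings are made up):

```c
#include <stdio.h>
#include <string.h>

/*
 * An optional "force," prefix ahead of the B:D.F triplet flips a flag
 * and is consumed before the rest of the parameter is parsed.
 */
static void parse_pciserial(const char *s)
{
	int force = 0;

	if (*s == ',')
		s++;
	if (strncmp(s, "force,", 6) == 0) {
		force = 1;
		s += 6;
	}
	printf("force=%d rest=\"%s\"\n", force, s);
}

int main(void)
{
	parse_pciserial(",force,00:18.1,115200");
	parse_pciserial(",00:18.1,115200");
	return 0;
}
```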
[ bp: massage formulations. ]
Suggested-by: Borislav Petkov bp@alien8.de Signed-off-by: Feng Tang feng.tang@intel.com Signed-off-by: Borislav Petkov bp@suse.de Cc: "H. Peter Anvin" hpa@zytor.com Cc: "Stuart R . Anderson" stuart.r.anderson@intel.com Cc: Bjorn Helgaas bhelgaas@google.com Cc: David Rientjes rientjes@google.com Cc: Feng Tang feng.tang@intel.com Cc: Frederic Weisbecker frederic@kernel.org Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org Cc: H Peter Anvin hpa@linux.intel.com Cc: Ingo Molnar mingo@kernel.org Cc: Jiri Kosina jkosina@suse.cz Cc: Jonathan Corbet corbet@lwn.net Cc: Kai-Heng Feng kai.heng.feng@canonical.com Cc: Kate Stewart kstewart@linuxfoundation.org Cc: Konrad Rzeszutek Wilk konrad.wilk@oracle.com Cc: Peter Zijlstra peterz@infradead.org Cc: Philippe Ombredanne pombredanne@nexb.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Thymo van Beers thymovanbeers@gmail.com Cc: alan@linux.intel.com Cc: linux-doc@vger.kernel.org Link: http://lkml.kernel.org/r/20181002164921.25833-1-feng.tang@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../admin-guide/kernel-parameters.txt | 6 +++- arch/x86/kernel/early_printk.c | 29 ++++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 92eb1f42240d..34e6800dea0e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1063,7 +1063,7 @@ earlyprintk=serial[,0x...[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] - earlyprintk=pciserial,bus:device.function[,baudrate] + earlyprintk=pciserial[,force],bus:device.function[,baudrate] earlyprintk=xdbc[xhciController#]
earlyprintk is useful when the kernel crashes before @@ -1095,6 +1095,10 @@
The sclp output can only be used on s390.
+ The optional "force" to "pciserial" enables use of a + PCI device even when its classcode is not of the + UART class. + edac_report= [HW,EDAC] Control how to report EDAC event Format: {"on" | "off" | "force"} on: enable EDAC to report H/W event. May be overridden diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..374a52fa5296 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset) * early_pci_serial_init() * * This function is invoked when the early_printk param starts with "pciserial" - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the - * location of a PCI device that must be a UART device. + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe + * the location of a PCI device that must be a UART device. "force" is optional + * and overrides the use of an UART device with a wrong PCI class code. */ static __init void early_pci_serial_init(char *s) { @@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s) u32 classcode, bar0; u16 cmdreg; char *e; + int force = 0;
- - /* - * First, part the param to get the BDF values - */ if (*s == ',') ++s;
if (*s == 0) return;
+ /* Force the use of an UART device with wrong class code */ + if (!strncmp(s, "force,", 6)) { + force = 1; + s += 6; + } + + /* + * Part the param to get the BDF values + */ bus = (u8)simple_strtoul(s, &e, 16); s = e; if (*s != ':') @@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s) s++;
/* - * Second, find the device from the BDF + * Find the device from the BDF */ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); @@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s) */ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ - return; + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { + if (!force) + return; + }
/* * Determine if it is IO or memory mapped @@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s) }
/* - * Lastly, initialize the hardware + * Initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0)
From: Paul Gortmaker paul.gortmaker@windriver.com
[ Upstream commit 684238d79ad85c5e19a71bb5818e77e329912fbc ]
To fix:
acerhdf: unknown (unsupported) BIOS version Gateway /LT31 /v1.3307 , please report, aborting!
As can be seen in the context, the BIOS registers haven't changed in the previous versions, so the assumption is they won't have changed in this last update for this somewhat older platform either.
Cc: Peter Feuerer peter@piie.net Cc: Darren Hart dvhart@infradead.org Cc: Andy Shevchenko andy@infradead.org Signed-off-by: Paul Gortmaker paul.gortmaker@windriver.com Signed-off-by: Andy Shevchenko andriy.shevchenko@linux.intel.com Reviewed-by: Peter Feuerer peter@piie.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/platform/x86/acerhdf.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ea22591ee66f..53dfe67807e3 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = { {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0}, + {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0}, /* Packard Bell */ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0}, {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
From: Yixun Lan yixun.lan@amlogic.com
[ Upstream commit 69b93104c7ec5668019caf5d2dbfd0e182df06db ]
We found that the PCIe driver doesn't really work with the mpll3 clock, which is actually reserved for debug, so drop it from the mux list.
Fixes: 33b89db68236 ("clk: meson-axg: add clocks required by pcie driver") Tested-by: Jianxin Qin jianxin.qin@amlogic.com Signed-off-by: Yixun Lan yixun.lan@amlogic.com Signed-off-by: Jerome Brunet jbrunet@baylibre.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/meson/axg.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 00ce62ad6416..a95152071087 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -700,12 +700,14 @@ static struct clk_regmap axg_pcie_mux = { .offset = HHI_PCIE_PLL_CNTL6, .mask = 0x1, .shift = 2, + /* skip the parent mpll3, reserved for debug */ + .table = (u32[]){ 1 }, }, .hw.init = &(struct clk_init_data){ .name = "pcie_mux", .ops = &clk_regmap_mux_ops, - .parent_names = (const char *[]){ "mpll3", "pcie_pll" }, - .num_parents = 2, + .parent_names = (const char *[]){ "pcie_pll" }, + .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, };
From: Nathan Chancellor natechancellor@gmail.com
[ Upstream commit b5bb425871186303e6936fa2581521bdd1964a58 ]
Clang warns that if the default case is taken, ret will be uninitialized.
./arch/arm64/include/asm/percpu.h:196:2: warning: variable 'ret' is used uninitialized whenever switch default is taken [-Wsometimes-uninitialized]
        default:
        ^~~~~~~
./arch/arm64/include/asm/percpu.h:200:9: note: uninitialized use occurs here
        return ret;
               ^~~
./arch/arm64/include/asm/percpu.h:157:19: note: initialize the variable 'ret' to silence this warning
        unsigned long ret, loop;
                      ^
                       = 0
This warning appears several times while building the erofs filesystem. While it's not strictly wrong, the BUILD_BUG will prevent this from becoming a true problem. Initialize ret to 0 in the default case right before the BUILD_BUG to silence all of these warnings.
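A userspace analogue of the pattern (abort() stands in for BUILD_BUG(), since the kernel macro fails the build when the branch survives dead-code elimination): the assignment in the default case is dead, but it gives clang's flow analysis a defined value on every path:

```c
#include <stdio.h>
#include <stdlib.h>

static unsigned long read_sized(const void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		ret = *(const unsigned char *)ptr;
		break;
	case 8:
		ret = *(const unsigned long *)ptr;
		break;
	default:
		ret = 0;	/* dead assignment; placates the warning */
		abort();	/* stand-in for BUILD_BUG() */
	}
	return ret;
}

int main(void)
{
	unsigned long v = 42;

	printf("%lu\n", read_sized(&v, 8));
	return 0;
}
```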
Reported-by: Prasad Sodagudi psodagud@codeaurora.org Signed-off-by: Nathan Chancellor natechancellor@gmail.com Reviewed-by: Nick Desaulniers ndesaulniers@google.com Signed-off-by: Dennis Zhou dennis@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm64/include/asm/percpu.h | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 9234013e759e..21a81b59a0cc 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \ : [val] "Ir" (val)); \ break; \ default: \ + ret = 0; \ BUILD_BUG(); \ } \ \ @@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size) ret = READ_ONCE(*(u64 *)ptr); break; default: + ret = 0; BUILD_BUG(); }
@@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, : [val] "r" (val)); break; default: + ret = 0; BUILD_BUG(); }
From: Jerome Brunet jbrunet@baylibre.com
[ Upstream commit 2303a9ca693e585a558497ad737728fec97e2b8a ]
CLK_GET_RATE_NOCACHE should only be necessary when the registers controlling the rate of a clock may change outside of CCF. On Amlogic, this should only be the case for the hdmi pll, which is directly controlled by the display driver (WIP to fix this).
The other plls should not require this flag.
Reviewed-by: Martin Blumenstingl martin.blumenstingl@googlemail.com Tested-by: Martin Blumenstingl martin.blumenstingl@googlemail.com Signed-off-by: Jerome Brunet jbrunet@baylibre.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/meson/axg.c | 1 - drivers/clk/meson/gxbb.c | 12 ++++++++---- drivers/clk/meson/meson8b.c | 3 --- 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index a95152071087..f24d38c12e09 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -96,7 +96,6 @@ static struct clk_regmap axg_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 86d3ae58e84c..58e4b66bfc55 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -213,7 +213,6 @@ static struct clk_regmap gxbb_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
@@ -276,6 +275,10 @@ static struct clk_regmap gxbb_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "hdmi_pll_pre_mult" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -334,6 +337,10 @@ static struct clk_regmap gxl_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -371,7 +378,6 @@ static struct clk_regmap gxbb_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
@@ -418,7 +424,6 @@ static struct clk_regmap gxbb_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
@@ -472,7 +477,6 @@ static struct clk_regmap gxl_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 7447d96a265f..74697e145dde 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -132,7 +132,6 @@ static struct clk_regmap meson8b_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
@@ -169,7 +168,6 @@ static struct clk_regmap meson8b_vid_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
@@ -207,7 +205,6 @@ static struct clk_regmap meson8b_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, };
From: Phil Edworthy phil.edworthy@renesas.com
[ Upstream commit ee02950d53eee0d4c7f1c08a35272b77d24b9459 ]
The clock for UARTs 0 through 2 is UART012; the clock for UARTs 3 through 7 is UART34567. For UART012, we stop the clock driver from changing the clock rate. This is because the Synopsys UART driver simply sets the reference clock to 16x the baud rate, but doesn't check whether the actual rate is within the required tolerance. The RZ/N1 clock divider can't provide this (we have to rely on the UART's internal divider to set the correct clock rate), so you end up with a clock rate that is way off what you wanted.
In addition, since the clock is shared between multiple UARTs, you don't want the driver trying to change the clock rate, as it may affect the other UARTs (which may not have been configured yet, so you don't know what baud rate they will use). Normally, the clock rate is set early on, before Linux boots, to some very high rate that supports all of the clock rates you want.
This change stops the UART34567 clock rate from changing for the same reasons.
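To illustrate why a fixed, high reference clock is the right model here, a sketch of the internal-divider math (divisor = ref / (16 * baud)); the 48 MHz reference is a made-up value, not the RZ/N1's:

```c
#include <stdio.h>

int main(void)
{
	unsigned long ref = 48000000;	/* hypothetical fixed UART clock */
	unsigned long bauds[] = { 9600, 115200, 1000000 };

	for (unsigned int i = 0; i < 3; i++) {
		/* the UART's own divider does the work... */
		unsigned long div = ref / (16 * bauds[i]);
		/* ...and the error stays within tolerance */
		unsigned long actual = ref / (16 * div);

		printf("baud %7lu: divisor %3lu -> actual %lu\n",
		       bauds[i], div, actual);
	}
	return 0;
}
```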
Signed-off-by: Phil Edworthy phil.edworthy@renesas.com Fixes: 4c3d88526eba2143 ("clk: renesas: Renesas R9A06G032 clock driver") Signed-off-by: Geert Uytterhoeven geert+renesas@glider.be Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/renesas/r9a06g032-clocks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index a0b6ecdc63dd..6d2b56891559 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw, * several uarts attached to this divider, and changing this impacts * everyone. */ - if (clk->index == R9A06G032_DIV_UART) { + if (clk->index == R9A06G032_DIV_UART || + clk->index == R9A06G032_DIV_P2_PG) { pr_devel("%s div uart hack!\n", __func__); return clk_get_rate(hw->clk); }
From: Icenowy Zheng icenowy@aosc.io
[ Upstream commit c2ff8383cc33c2d9c169e4daf1e37a434c3bb420 ]
On the H6, the MMC module clocks are fixed in the new timing mode, i.e. they do not have a bit to select the mode. These clocks have a 2x divider somewhere between the clock and the MMC module.
To be consistent with other SoCs supporting the new timing mode, we model the 2x divider as a fixed post-divider on the MMC module clocks.
This patch adds the post-dividers to the MMC clocks, following the approach on A64.
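A back-of-the-envelope sketch of the fixed post-divider model, assuming the usual sunxi-ng MP semantics (a linear M+1 factor and a power-of-two P factor); the parent rate and register values are illustrative only:

```c
#include <stdio.h>

int main(void)
{
	unsigned long parent = 1200000000;	/* hypothetical pll-periph0-2x */
	unsigned int m = 2;			/* register value: divide by m+1 */
	unsigned int p = 1;			/* register value: divide by 1<<p */
	unsigned long module = parent / (m + 1) / (1u << p);
	unsigned long mmc = module / 2;		/* fixed post-divider */

	printf("module clock %lu Hz -> MMC rate %lu Hz\n", module, mmc);
	return 0;
}
```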
Fixes: 524353ea480b ("clk: sunxi-ng: add support for the Allwinner H6 CCU") Signed-off-by: Icenowy Zheng icenowy@aosc.io Signed-off-by: Maxime Ripard maxime.ripard@bootlin.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 43 +++++++++++++++------------- 1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index bdbfe78fe133..3d60f7978506 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -408,26 +408,29 @@ static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x", "pll-periph1-2x" }; -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); - -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); - -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0); static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
From: Johan Hovold johan@kernel.org
[ Upstream commit 00a461cc32ec27fa7bd9c874a7b36b0c6c542c12 ]
Fix the child-node lookup, which used the wrong OF helper and searched the whole tree depth-first, something which could end up matching an unrelated node.
Also fix the related node-reference leaks.
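A standalone illustration of the difference between the two lookups (toy tree type, not the OF API): a depth-first search over the whole tree can return a same-named node under an unrelated parent, while a children-only search cannot:

```c
#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	struct node *child;
	struct node *sibling;
};

/* analogous to of_find_node_by_name(): searches the whole tree */
static struct node *find_anywhere(struct node *n, const char *name)
{
	if (!n)
		return NULL;
	if (strcmp(n->name, name) == 0)
		return n;
	struct node *hit = find_anywhere(n->child, name);
	return hit ? hit : find_anywhere(n->sibling, name);
}

/* analogous to of_get_child_by_name(): direct children only */
static struct node *get_child_by_name(struct node *parent, const char *name)
{
	for (struct node *c = parent ? parent->child : NULL; c; c = c->sibling)
		if (strcmp(c->name, name) == 0)
			return c;
	return NULL;
}

int main(void)
{
	struct node wrong_clk = { "clk", NULL, NULL };
	struct node other = { "other", &wrong_clk, NULL };
	struct node parent = { "parent", NULL, NULL };
	struct node root = { "/", &other, NULL };

	other.sibling = &parent;

	/* tree-wide search wrongly matches other/clk */
	printf("anywhere: %s\n", find_anywhere(&root, "clk")->name);
	/* children-only search correctly fails for "parent" */
	printf("child: %s\n",
	       get_child_by_name(&parent, "clk") ? "found" : "not found");
	return 0;
}
```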
Fixes: 5b385a45e001 ("clk: ti: add support for clkctrl aliases") Signed-off-by: Johan Hovold johan@kernel.org Acked-by: Tero Kristo t-kristo@ti.com Signed-off-by: Stephen Boyd sboyd@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/ti/clk.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 7d22e1af2247..27e0979b3158 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node; + struct device_node *node, *parent; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -164,8 +164,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) continue;
node = of_find_node_by_name(NULL, buf); - if (num_args) - node = of_find_node_by_name(node, "clk"); + if (num_args) { + parent = node; + node = of_get_child_by_name(parent, "clk"); + of_node_put(parent); + } + clkspec.np = node; clkspec.args_count = num_args; for (i = 0; i < num_args; i++) { @@ -173,11 +177,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) if (ret) { pr_warn("Bad tag in %s at %d: %s\n", c->node_name, i, tags[i]); + of_node_put(node); return; } } clk = of_clk_get_from_provider(&clkspec); - + of_node_put(node); if (!IS_ERR(clk)) { c->lk.clk = clk; clkdev_add(&c->lk);
From: Andrea Arcangeli aarcange@redhat.com
[ Upstream commit d7c3393413fe7e7dc54498ea200ea94742d61e18 ]
Patch series "migrate_misplaced_transhuge_page race conditions".
Aaron found a new instance of the THP MADV_DONTNEED race against pmdp_clear_flush* variants, that was apparently left unfixed.
While looking into the race found by Aaron, I may have found two more issues in migrate_misplaced_transhuge_page.
These race conditions would not cause kernel instability, but they'd corrupt userland data or leave data non-zero after MADV_DONTNEED.
I did only minor testing, and I don't expect to be able to reproduce this (especially the lack of ->invalidate_range before migrate_page_copy, requires the latest iommu hardware or infiniband to reproduce). The last patch is noop for x86 and it needs further review from maintainers of archs that implement flush_cache_range() (not in CC yet).
To avoid confusion: it's not the first patch that introduces the bug fixed in the second patch; even before the removal of pmdp_huge_clear_flush_notify, that _notify variant was called only after migrate_page_copy had already run.
This patch (of 3):
This is a corollary of ced108037c2aa ("thp: fix MADV_DONTNEED vs. numa balancing race"), 58ceeb6bec8 ("thp: fix MADV_DONTNEED vs. MADV_FREE race") and 5b7abeae3af8c ("thp: fix MADV_DONTNEED vs clear soft dirty race").
When the above three fixes were posted, Dave asked https://lkml.kernel.org/r/929b3844-aec2-0111-fef7-8002f9d4e2b9@intel.com but apparently this was missed.
The pmdp_clear_flush* in migrate_misplaced_transhuge_page() was introduced in a54a407fbf7 ("mm: Close races between THP migration and PMD numa clearing").
The important part of such commit is only the part where the page lock is not released until the first do_huge_pmd_numa_page() finished disarming the pagenuma/protnone.
The addition of pmdp_clear_flush() wasn't beneficial to such commit and there's no commentary about such an addition either.
I guess the pmdp_clear_flush() in such commit was added just in case for safety, but it ended up introducing the MADV_DONTNEED race condition found by Aaron.
At that point in time nobody thought of such kind of MADV_DONTNEED race conditions yet (they were fixed later) so the code may have looked more robust by adding the pmdp_clear_flush().
This specific race condition won't destabilize the kernel, but it can confuse userland because after MADV_DONTNEED the memory won't be zeroed out.
This also optimizes the code and removes a superfluous TLB flush.
[akpm@linux-foundation.org: reflow comment to 80 cols, fix grammar and typo (beacuse)] Link: http://lkml.kernel.org/r/20181013002430.698-2-aarcange@redhat.com Signed-off-by: Andrea Arcangeli aarcange@redhat.com Reported-by: Aaron Tomlin atomlin@redhat.com Acked-by: Mel Gorman mgorman@suse.de Acked-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Cc: Jerome Glisse jglisse@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/migrate.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c index 84381b55b2bd..1f634b1563b6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2029,15 +2029,26 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
/* - * Clear the old entry under pagetable lock and establish the new PTE. - * Any parallel GUP will either observe the old page blocking on the - * page lock, block on the page table lock or observe the new page. - * The SetPageUptodate on the new page and page_add_new_anon_rmap - * guarantee the copy is visible before the pagetable update. + * Overwrite the old entry under pagetable lock and establish + * the new PTE. Any parallel GUP will either observe the old + * page blocking on the page lock, block on the page table + * lock or observe the new page. The SetPageUptodate on the + * new page and page_add_new_anon_rmap guarantee the copy is + * visible before the pagetable update. */ flush_cache_range(vma, mmun_start, mmun_end); page_add_anon_rmap(new_page, vma, mmun_start, true); - pmdp_huge_clear_flush_notify(vma, mmun_start, pmd); + /* + * At this point the pmd is numa/protnone (i.e. non present) and the TLB + * has already been flushed globally. So no TLB can be currently + * caching this non present pmd mapping. There's no need to clear the + * pmd before doing set_pmd_at(), nor to flush the TLB after + * set_pmd_at(). Clearing the pmd here would introduce a race + * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the + * mmap_sem for reading. If the pmd is set to NULL at any given time, + * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this + * pmd. + */ set_pmd_at(mm, mmun_start, pmd, entry); update_mmu_cache_pmd(vma, address, &entry);
@@ -2051,7 +2062,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, * No need to double call mmu_notifier->invalidate_range() callback as * the above pmdp_huge_clear_flush_notify() did already call it. */ - mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Take an "isolate" reference and put new page on the LRU. */ get_page(new_page);
From: Andrea Arcangeli aarcange@redhat.com
[ Upstream commit 7066f0f933a1fd707bb38781866657769cff7efc ]
change_huge_pmd() after arming the numa/protnone pmd doesn't flush the TLB right away. do_huge_pmd_numa_page() flushes the TLB before calling migrate_misplaced_transhuge_page(). By the time do_huge_pmd_numa_page() runs some CPU could still access the page through the TLB.
change_huge_pmd() before arming the numa/protnone transhuge pmd calls mmu_notifier_invalidate_range_start(). So there's no need of mmu_notifier_invalidate_range_start()/mmu_notifier_invalidate_range_only_end() sequence in migrate_misplaced_transhuge_page() too, because by the time migrate_misplaced_transhuge_page() runs, the pmd mapping has already been invalidated in the secondary MMUs. It has to or if a secondary MMU can still write to the page, the migrate_page_copy() would lose data.
However an explicit mmu_notifier_invalidate_range() is needed before migrate_misplaced_transhuge_page() starts copying the data of the transhuge page or the below can happen for MMU notifier users sharing the primary MMU pagetables and only implementing ->invalidate_range:
    CPU0                              CPU1                              GPU sharing linux pagetables
                                                                        using only ->invalidate_range
    -----------                       ------------                      ---------
                                                                        GPU secondary MMU writes
                                                                        to the page mapped by the
                                                                        transhuge pmd
    change_pmd_range()
    mmu..._range_start()
    ->invalidate_range_start() noop
    change_huge_pmd()
    set_pmd_at(numa/protnone)
    pmd_unlock()
                                      do_huge_pmd_numa_page()
                                      CPU TLB flush globally (1)
                                      CPU cannot write to page
                                      migrate_misplaced_transhuge_page()
                                                                        GPU writes to the page...
                                      migrate_page_copy()
                                                                        ...GPU stops writing to the page
                                      CPU TLB flush (2)
    mmu..._range_end() (3)
    ->invalidate_range_stop() noop
    ->invalidate_range()
                                                                        GPU secondary MMU is invalidated
                                                                        and cannot write to the page
                                                                        anymore (too late)
Just like we need a CPU TLB flush (1) because the TLB flush (2) arrives too late, we also need a mmu_notifier_invalidate_range() before calling migrate_misplaced_transhuge_page(), because the ->invalidate_range() in (3) also arrives too late.
This requirement is the result of the lazy optimization in change_huge_pmd() that releases the pmd_lock without first flushing the TLB and without first calling mmu_notifier_invalidate_range().
Even converting the removed mmu_notifier_invalidate_range_only_end() into a mmu_notifier_invalidate_range_end() would not have been enough to fix this, because it run after migrate_page_copy().
After the hugepage data copy is done migrate_misplaced_transhuge_page() can proceed and call set_pmd_at without having to flush the TLB nor any secondary MMUs because the secondary MMU invalidate, just like the CPU TLB flush, has to happen before the migrate_page_copy() is called or it would be a bug in the first place (and it was for drivers using ->invalidate_range()).
KVM is unaffected because it doesn't implement ->invalidate_range().
The standard PAGE_SIZEd migrate_misplaced_page is less accelerated and uses the generic migrate_pages which transitions the pte from numa/protnone to a migration entry in try_to_unmap_one() and flushes TLBs and all mmu notifiers there before copying the page.
Link: http://lkml.kernel.org/r/20181013002430.698-3-aarcange@redhat.com Signed-off-by: Andrea Arcangeli aarcange@redhat.com Acked-by: Mel Gorman mgorman@suse.de Acked-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Reviewed-by: Aaron Tomlin atomlin@redhat.com Cc: Jerome Glisse jglisse@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org --- mm/huge_memory.c | 14 +++++++++++++- mm/migrate.c | 19 ++++++------------- 2 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index deed97fba979..a71a5172104c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1562,8 +1562,20 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) * We are not sure a pending tlb flush here is for a huge page * mapping or not. Hence use the tlb range variant */ - if (mm_tlb_flush_pending(vma->vm_mm)) + if (mm_tlb_flush_pending(vma->vm_mm)) { flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); + /* + * change_huge_pmd() released the pmd lock before + * invalidating the secondary MMUs sharing the primary + * MMU pagetables (with ->invalidate_range()). The + * mmu_notifier_invalidate_range_end() (which + * internally calls ->invalidate_range()) in + * change_pmd_range() will run after us, so we can't + * rely on it here and we need an explicit invalidate. + */ + mmu_notifier_invalidate_range(vma->vm_mm, haddr, + haddr + HPAGE_PMD_SIZE); + }
/* * Migrate the THP to the requested node, returns with page unlocked diff --git a/mm/migrate.c b/mm/migrate.c index 1f634b1563b6..1637a32f3dd7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1973,8 +1973,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, int isolated = 0; struct page *new_page = NULL; int page_lru = page_is_file_cache(page); - unsigned long mmun_start = address & HPAGE_PMD_MASK; - unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; + unsigned long start = address & HPAGE_PMD_MASK; + unsigned long end = start + HPAGE_PMD_SIZE;
new_page = alloc_pages_node(node, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), @@ -2001,11 +2001,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */ - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ptl = pmd_lock(mm, pmd); if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Reverse changes made by migrate_page_copy() */ if (TestClearPageActive(new_page)) @@ -2036,8 +2034,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, * new page and page_add_new_anon_rmap guarantee the copy is * visible before the pagetable update. */ - flush_cache_range(vma, mmun_start, mmun_end); - page_add_anon_rmap(new_page, vma, mmun_start, true); + flush_cache_range(vma, start, end); + page_add_anon_rmap(new_page, vma, start, true); /* * At this point the pmd is numa/protnone (i.e. non present) and the TLB * has already been flushed globally. So no TLB can be currently @@ -2049,7 +2047,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this * pmd. */ - set_pmd_at(mm, mmun_start, pmd, entry); + set_pmd_at(mm, start, pmd, entry); update_mmu_cache_pmd(vma, address, &entry);
page_ref_unfreeze(page, 2); @@ -2058,11 +2056,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
spin_unlock(ptl); - /* - * No need to double call mmu_notifier->invalidate_range() callback as - * the above pmdp_huge_clear_flush_notify() did already call it. - */ - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Take an "isolate" reference and put new page on the LRU. */ get_page(new_page); @@ -2086,7 +2079,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ptl = pmd_lock(mm, pmd); if (pmd_same(*pmd, entry)) { entry = pmd_modify(entry, vma->vm_page_prot); - set_pmd_at(mm, mmun_start, pmd, entry); + set_pmd_at(mm, start, pmd, entry); update_mmu_cache_pmd(vma, address, &entry); } spin_unlock(ptl);
From: Pavel Tatashin pasha.tatashin@oracle.com
[ Upstream commit d3035be4ce2345d98633a45f93a74e526e94b802 ]
update_defer_init() should be called only when a struct page is about to be initialized: it counts the number of initialized struct pages, but struct pages may be skipped when there is mirrored memory.
So move update_defer_init() after the check for mirrored memory.
Also, rename update_defer_init() to defer_init() and reverse the returned boolean to emphasize that this is a predicate that tells whether the rest of memmap initialization should be deferred.
Make this function self-contained: do not pass number of already initialized pages in this zone by using static counters.
I found this bug by reading the code. The effect is that fewer than expected struct pages are initialized early in boot, and it is possible that in some corner cases we may fail to boot when mirrored pages are used. The deferred on demand code should somewhat mitigate this. But this still brings some inconsistencies compared to when booting without mirrored pages, so it is better to fix.
[pasha.tatashin@oracle.com: add comment about defer_init's lack of locking] Link: http://lkml.kernel.org/r/20180726193509.3326-3-pasha.tatashin@oracle.com [akpm@linux-foundation.org: make defer_init non-inline, __meminit] Link: http://lkml.kernel.org/r/20180724235520.10200-3-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin pasha.tatashin@oracle.com Reviewed-by: Oscar Salvador osalvador@suse.de Cc: Abdul Haleem abdhalee@linux.vnet.ibm.com Cc: Baoquan He bhe@redhat.com Cc: Daniel Jordan daniel.m.jordan@oracle.com Cc: Dan Williams dan.j.williams@intel.com Cc: Dave Hansen dave.hansen@intel.com Cc: David Rientjes rientjes@google.com Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org Cc: Ingo Molnar mingo@kernel.org Cc: Jan Kara jack@suse.cz Cc: Jérôme Glisse jglisse@redhat.com Cc: Kirill A. Shutemov kirill.shutemov@linux.intel.com Cc: Michael Ellerman mpe@ellerman.id.au Cc: Michal Hocko mhocko@suse.com Cc: Souptick Joarder jrdr.linux@gmail.com Cc: Steven Sistare steven.sistare@oracle.com Cc: Vlastimil Babka vbabka@suse.cz Cc: Wei Yang richard.weiyang@gmail.com Cc: Pasha Tatashin Pavel.Tatashin@microsoft.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/page_alloc.c | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e2ef1c17942f..63f990b73750 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -306,24 +306,33 @@ static inline bool __meminit early_page_uninitialised(unsigned long pfn) }
/* - * Returns false when the remaining initialisation should be deferred until + * Returns true when the remaining initialisation should be deferred until * later in the boot cycle when it can be parallelised. */ -static inline bool update_defer_init(pg_data_t *pgdat, - unsigned long pfn, unsigned long zone_end, - unsigned long *nr_initialised) +static bool __meminit +defer_init(int nid, unsigned long pfn, unsigned long end_pfn) { + static unsigned long prev_end_pfn, nr_initialised; + + /* + * prev_end_pfn static that contains the end of previous zone + * No need to protect because called very early in boot before smp_init. + */ + if (prev_end_pfn != end_pfn) { + prev_end_pfn = end_pfn; + nr_initialised = 0; + } + /* Always populate low zones for address-constrained allocations */ - if (zone_end < pgdat_end_pfn(pgdat)) - return true; - (*nr_initialised)++; - if ((*nr_initialised > pgdat->static_init_pgcnt) && - (pfn & (PAGES_PER_SECTION - 1)) == 0) { - pgdat->first_deferred_pfn = pfn; + if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) return false; + nr_initialised++; + if ((nr_initialised > NODE_DATA(nid)->static_init_pgcnt) && + (pfn & (PAGES_PER_SECTION - 1)) == 0) { + NODE_DATA(nid)->first_deferred_pfn = pfn; + return true; } - - return true; + return false; } #else static inline bool early_page_uninitialised(unsigned long pfn) @@ -331,11 +340,9 @@ static inline bool early_page_uninitialised(unsigned long pfn) return false; }
-static inline bool update_defer_init(pg_data_t *pgdat, - unsigned long pfn, unsigned long zone_end, - unsigned long *nr_initialised) +static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) { - return true; + return false; } #endif
@@ -5459,9 +5466,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, struct vmem_altmap *altmap) { unsigned long end_pfn = start_pfn + size; - pg_data_t *pgdat = NODE_DATA(nid); unsigned long pfn; - unsigned long nr_initialised = 0; struct page *page; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP struct memblock_region *r = NULL, *tmp; @@ -5489,8 +5494,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, continue; if (!early_pfn_in_nid(pfn, nid)) continue; - if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) - break;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP /* @@ -5513,6 +5516,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, } } #endif + if (defer_init(nid, pfn, end_pfn)) + break;
not_early: page = pfn_to_page(pfn);
From: Roman Gushchin guro@fb.com
[ Upstream commit 7a1adfddaf0d11a39fdcaf6e82a88e9c0586e08b ]
It was reported that on some of our machines containers were restarted with OOM symptoms without an obvious reason. Despite there being almost no memory pressure and plenty of page cache, the MEMCG_OOM event was raised occasionally, causing the container management software to think that OOM had happened. However, no tasks had been killed.
The following investigation showed that the problem is caused by a failing attempt to charge a high-order page. In such a case, the OOM killer is never invoked. As shown below, it can happen under conditions which are very far from a real OOM: e.g. there is plenty of clean page cache and no memory pressure.
There is no sense in raising an OOM event in this case, as it might confuse a user and lead to wrong and excessive actions (e.g. restart the workload, as in my case).
Let's look at the charging path in try_charge(). If the memory usage is about memory.max, which is absolutely natural for most memory cgroups, we try to reclaim some pages. Even if we were able to reclaim enough memory for the allocation, the following check can fail due to a race with another concurrent allocation:
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;
For regular pages the following condition will save us from triggering the OOM:
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
But for a high-order allocation this condition will intentionally fail. The reasoning behind it is that we'll likely fall back to regular pages anyway, so it's ok and even preferred to return ENOMEM.
In this case the idea of raising MEMCG_OOM looks dubious.
Fix this by moving MEMCG_OOM raising to mem_cgroup_oom() after allocation order check, so that the event won't be raised for high order allocations. This change doesn't affect regular pages allocation and charging.
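In outline, the fixed flow looks like this (a simplification for illustration, not the kernel source): the event is accounted only after the costly-order check has ruled OOM in:

```c
#include <stdio.h>

enum oom_status { OOM_SKIPPED, OOM_ASYNC };
#define PAGE_ALLOC_COSTLY_ORDER 3

static int memcg_oom_events;

static enum oom_status mem_cgroup_oom(int order)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;	/* high-order: fall back to ENOMEM */

	memcg_oom_events++;		/* MEMCG_OOM raised only here */
	return OOM_ASYNC;
}

int main(void)
{
	mem_cgroup_oom(4);	/* THP-sized charge failure: no event */
	mem_cgroup_oom(0);	/* regular page: event raised */
	printf("MEMCG_OOM events: %d\n", memcg_oom_events);
	return 0;
}
```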
Link: http://lkml.kernel.org/r/20181004214050.7417-1-guro@fb.com Signed-off-by: Roman Gushchin guro@fb.com Acked-by: David Rientjes rientjes@google.com Acked-by: Michal Hocko mhocko@kernel.org Acked-by: Johannes Weiner hannes@cmpxchg.org Cc: Vladimir Davydov vdavydov.dev@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- Documentation/admin-guide/cgroup-v2.rst | 4 ++++ mm/memcontrol.c | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 184193bcb262..5d9939388a78 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1127,6 +1127,10 @@ PAGE_SIZE multiple when read back. disk readahead. For now OOM in memory cgroup kills tasks iff shortage has happened inside page fault.
+ This event is not raised if the OOM killer is not + considered as an option, e.g. for failed high-order + allocations. + oom_kill The number of processes belonging to this cgroup killed by any kind of OOM killer. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e79cb59552d9..07c7af6f5e59 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1669,6 +1669,8 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int if (order > PAGE_ALLOC_COSTLY_ORDER) return OOM_SKIPPED;
+ memcg_memory_event(memcg, MEMCG_OOM); + /* * We are in the middle of the charge context here, so we * don't want to block when potentially sitting on a callstack @@ -2250,8 +2252,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, if (fatal_signal_pending(current)) goto force;
- memcg_memory_event(mem_over_limit, MEMCG_OOM); - /* * keep retrying as long as the memcg oom killer is able to make * a forward progress or bypass the charge if the oom killer
From: Jann Horn jannh@google.com
[ Upstream commit f0ecf25a093fc0589f0a6bc4c1ea068bbb67d220 ]
Having two gigantic arrays that must manually be kept in sync, including ifdefs, isn't exactly robust. To make it easier to catch such issues in the future, add a BUILD_BUG_ON().
Link: http://lkml.kernel.org/r/20181001143138.95119-3-jannh@google.com Signed-off-by: Jann Horn jannh@google.com Reviewed-by: Kees Cook keescook@chromium.org Reviewed-by: Andrew Morton akpm@linux-foundation.org Acked-by: Roman Gushchin guro@fb.com Acked-by: Michal Hocko mhocko@suse.com Cc: Davidlohr Bueso dave@stgolabs.net Cc: Oleg Nesterov oleg@redhat.com Cc: Christoph Lameter clameter@sgi.com Cc: Kemi Wang kemi.wang@intel.com Cc: Andy Lutomirski luto@kernel.org Cc: Ingo Molnar mingo@kernel.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/vmstat.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/mm/vmstat.c b/mm/vmstat.c index 7878da76abf2..b678c607e490 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1663,6 +1663,8 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos) stat_items_size += sizeof(struct vm_event_state); #endif
+ BUILD_BUG_ON(stat_items_size != + ARRAY_SIZE(vmstat_text) * sizeof(unsigned long)); v = kmalloc(stat_items_size, GFP_KERNEL); m->private = v; if (!v)
From: Andrea Arcangeli aarcange@redhat.com
[ Upstream commit 3b9aadf7278d16d7bed4d5d808501065f70898d8 ]
get_mempolicy(MPOL_F_NODE|MPOL_F_ADDR) called get_user_pages() in a way that would not wait for userfaults before failing, and it would hit a SIGBUS instead. Using get_user_pages_locked/unlocked instead allows get_mempolicy to let userfaults resolve the fault and fill the hole before grabbing the node id of the page.
If the user calls get_mempolicy() with MPOL_F_ADDR | MPOL_F_NODE for an address inside an area managed by uffd and there is no page at that address, the page allocation from within get_mempolicy() will fail because get_user_pages() does not allow for page fault retry required for uffd; the user will get SIGBUS.
With this patch, the page fault will be resolved by the uffd and the get_mempolicy() will continue normally.
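A toy model of the *locked contract that get_user_pages_locked() follows (the names and the simulated fault are stand-ins, not the mm code): the callee may drop the lock while it waits, clears *locked to say so, and the caller releases the lock only if it is still held:

```c
#include <stdio.h>

static int lock_held;

static int fault_in_page_locked(int *locked)
{
	/* pretend we had to drop the lock to wait for a userfault */
	lock_held = 0;
	*locked = 0;
	return 0;	/* fault resolved */
}

int main(void)
{
	int locked = 1;

	lock_held = 1;			/* caller takes mmap_sem for read */
	fault_in_page_locked(&locked);
	if (locked)
		lock_held = 0;		/* up_read() only if still held */
	printf("lock leaked: %s\n", lock_held ? "yes" : "no");
	return 0;
}
```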
Background:
Via code review: previously the syscall would have returned -EFAULT (vm_fault_to_errno); now it will block and wait for a userfault (if it's woken before the fault is resolved, it'll still return -EFAULT).
This way get_mempolicy will give a chance to an "unaware" app to be compliant with userfaults.
The reason this change is visible is that becoming "userfault compliant" cannot regress anything: all other syscalls, including read(2)/write(2), had to become "userfault compliant" a long time ago (that's one of the things userfaultfd can do that PROT_NONE and trapping segfaults can't).
So this is just one more syscall that becomes "userfault compliant", like all the other major ones already are.
This has been happening on virtio-bridge dpdk process which just called get_mempolicy on the guest space post live migration, but before the memory had a chance to be migrated to destination.
I didn't run an strace to be able to show the -EFAULT going away, but I've the confirmation of the below debug aid information (only visible with CONFIG_DEBUG_VM=y) going away with the patch:
[20116.371461] FAULT_FLAG_ALLOW_RETRY missing 0 [20116.371464] CPU: 1 PID: 13381 Comm: vhost-events Not tainted 4.17.12-200.fc28.x86_64 #1 [20116.371465] Hardware name: LENOVO 20FAS2BN0A/20FAS2BN0A, BIOS N1CET54W (1.22 ) 02/10/2017 [20116.371466] Call Trace: [20116.371473] dump_stack+0x5c/0x80 [20116.371476] handle_userfault.cold.37+0x1b/0x22 [20116.371479] ? remove_wait_queue+0x20/0x60 [20116.371481] ? poll_freewait+0x45/0xa0 [20116.371483] ? do_sys_poll+0x31c/0x520 [20116.371485] ? radix_tree_lookup_slot+0x1e/0x50 [20116.371488] shmem_getpage_gfp+0xce7/0xe50 [20116.371491] ? page_add_file_rmap+0x1a/0x2c0 [20116.371493] shmem_fault+0x78/0x1e0 [20116.371495] ? filemap_map_pages+0x3a1/0x450 [20116.371498] __do_fault+0x1f/0xc0 [20116.371500] __handle_mm_fault+0xe2e/0x12f0 [20116.371502] handle_mm_fault+0xda/0x200 [20116.371504] __get_user_pages+0x238/0x790 [20116.371506] get_user_pages+0x3e/0x50 [20116.371510] kernel_get_mempolicy+0x40b/0x700 [20116.371512] ? vfs_write+0x170/0x1a0 [20116.371515] __x64_sys_get_mempolicy+0x21/0x30 [20116.371517] do_syscall_64+0x5b/0x160 [20116.371520] entry_SYSCALL_64_after_hwframe+0x44/0xa9
The above harmless debug message (not a kernel crash, just a dump_stack()) is shown with CONFIG_DEBUG_VM=y to more quickly identify and improve kernel spots that may have to become "userfaultfd compliant" like this one (without having to run an strace and search for syscall misbehavior). Spots like the above are closer to a kernel bug for the non-cooperative usages that Mike focuses on than for the dpdk qemu-cooperative usages that reproduced it, but it's still nicer to get this fixed for dpdk too.
The only part of the patch that gave me pause is the mpol_get() implementation detail, but it looks like it works safely no matter what kind of mempolicy structure is involved (the default static policy also starts at a refcount of 1, so it will go to 2 and back to 1 without everything crashing at 0).
[rppt@linux.vnet.ibm.com: changelog addition]
  Link: http://lkml.kernel.org/r/20180904073718.GA26916@rapoport-lnx
Link: http://lkml.kernel.org/r/20180831214848.23676-1-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli aarcange@redhat.com
Reported-by: Maxime Coquelin maxime.coquelin@redhat.com
Tested-by: Dr. David Alan Gilbert dgilbert@redhat.com
Reviewed-by: Mike Rapoport rppt@linux.vnet.ibm.com
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 mm/mempolicy.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index da858f794eb6..2e76a8f65e94 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -797,16 +797,19 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 	}
 }
 
-static int lookup_node(unsigned long addr)
+static int lookup_node(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *p;
 	int err;
 
-	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
+	int locked = 1;
+	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);
 	}
+	if (locked)
+		up_read(&mm->mmap_sem);
 	return err;
 }
 
@@ -817,7 +820,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	int err;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = NULL;
-	struct mempolicy *pol = current->mempolicy;
+	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 
 	if (flags &
 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
@@ -857,7 +860,16 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 	if (flags & MPOL_F_NODE) {
 		if (flags & MPOL_F_ADDR) {
-			err = lookup_node(addr);
+			/*
+			 * Take a refcount on the mpol, lookup_node()
+			 * wil drop the mmap_sem, so after calling
+			 * lookup_node() only "pol" remains valid, "vma"
+			 * is stale.
+			 */
+			pol_refcount = pol;
+			vma = NULL;
+			mpol_get(pol);
+			err = lookup_node(mm, addr);
 			if (err < 0)
 				goto out;
 			*policy = err;
@@ -892,7 +904,9 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
  out:
 	mpol_cond_put(pol);
 	if (vma)
-		up_read(&current->mm->mmap_sem);
+		up_read(&mm->mmap_sem);
+	if (pol_refcount)
+		mpol_put(pol_refcount);
 	return err;
 }
From: Roman Gushchin guro@fb.com
[ Upstream commit 68600f623d69da428c6163275f97ca126e1a8ec5 ]
I've noticed that dying memory cgroups are often pinned in memory by a single pagecache page. Even under moderate memory pressure they sometimes stayed in that state for a long time. That looked strange.
My investigation showed that the problem is caused by applying the LRU pressure balancing math:
scan = div64_u64(scan * fraction[lru], denominator),
where
denominator = fraction[anon] + fraction[file] + 1.
Because fraction[lru] is always less than denominator, if the initial scan size is 1, the result is always 0.
This means the last page is not scanned and has no chance of being reclaimed.
Fix this by rounding up the result of the division.
In practice this change significantly improves the speed of dying cgroups reclaim.
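For illustration, here is a minimal user-space sketch of the truncation problem and the round-up fix. The fraction values are hypothetical, and div_round_up stands in for the DIV64_U64_ROUND_UP macro added below:

#include <stdio.h>

typedef unsigned long long u64;

/* Round-up division, mirroring the DIV64_U64_ROUND_UP macro below. */
static u64 div_round_up(u64 ll, u64 d)
{
	return (ll + d - 1) / d;
}

int main(void)
{
	u64 scan = 1;			/* one page left on this LRU    */
	u64 fraction_file = 40;		/* hypothetical pressure values */
	u64 fraction_anon = 60;
	u64 denominator = fraction_file + fraction_anon + 1;

	/* Old behaviour: truncating division never scans the last page. */
	printf("truncated:  %llu\n", scan * fraction_file / denominator);

	/* Fixed behaviour: rounding up still scans it. */
	printf("rounded up: %llu\n",
	       div_round_up(scan * fraction_file, denominator));
	return 0;
}

The truncated result is 0 (40/101), while the rounded-up result is 1, so the final page still gets a scan pass.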
[guro@fb.com: prevent double calculation of DIV64_U64_ROUND_UP() arguments]
  Link: http://lkml.kernel.org/r/20180829213311.GA13501@castle
Link: http://lkml.kernel.org/r/20180827162621.30187-3-guro@fb.com
Signed-off-by: Roman Gushchin guro@fb.com
Reviewed-by: Andrew Morton akpm@linux-foundation.org
Cc: Johannes Weiner hannes@cmpxchg.org
Cc: Michal Hocko mhocko@kernel.org
Cc: Tejun Heo tj@kernel.org
Cc: Rik van Riel riel@surriel.com
Cc: Konstantin Khlebnikov koct9i@gmail.com
Cc: Matthew Wilcox willy@infradead.org
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 include/linux/math64.h | 3 +++
 mm/vmscan.c            | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2d1d34..bb2c84afb80c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 }
 #endif /* mul_u64_u32_div */
 
+#define DIV64_U64_ROUND_UP(ll, d)	\
+	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
 #endif /* _LINUX_MATH64_H */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5ef7240cbcb..961401c46334 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2456,9 +2456,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
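A note on the ({ ... }) statement expression in the macro above: the _tmp temporary ensures the divisor is evaluated only once, which is the "prevent double calculation" fixup credited in the changelog. A hypothetical variant without it shows the hazard; everything here (BAD_ROUND_UP, denom) is made up for illustration:

#include <stdio.h>

typedef unsigned long long u64;

/* Hypothetical broken variant: the divisor expression (d) is
 * textually pasted twice by the preprocessor. */
#define BAD_ROUND_UP(ll, d)	(((ll) + (d) - 1) / (d))

static u64 calls;

/* A divisor with a visible side effect: it counts its own evaluations. */
static u64 denom(void)
{
	calls++;
	return 101;
}

int main(void)
{
	u64 r = BAD_ROUND_UP((u64)40, denom());

	/* Prints "evaluated 2 times": the double calculation that the
	 * _tmp temporary in DIV64_U64_ROUND_UP avoids. */
	printf("result=%llu, divisor evaluated %llu times\n", r, calls);
	return 0;
}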
From: Dmitry Vyukov dvyukov@google.com
[ Upstream commit 61448479a9f2c954cde0cfe778cb6bec5d0a748d ]
Slub does not call kmalloc_slab() for sizes > KMALLOC_MAX_CACHE_SIZE; instead it falls back to kmalloc_large().
For slab, KMALLOC_MAX_CACHE_SIZE == KMALLOC_MAX_SIZE, and it calls kmalloc_slab() for all allocations, relying on the NULL return value for over-sized allocations.
This inconsistency leads to unwanted warnings from kmalloc_slab() for over-sized allocations for slab. Returning NULL for failed allocations is the expected behavior.
Make slub and slab code consistent by checking size > KMALLOC_MAX_CACHE_SIZE in slab before calling kmalloc_slab().
While we are here, also fix the check in kmalloc_slab(). We should check against KMALLOC_MAX_CACHE_SIZE rather than KMALLOC_MAX_SIZE. It all kinda worked because for slab the constants are the same, and slub always checks the size against KMALLOC_MAX_CACHE_SIZE before calling kmalloc_slab(). But if we ever get there with size > KMALLOC_MAX_CACHE_SIZE anyway (for example, because of a newly introduced bug in slub code), bad things will happen.
Also move the check in kmalloc_slab() from function entry to the size > 192 case. This partially compensates for the additional check in slab code and makes slub code a bit faster (at least theoretically).
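For orientation, the size > 192 branch maps a request to its kmalloc cache by rounding up to the next power of two via fls(size - 1). A small user-space sketch of that mapping, with fls reimplemented here since it is a kernel helper:

#include <stdio.h>

/* Minimal fls(): 1-based index of the highest set bit (0 for 0),
 * mirroring the kernel helper used by kmalloc_slab(). */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 193, 256, 300, 4096 };

	for (int i = 0; i < 4; i++) {
		int index = fls_sketch(sizes[i] - 1);

		/* index selects the kmalloc-<2^index> cache. */
		printf("size %4u -> index %2d -> kmalloc-%u\n",
		       sizes[i], index, 1u << index);
	}
	return 0;
}

So a 193-byte request lands in kmalloc-256 and a 300-byte request in kmalloc-512; the new WARN_ON(1) fires only if a size above KMALLOC_MAX_CACHE_SIZE ever reaches this path.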
Also drop __GFP_NOWARN in the warning check. This warning means a bug in slab code itself; user-passed flags have nothing to do with it.
None of this affects slob.
Link: http://lkml.kernel.org/r/20180927171502.226522-1-dvyukov@gmail.com
Signed-off-by: Dmitry Vyukov dvyukov@google.com
Reported-by: syzbot+87829a10073277282ad1@syzkaller.appspotmail.com
Reported-by: syzbot+ef4e8fc3a06e9019bb40@syzkaller.appspotmail.com
Reported-by: syzbot+6e438f4036df52cbb863@syzkaller.appspotmail.com
Reported-by: syzbot+8574471d8734457d98aa@syzkaller.appspotmail.com
Reported-by: syzbot+af1504df0807a083dbd9@syzkaller.appspotmail.com
Acked-by: Christoph Lameter cl@linux.com
Acked-by: Vlastimil Babka vbabka@suse.cz
Cc: Pekka Enberg penberg@kernel.org
Cc: David Rientjes rientjes@google.com
Cc: Joonsoo Kim iamjoonsoo.kim@lge.com
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 mm/slab.c        |  4 ++++
 mm/slab_common.c | 12 ++++++------
 2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index aa76a70e087e..d73c7a4820a4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3675,6 +3675,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	struct kmem_cache *cachep;
 	void *ret;
 
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
+		return NULL;
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
@@ -3710,6 +3712,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	struct kmem_cache *cachep;
 	void *ret;
 
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
+		return NULL;
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fea3376f9816..3a7ac4f15194 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1027,18 +1027,18 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
 	unsigned int index;
 
-	if (unlikely(size > KMALLOC_MAX_SIZE)) {
-		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
-		return NULL;
-	}
-
 	if (size <= 192) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		index = size_index[size_index_elem(size)];
-	} else
+	} else {
+		if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
+			WARN_ON(1);
+			return NULL;
+		}
 		index = fls(size - 1);
+	}
 
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely((flags & GFP_DMA)))