kernel/bounds.c is recompiled on every build, and shows the following
warning when compiling with W=1:
CC kernel/bounds.s
linux/kernel/bounds.c:16:6: warning: no previous prototype for ‘foo’ [-Wmissing-prototypes]
void foo(void)
^~~
Provide a prototype to satisfy the compiler.
Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Cc: stable@vger.kernel.org
---
I compile all of my incremental builds with W=1, which lets me know
instantly if I have introduced a new compiler warning in the code I
generate.
This warning always comes up and seems trivial to clean up.
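For reference, the warning is easy to reproduce outside the kernel tree
(a minimal sketch, assuming GCC):

    $ cat foo.c
    void foo(void) { }
    $ gcc -Wmissing-prototypes -c foo.c
    foo.c:1:6: warning: no previous prototype for 'foo' [-Wmissing-prototypes]

A preceding 'void foo(void);' declaration (or a header that provides
one) satisfies the check, which is exactly what the hunk below adds.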
---
kernel/bounds.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c373e887c066..60136d937800 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -13,6 +13,8 @@
#include <linux/log2.h>
#include <linux/spinlock_types.h>
+void foo(void);
+
void foo(void)
{
/* The enum constants to put into include/generated/bounds.h */
--
2.17.1
In the presence of multi-order entries the typical
pagevec_lookup_entries() pattern may loop forever:
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		...
		for (i = 0; i < pagevec_count(&pvec); i++) {
			index = indices[i];
			...
		}
		index++; /* BUG */
	}
The loop updates 'index' for each entry found and then increments it to
continue the lookup at the next possible page. However, if the last
entry in the pagevec is multi-order then the next possible page index
is more than 1 page away; worse, restarting the lookup just one page
later lands inside the same multi-order entry, which is then reported
again at the same index, so 'index' never advances. Fix this locally
for the filesystem-dax case by checking for dax multi-order entries.
Going forward, new users of multi-order entries need to be similarly
careful, or we need a generic way to report the page increment in the
radix iterator.
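To see how the loop gets stuck, here is a toy model in plain C (not the
kernel API; it assumes a single order-9, i.e. 512-page, entry whose
canonical index is 512, as a 2MiB dax entry would be):

    #include <stdio.h>

    /* A lookup anywhere inside [512, 1024) reports the entry at its
     * canonical index, 512. */
    static long lookup(long index)
    {
        if (index >= 512 && index < 1024)
            return 512;
        return -1; /* no entry */
    }

    int main(void)
    {
        long index = 512;
        int iter;

        for (iter = 0; iter < 3; iter++) {
            long found = lookup(index);

            printf("lookup(%ld) -> %ld\n", index, found);
            index = found; /* index = indices[i] */
            index++;       /* lands inside the same entry again */
        }
        return 0;
    }

Every iteration reports the entry at 512 again, so the walk makes no
forward progress; advancing by the entry size instead, as nr_pages does
in the hunk below, steps past the whole entry.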
Fixes: 5fac7408d828 ("mm, fs, dax: handle layout changes to pinned dax...")
Cc: <stable@vger.kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
Changes in v2:
* Only update nr_pages if the last entry in the pagevec is multi-order.
fs/dax.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 4becbf168b7f..0fb270f0a0ef 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -666,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
indices)) {
+ pgoff_t nr_pages = 1;
+
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *pvec_ent = pvec.pages[i];
void *entry;
@@ -680,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
xa_lock_irq(&mapping->i_pages);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (entry)
+ if (entry) {
page = dax_busy_page(entry);
+ /*
+ * Account for multi-order entries at
+ * the end of the pagevec.
+ */
+ if (i + 1 >= pagevec_count(&pvec))
+ nr_pages = 1UL << dax_radix_order(entry);
+ }
put_unlocked_mapping_entry(mapping, index, entry);
xa_unlock_irq(&mapping->i_pages);
if (page)
@@ -696,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
*/
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
- index++;
+ index += nr_pages;
if (page)
break;
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b45ba4a51cde29b2939365ef0c07ad34c8321789 Mon Sep 17 00:00:00 2001
From: Christophe Leroy <christophe.leroy@c-s.fr>
Date: Mon, 1 Oct 2018 12:21:10 +0000
Subject: [PATCH] powerpc/lib: fix book3s/32 boot failure due to code patching
Commit 51c3c62b58b3 ("powerpc: Avoid code patching freed init
sections") accesses the 'init_mem_is_free' flag too early, before the
kernel is relocated. This causes an early boot failure (before the
console is active).
As this verification does not need to happen that early, move the
test into patch_instruction() instead of __patch_instruction().
This also has the advantage of avoiding unnecessary remappings: the
check now runs before the temporary mapping of the patch area is set
up, so patching a freed init section is skipped without any map/unmap
work.
Fixes: 51c3c62b58b3 ("powerpc: Avoid code patching freed init sections")
Cc: stable@vger.kernel.org # 4.13+
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 6ae2777c220d..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
{
int err;
- /* Make sure we aren't patching a freed init section */
- if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
- pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
- return 0;
- }
-
__put_user_size(instr, patch_addr, 4, err);
if (err)
return err;
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
unsigned int *patch_addr = NULL;
@@ -188,12 +182,22 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ /* Make sure we aren't patching a freed init section */
+ if (init_mem_is_free && init_section_contains(addr, 4)) {
+ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ return 0;
+ }
+ return do_patch_instruction(addr, instr);
+}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(unsigned int *addr, unsigned long target, int flags)
From: Jann Horn <jannh@google.com>
[ upstream commit b799207e1e1816b09e7a5920fbb2d5fcf6edd681 ]
When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I
assumed that, in order to emulate 32-bit arithmetic with the verifier's
64-bit logic, it is sufficient to just truncate the output to 32 bits; and
so I just moved the register size coercion that used to be at the start of
the function to the end of the function.
That assumption is true for almost every op, but not for 32-bit right
shifts, because those can propagate information towards the least
significant bit. Fix it by always truncating inputs for 32-bit ops to 32
bits.
Also get rid of the coerce_reg_to_size() after the ALU op, since that has
no effect.
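A userspace sketch of the difference (hypothetical values, not verifier
code): for a 32-bit right shift, truncating only the output lets bit 32
of the 64-bit value leak into the result.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 64-bit value whose low 32 bits are all zero */
        uint64_t reg = 0x100000000ULL;

        /* correct 32-bit semantics: truncate the input first */
        uint32_t good = (uint32_t)reg >> 1;  /* 0x0 */

        /* buggy emulation: 64-bit shift, then truncate the output */
        uint32_t bad = (uint32_t)(reg >> 1); /* 0x80000000 */

        printf("good=%#x bad=%#x\n", good, bad);
        return 0;
    }

For most ALU ops truncating the output is enough; the right shift is
special because it propagates information from bit 32 toward the LSB,
which is why the inputs must be coerced to 32 bits first.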
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index adbe21c..82e8ede 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2865,6 +2865,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+ if (insn_bitness == 32) {
+ /* Relevant for 32-bit RSH: Information can propagate towards
+ * LSB, so it isn't sufficient to only truncate the output to
+ * 32 bits.
+ */
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
+ }
+
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@@ -3100,7 +3109,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
--
2.9.5
> Miklos,
>
> Seeing that it wasn't fixed in 4.18..
>
> > I've nothing against applying "new primitive: discard_new_inode() now
> > + this patch, but if it is deemed too risky at this point, we could
> > just revert the buggy commit 80ea09a002bf ("vfs: factor out
> > inode_insert5()") and its dependencies.
> >
>
> Should we propose for stable the upstream commits:
> e950564b97fd vfs: don't evict uninitialized inode
> c2b6d621c4ff new primitive: discard_new_inode()
>
> Or should we go with the independent v1 patch:
> https://patchwork.kernel.org/patch/10511969/
>
Greg,
To fix a 4.18 overlayfs regression please apply the following
3 upstream commits (in apply order):
c2b6d621c4ff new primitive: discard_new_inode()
e950564b97fd vfs: don't evict uninitialized inode
6faf05c2b2b4 ovl: set I_CREATING on inode being created
Thanks,
Amir.
From: Tang Junhui <tang.junhui.linux@gmail.com>
refill->end records the last key of writeback. For example, the first
time around, keys (1,128K) to (1,1024K) are flushed to the backing
device, but the end key (1,1024K) is not included, because of the code
below:
	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}
The next time we refill the writeback keybuf, the search starts from
key (1,1024K) and returns a key bigger than it, so the key (1,1024K)
is missed.
This patch modifies the above code so that the end key is included in
the writeback key buffer.
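The boundary behaviour is easy to see in isolation (a toy sketch in
plain C; key_cmp() stands in for bkey_cmp() and keys are reduced to a
single integer):

    #include <stdio.h>

    static int key_cmp(long a, long b)
    {
        return (a > b) - (a < b);
    }

    int main(void)
    {
        long end = 1024;
        long k = 1024; /* exactly the end key */

        printf("'>= 0' stops: %d\n", key_cmp(k, end) >= 0); /* 1: end key skipped */
        printf("'> 0'  stops: %d\n", key_cmp(k, end) > 0);  /* 0: end key buffered */
        return 0;
    }

With '>= 0' the walk returns MAP_DONE upon reaching the end key itself,
so that key never enters the buffer; with '> 0' only keys strictly
beyond the end terminate the walk.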
Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Coly Li <colyli@suse.de>
---
drivers/md/bcache/btree.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e7d4817681f2..3f4211b5cd33 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
struct keybuf *buf = refill->buf;
int ret = MAP_CONTINUE;
- if (bkey_cmp(k, refill->end) >= 0) {
+ if (bkey_cmp(k, refill->end) > 0) {
ret = MAP_DONE;
goto out;
}
--
2.19.0
From: Tang Junhui <tang.junhui.linux@gmail.com>
When the bcache device is clean, dirty keys may still exist after
journal replay, so we need to count these dirty keys even when the
device is in the clean state; otherwise the amount of dirty data
would be incorrect after writeback.
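In other words (a toy model of the accounting, not bcache code; it
assumes writeback decrements a dirty-sector count that only
bch_sectors_dirty_init() populates from the btree):

    #include <stdio.h>

    int main(void)
    {
        long dirty_sectors = 0;  /* attach skipped the init: stays 0 */
        long replayed_dirty = 8; /* dirty sectors left by journal replay */

        /* writeback completes those sectors and decrements the count */
        dirty_sectors -= replayed_dirty;

        printf("dirty after writeback: %ld\n", dirty_sectors); /* -8 */
        return 0;
    }

Initializing the count unconditionally at attach time keeps the counter
consistent with the dirty keys actually present.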
Signed-off-by: Tang Junhui <tang.junhui.linux@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Coly Li <colyli@suse.de>
---
drivers/md/bcache/super.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a99af19d2f91..4989c7d4d4d0 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1152,11 +1152,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
bch_writeback_queue(dc);
}
+ bch_sectors_dirty_init(&dc->disk);
+
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
--
2.19.0