This series backports bugfixes that have already been merged in Linux upstream. We hit these issues in our commercial products; they are serious and should be fixed immediately.

Note that it also includes some xarray modifications, since upcoming patches depend heavily on them; taking them now reduces conflicts later.
All patches have been tested again as a whole.
Thanks, Gao Xiang
Gao Xiang (7):
  staging: erofs: remove the redundant d_rehash() for the root dentry
  staging: erofs: atomic_cond_read_relaxed on ref-locked workgroup
  staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'
  staging: erofs: add a full barrier in erofs_workgroup_unfreeze
  staging: erofs: {dir,inode,super}.c: rectify BUG_ONs
  staging: erofs: unzip_{pagevec.h,vle.c}: rectify BUG_ONs
  staging: erofs: unzip_vle_lz4.c,utils.c: rectify BUG_ONs
 drivers/staging/erofs/dir.c           |  7 +++-
 drivers/staging/erofs/inode.c         | 10 ++++-
 drivers/staging/erofs/internal.h      | 70 ++++++++++++++++++++++-------------
 drivers/staging/erofs/super.c         | 19 +++-------
 drivers/staging/erofs/unzip_pagevec.h |  2 +-
 drivers/staging/erofs/unzip_vle.c     | 35 +++++++-----------
 drivers/staging/erofs/unzip_vle_lz4.c |  2 +-
 drivers/staging/erofs/utils.c         | 12 +++---
 8 files changed, 86 insertions(+), 71 deletions(-)
commit e9c892465583c8f42d61fafe30970d36580925df upstream.
There is actually no need at all to d_rehash() the root dentry, as Al pointed out; fix it.
Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/super.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index f69e619807a1..1ab3553c839b 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -442,12 +442,6 @@ static int erofs_read_super(struct super_block *sb,
 
 	erofs_register_super(sb);
 
-	/*
-	 * We already have a positive dentry, which was instantiated
-	 * by d_make_root. Just need to d_rehash it.
-	 */
-	d_rehash(sb->s_root);
-
 	if (!silent)
 		infoln("mounted on %s with opts: %s.", dev_name,
 			(char *)data);
commit df134b8d17b90c1e7720e318d36416b57424ff7a upstream.
It's better to use atomic_cond_read_relaxed(), which is implemented with hardware instructions that monitor a variable for changes (currently on ARM64), instead of open-coded busy waiting.
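For context, a condensed sketch of the two waiting styles (not part of the patch; the helper names below are made up for illustration, while atomic_read(), cpu_relax() and atomic_cond_read_relaxed() are the real kernel primitives):

	/* open-coded busy waiting: keeps re-reading the counter in a loop */
	static int wait_opencoded(atomic_t *cnt)
	{
		int v;

		while ((v = atomic_read(cnt)) == EROFS_LOCKED_MAGIC)
			cpu_relax();
		return v;
	}

	/*
	 * atomic_cond_read_relaxed(): on arm64 this can use LDXR/WFE so the
	 * CPU waits for the cacheline to change instead of spinning.
	 */
	static int wait_cond_read(atomic_t *cnt)
	{
		return atomic_cond_read_relaxed(cnt, VAL != EROFS_LOCKED_MAGIC);
	}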
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/internal.h | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 3ac4599bbe01..f933ab602c37 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -221,23 +221,29 @@ static inline void erofs_workgroup_unfreeze(
 	preempt_enable();
 }
 
+#if defined(CONFIG_SMP)
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	return atomic_cond_read_relaxed(&grp->refcount,
+					VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	int v = atomic_read(&grp->refcount);
+
+	/* workgroup is never freezed on uniprocessor systems */
+	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+	return v;
+}
+#endif
+
 static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
 {
-	const int locked = (int)EROFS_LOCKED_MAGIC;
 	int o;
 
 repeat:
-	o = atomic_read(&grp->refcount);
-
-	/* spin if it is temporarily locked at the reclaim path */
-	if (unlikely(o == locked)) {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-		do
-			cpu_relax();
-		while (atomic_read(&grp->refcount) == locked);
-#endif
-		goto repeat;
-	}
+	o = erofs_wait_on_workgroup_freezed(grp);
 
 	if (unlikely(o <= 0))
 		return -1;
commit 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad upstream.
There are two minor issues in the current freeze interface:
1) The freeze interfaces are not actually related to CONFIG_DEBUG_SPINLOCK, therefore fix the incorrect conditions;

2) For SMP platforms, preemption should also be disabled before doing atomic_cmpxchg, in case a high-priority task preempts between atomic_cmpxchg and preempt_disable and then spins on the locked refcount later (a condensed sketch of the fixed ordering follows).
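A condensed view of the fixed SMP freeze path, extracted from the diff below rather than additional code:

	static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
							 int val)
	{
		/* disable preemption *before* publishing the locked value, so
		 * this task cannot be preempted while grp->refcount reads
		 * EROFS_LOCKED_MAGIC */
		preempt_disable();
		if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
			preempt_enable();
			return false;
		}
		return true;
	}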
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/internal.h | 41 ++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index f933ab602c37..399a7003e783 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -194,40 +194,49 @@ struct erofs_workgroup {
 
 #define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)
 
-static inline bool erofs_workgroup_try_to_freeze(
-	struct erofs_workgroup *grp, int v)
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (v != atomic_cmpxchg(&grp->refcount,
-		v, EROFS_LOCKED_MAGIC))
-		return false;
 	preempt_disable();
-#else
-	preempt_disable();
-	if (atomic_read(&grp->refcount) != v) {
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
 		preempt_enable();
 		return false;
 	}
-#endif
 	return true;
 }
 
-static inline void erofs_workgroup_unfreeze(
-	struct erofs_workgroup *grp, int v)
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	atomic_set(&grp->refcount, v);
-#endif
+	atomic_set(&grp->refcount, orig_val);
 	preempt_enable();
 }
 
-#if defined(CONFIG_SMP)
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	return atomic_cond_read_relaxed(&grp->refcount,
 					VAL != EROFS_LOCKED_MAGIC);
 }
 #else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	int v = atomic_read(&grp->refcount);
commit 948bbdb1818b7ad6e539dad4fbd2dd4650793ea9 upstream.
Just like other generic locks, insert a full barrier in case of memory reordering.
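The resulting unfreeze path is short enough to show in full (condensed from the diff below):

	static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
						    int orig_val)
	{
		/* make all modifications done during the freezing period
		 * visible before the refcount store that publishes the
		 * unfreeze to other observers */
		smp_mb();
		atomic_set(&grp->refcount, orig_val);
		preempt_enable();
	}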
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/internal.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 399a7003e783..892944355867 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -209,6 +209,11 @@ static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
 static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
 					    int orig_val)
 {
+	/*
+	 * other observers should notice all modifications
+	 * in the freezing period.
+	 */
+	smp_mb();
 	atomic_set(&grp->refcount, orig_val);
 	preempt_enable();
 }
commit 8b987bca2d09649683cbe496419a011df8c08493 upstream.
Remove all redundant BUG_ONs, and turn the remaining useful ones into DBG_BUGONs.
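The conversions all follow the same shape: a condition that can be triggered by a corrupted image gets a graceful error return, while the assertion is kept as DBG_BUGON so debug builds still catch it. A minimal sketch of the pattern (taken from the dir.c hunk below):

	/* before: an unconditional crash, even on release kernels */
	BUG_ON(de_namelen < 0);

	/* after: assert only in debug builds, otherwise fail gracefully */
	if (unlikely(de_namelen < 0)) {
		DBG_BUGON(1);
		return -EIO;
	}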
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/dir.c   |  7 +++++--
 drivers/staging/erofs/inode.c | 10 ++++++++--
 drivers/staging/erofs/super.c | 13 ++++++-------
 3 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index d1cb0d78ab84..e44ca93dcdc6 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
 			strnlen(de_name, maxsize - nameoff) :
 			le16_to_cpu(de[1].nameoff) - nameoff;
 
-		/* the corrupted directory found */
-		BUG_ON(de_namelen < 0);
+		/* a corrupted entry is found */
+		if (unlikely(de_namelen < 0)) {
+			DBG_BUGON(1);
+			return -EIO;
+		}
 
 #ifdef CONFIG_EROFS_FS_DEBUG
 		dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index 04c61a9d7b76..d7fbf5f4600f 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -133,7 +133,13 @@ static int fill_inline_data(struct inode *inode, void *data,
 			return -ENOMEM;
 
 		m_pofs += vi->inode_isize + vi->xattr_isize;
-		BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
+
+		/* inline symlink data shouldn't across page boundary as well */
+		if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
+			DBG_BUGON(1);
+			kfree(lnk);
+			return -EIO;
+		}
 
 		/* get in-page inline data */
 		memcpy(lnk, data + m_pofs, inode->i_size);
@@ -171,7 +177,7 @@ static int fill_inode(struct inode *inode, int isdir)
 			return PTR_ERR(page);
 		}
 
-		BUG_ON(!PageUptodate(page));
+		DBG_BUGON(!PageUptodate(page));
 		data = page_address(page);
 
 		err = read_inode(inode, data + ofs);
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1ab3553c839b..1c2eb69682ef 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -40,7 +40,6 @@ static int __init erofs_init_inode_cache(void)
 static void erofs_exit_inode_cache(void)
 {
-	BUG_ON(erofs_inode_cachep == NULL);
 	kmem_cache_destroy(erofs_inode_cachep);
 }
 
@@ -303,8 +302,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
 	int ret = 1;	/* 0 - busy */
 	struct address_space *const mapping = page->mapping;
 
-	BUG_ON(!PageLocked(page));
-	BUG_ON(mapping->a_ops != &managed_cache_aops);
+	DBG_BUGON(!PageLocked(page));
+	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
 
 	if (PagePrivate(page))
 		ret = erofs_try_to_free_cached_page(mapping, page);
@@ -317,10 +316,10 @@ static void managed_cache_invalidatepage(struct page *page,
 	const unsigned int stop = length + offset;
 
-	BUG_ON(!PageLocked(page));
+	DBG_BUGON(!PageLocked(page));
 
-	/* Check for overflow */
-	BUG_ON(stop > PAGE_SIZE || stop < length);
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > PAGE_SIZE || stop < length);
 
 	if (offset == 0 && stop == PAGE_SIZE)
 		while (!managed_cache_releasepage(page, GFP_NOFS))
@@ -649,7 +648,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
 	unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
 	int err;
 
-	BUG_ON(!sb_rdonly(sb));
+	DBG_BUGON(!sb_rdonly(sb));
 	err = parse_options(sb, data);
 	if (err)
 		goto out;
commit 70b17991d89554cdd16f3e4fb0179bcc03c808d9 upstream.
Remove all redundant BUG_ONs, and turn the remaining useful ones into DBG_BUGONs.
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Conflicts:
	drivers/staging/erofs/unzip_vle.c

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/unzip_pagevec.h |  2 +-
 drivers/staging/erofs/unzip_vle.c     | 35 ++++++++++++++---------------------
 2 files changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
index 0956615b86f7..23856ba2742d 100644
--- a/drivers/staging/erofs/unzip_pagevec.h
+++ b/drivers/staging/erofs/unzip_pagevec.h
@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
 	erofs_vtptr_t t;
 
 	if (unlikely(ctor->index >= ctor->nr)) {
-		BUG_ON(ctor->next == NULL);
+		DBG_BUGON(!ctor->next);
 		z_erofs_pagevec_ctor_pagedown(ctor, true);
 	}
 
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 45e88bada907..1c4b3e0343f5 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -20,9 +20,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
 
 void z_erofs_exit_zip_subsystem(void)
 {
-	BUG_ON(z_erofs_workqueue == NULL);
-	BUG_ON(z_erofs_workgroup_cachep == NULL);
-
 	destroy_workqueue(z_erofs_workqueue);
 	kmem_cache_destroy(z_erofs_workgroup_cachep);
 }
@@ -366,7 +363,10 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
 	struct z_erofs_vle_work *work;
 	/* if multiref is disabled, grp should never be nullptr */
-	BUG_ON(grp != NULL);
+	if (unlikely(grp)) {
+		DBG_BUGON(1);
+		return ERR_PTR(-EINVAL);
+	}
 
 	/* no available workgroup, let's allocate one */
 	grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
@@ -745,7 +745,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
 		bool cachemngd = false;
 
 		DBG_BUGON(PageUptodate(page));
-		BUG_ON(page->mapping == NULL);
+		DBG_BUGON(!page->mapping);
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
 		if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
@@ -803,7 +803,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 
 	might_sleep();
 	work = z_erofs_vle_grab_primary_work(grp);
-	BUG_ON(!READ_ONCE(work->nr_pages));
+	DBG_BUGON(!READ_ONCE(work->nr_pages));
 
 	mutex_lock(&work->lock);
 	nr_pages = work->nr_pages;
@@ -852,8 +852,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		else
 			pagenr = z_erofs_onlinepage_index(page);
 
-		BUG_ON(pagenr >= nr_pages);
-		BUG_ON(pages[pagenr] != NULL);
+		DBG_BUGON(pagenr >= nr_pages);
+		DBG_BUGON(pages[pagenr]);
 
 		pages[pagenr] = page;
 	}
@@ -876,9 +876,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		if (z_erofs_is_stagingpage(page))
 			continue;
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-		else if (page->mapping == mngda) {
-			BUG_ON(PageLocked(page));
-			BUG_ON(!PageUptodate(page));
+		if (page->mapping == mngda) {
+			DBG_BUGON(!PageUptodate(page));
 			continue;
 		}
 #endif
@@ -886,8 +885,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		/* only non-head page could be reused as a compressed page */
 		pagenr = z_erofs_onlinepage_index(page);
 
-		BUG_ON(pagenr >= nr_pages);
-		BUG_ON(pages[pagenr] != NULL);
+		DBG_BUGON(pagenr >= nr_pages);
+		DBG_BUGON(pages[pagenr]);
 		++sparsemem_pages;
 		pages[pagenr] = page;
@@ -897,9 +896,6 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
 	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
-		/* FIXME! this should be fixed in the future */
-		BUG_ON(grp->llen != llen);
-
 		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
 					     pages, nr_pages, work->pageofs);
 		goto out;
@@ -914,10 +910,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	if (err != -ENOTSUPP)
 		goto out_percpu;
 
-	if (sparsemem_pages >= nr_pages) {
-		BUG_ON(sparsemem_pages > nr_pages);
+	if (sparsemem_pages >= nr_pages)
 		goto skip_allocpage;
-	}
 
 	for (i = 0; i < nr_pages; ++i) {
 		if (pages[i] != NULL)
@@ -1010,7 +1004,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
 		struct z_erofs_vle_unzip_io_sb, io.u.work);
 	LIST_HEAD(page_pool);
 
-	BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
 	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
 
 	put_pages_list(&page_pool);
@@ -1344,7 +1338,6 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 			continue;
 		}
 
-		BUG_ON(PagePrivate(page));
 		set_page_private(page, (unsigned long)head);
 		head = page;
 	}
commit b8e076a6ef253e763bfdb81e5c72bcc828b0fbeb upstream.
Remove all redundant BUG_ONs, and turn the remaining useful ones into DBG_BUGONs.
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
---
 drivers/staging/erofs/unzip_vle_lz4.c |  2 +-
 drivers/staging/erofs/utils.c         | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 1a428658cbea..16ac335ee59f 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
 			if (compressed_pages[j] != page)
 				continue;
 
-			BUG_ON(mirrored[j]);
+			DBG_BUGON(mirrored[j]);
 			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
 			mirrored[j] = true;
 			break;
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index d2e3ace91046..b535898ca753 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 		list_del(&page->lru);
 	} else {
 		page = alloc_pages(gfp | __GFP_NOFAIL, 0);
-
-		BUG_ON(page == NULL);
-		BUG_ON(page->mapping != NULL);
 	}
 	return page;
 }
@@ -58,7 +55,7 @@ struct erofs_workgroup *erofs_find_workgroup(
 		/* decrease refcount added by erofs_workgroup_put */
 		if (unlikely(oldcount == 1))
 			atomic_long_dec(&erofs_global_shrink_cnt);
-		BUG_ON(index != grp->index);
+		DBG_BUGON(index != grp->index);
 	}
 	rcu_read_unlock();
 	return grp;
@@ -71,8 +68,11 @@ int erofs_register_workgroup(struct super_block *sb,
 	struct erofs_sb_info *sbi;
 	int err;
 
-	/* grp->refcount should not < 1 */
-	BUG_ON(!atomic_read(&grp->refcount));
+	/* grp shouldn't be broken or used before */
+	if (unlikely(atomic_read(&grp->refcount) != 1)) {
+		DBG_BUGON(1);
+		return -EINVAL;
+	}
 
 	err = radix_tree_preload(GFP_NOFS);
 	if (err)
On Fri, Feb 22, 2019 at 11:46:30PM +0800, Gao Xiang wrote:
> This series backports bugfixes that have already been merged in Linux
> upstream. We hit these issues in our commercial products; they are
> serious and should be fixed immediately.
>
> Note that it also includes some xarray modifications, since upcoming
> patches depend heavily on them; taking them now reduces conflicts later.
>
> All patches have been tested again as a whole.
All now queued up, thanks.
greg k-h