Do not let the shrinker reclaim entries that are currently in use by somebody. Firstly, these entries are likely still useful. Secondly, we will need to keep such entries around to protect a pending increment of the xattr block refcount.
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/mbcache.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..cfc28129fb6f 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (entry->e_referenced) {
+		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
 			entry->e_referenced = 0;
 			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		spin_unlock(&cache->c_list_lock);
 		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
+		/* Now a reliable check if the entry didn't get used... */
+		if (atomic_read(&entry->e_refcnt) > 2) {
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_list_lock);
+			list_add_tail(&entry->e_list, &cache->c_list);
+			cache->c_entry_count++;
+			continue;
+		}
 		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
 			hlist_bl_del_init(&entry->e_hash_list);
 			atomic_dec(&entry->e_refcnt);
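For readers new to mbcache, a minimal sketch of the idea behind the e_refcnt > 2 test, assuming the usual mbcache accounting where one reference is held while the entry is on the hash list and one while it is on the LRU (c_list); mb_cache_entry_busy() is a hypothetical helper used only for illustration, not part of this patch:

#include <linux/atomic.h>
#include <linux/mbcache.h>

/*
 * Sketch only. Assumed accounting: an entry that sits on both the hash
 * list and the LRU (c_list) holds two references, so a count above 2
 * means some other user (e.g. a lookup still holding its reference) is
 * working with the entry and the shrinker should skip it.
 */
static inline bool mb_cache_entry_busy(struct mb_cache_entry *entry)
{
        return atomic_read(&entry->e_refcnt) > 2;
}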
On 22/07/12 12:54PM, Jan Kara wrote:
Do not let the shrinker reclaim entries that are currently in use by somebody. Firstly, these entries are likely still useful. Secondly, we will need to keep such entries around to protect a pending increment of the xattr block refcount.
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>

 fs/mbcache.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..cfc28129fb6f 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (entry->e_referenced) {
+		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
 			entry->e_referenced = 0;
 			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		spin_unlock(&cache->c_list_lock);
 		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
+		/* Now a reliable check if the entry didn't get used... */
+		if (atomic_read(&entry->e_refcnt) > 2) {
Taking another look at this patchset: I think if we move this "if" condition checking the refcnt above, i.e. before we delete the entry from c_list, then we can avoid the sequence of removing the entry -> checking its refcnt under the lock -> adding it back if the refcnt is elevated.
Thoughts?
-ritesh
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_list_lock);
+			list_add_tail(&entry->e_list, &cache->c_list);
+			cache->c_entry_count++;
+			continue;
+		}
 		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
 			hlist_bl_del_init(&entry->e_hash_list);
 			atomic_dec(&entry->e_refcnt);
--
2.35.3
On Thu 14-07-22 17:17:02, Ritesh Harjani wrote:
On 22/07/12 12:54PM, Jan Kara wrote:
Do not let the shrinker reclaim entries that are currently in use by somebody. Firstly, these entries are likely still useful. Secondly, we will need to keep such entries around to protect a pending increment of the xattr block refcount.
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>

 fs/mbcache.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..cfc28129fb6f 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (entry->e_referenced) {
+		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
 			entry->e_referenced = 0;
 			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		spin_unlock(&cache->c_list_lock);
 		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
+		/* Now a reliable check if the entry didn't get used... */
+		if (atomic_read(&entry->e_refcnt) > 2) {
Taking another look at this patchset: I think if we move this "if" condition checking the refcnt above, i.e. before we delete the entry from c_list, then we can avoid the sequence of removing the entry -> checking its refcnt under the lock -> adding it back if the refcnt is elevated.
Thoughts?
Well, but synchronization would get more complicated because we don't want to acquire hlist_bl_lock() under c_list_lock (technically we could at this point in the series but it would make life harder for the last patch in the series). And we need c_list_lock to remove the entry from the LRU list. It could all be done, but I don't think what you suggest is really that much simpler, and this code will go away later in the patchset anyway...
Honza
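To make the trade-off concrete, here is a rough sketch of the reordering under discussion (hypothetical code, not taken from the thread; it reuses the existing fs/mbcache.c identifiers): the refcnt would be checked before the entry leaves c_list, which avoids the remove/re-add dance, but the reliable check needs hlist_bl_lock(), which then nests inside c_list_lock:

        /* Hypothetical reordering of mb_cache_shrink(), for illustration only. */
        spin_lock(&cache->c_list_lock);
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
                if (entry->e_referenced) {
                        entry->e_referenced = 0;
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);            /* now nests inside c_list_lock */
                if (atomic_read(&entry->e_refcnt) > 2) {
                        /* still in use: keep it on the LRU, no re-add needed */
                        hlist_bl_unlock(head);
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                /*
                 * Reclaim path elided: unhash the entry, unlink it from the
                 * LRU and drop the references, all while both locks are held
                 * or with careful re-locking.
                 */
                hlist_bl_unlock(head);
        }
        spin_unlock(&cache->c_list_lock);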
On 22/07/14 04:36PM, Jan Kara wrote:
On Thu 14-07-22 17:17:02, Ritesh Harjani wrote:
On 22/07/12 12:54PM, Jan Kara wrote:
Do not let the shrinker reclaim entries that are currently in use by somebody. Firstly, these entries are likely still useful. Secondly, we will need to keep such entries around to protect a pending increment of the xattr block refcount.
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>

 fs/mbcache.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..cfc28129fb6f 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (entry->e_referenced) {
+		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
 			entry->e_referenced = 0;
 			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		spin_unlock(&cache->c_list_lock);
 		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
+		/* Now a reliable check if the entry didn't get used... */
+		if (atomic_read(&entry->e_refcnt) > 2) {
Taking another look at this patchset: I think if we move this "if" condition checking the refcnt above, i.e. before we delete the entry from c_list, then we can avoid the sequence of removing the entry -> checking its refcnt under the lock -> adding it back if the refcnt is elevated.
Thoughts?
Well, but synchronization would get more complicated because we don't want to acquire hlist_bl_lock() under c_list_lock (technically we could at this
Ok, yes. I tried implementing it and it becomes a lock()/unlock() mess.
point in the series but it would make life harder for the last patch in the series). And we need c_list_lock to remove the entry from the LRU list. It could all be done, but I don't think what you suggest is really that much simpler, and this code will go away later in the patchset anyway...
I agree. Thanks for re-checking it.
-ritesh
On Tue, 12 Jul 2022 12:54:20 +0200, Jan Kara wrote:
Do not let the shrinker reclaim entries that are currently in use by somebody. Firstly, these entries are likely still useful. Secondly, we will need to keep such entries around to protect a pending increment of the xattr block refcount.
Applied, thanks! (Some slight adjustments were needed to resolve a merge conflict.)
[01/10] mbcache: Don't reclaim used entries
        commit: ee595bcf21a86af4cff673000e2728d61c7c0e7b
[02/10] mbcache: Add functions to delete entry if unused
        commit: ad3923aa44185f5f65e17764fe5c30501c6dfd22
[03/10] ext4: Remove EA inode entry from mbcache on inode eviction
        commit: 428dc374a6cb6c0cbbf6fe8984b667ef78dc7d75
[04/10] ext4: Unindent codeblock in ext4_xattr_block_set()
        commit: d52086dcf26a6284b08b5544210a7475b4837d52
[05/10] ext4: Fix race when reusing xattr blocks
        commit: 132991ed28822cfb4be41ac72195f00fc0baf3c8
[06/10] ext2: Factor our freeing of xattr block reference
        commit: c30e78a5f165244985aa346bdd460d459094470e
[07/10] ext2: Unindent codeblock in ext2_xattr_set()
        commit: 0e85fb030d13e427deca44a95aabb2475614f8d2
[08/10] ext2: Avoid deleting xattr block that is being reused
        commit: 44ce98e77ab4583b17ff4f501c2076eec3b759d7
[09/10] mbcache: Remove mb_cache_entry_delete()
        commit: c3671ffa0919f2d433576c99c4e211cd367afda0
[10/10] mbcache: Automatically delete entries from cache on freeing
        commit: b51539a7d04fb7d05b28ab9387364ccde88b6b6d
Best regards,