From: Anton Protopopov <aspsk@isovalent.com>
commit b34ffb0c6d23583830f9327864b9c1f486003305 upstream.
The LRU and LRU_PERCPU maps allocate a new element on update before locking the target hash table bucket. Right after that the maps try to lock the bucket. If this fails, then maps return -EBUSY to the caller without releasing the allocated element. This makes the element untracked: it doesn't belong to either of the free lists, and it doesn't belong to the hash table, so it can't be re-used; this eventually leads to the permanent -ENOMEM on LRU map updates, which is unexpected. Fix this by returning the element to the local free list if bucket locking fails.
Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked")
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
Link: https://lore.kernel.org/r/20230522154558.2166815-1-aspsk@isovalent.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/bpf/hashtab.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1197,7 +1197,7 @@ static long htab_lru_map_update_elem(str
 
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1218,6 +1218,7 @@ static long htab_lru_map_update_elem(str
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
 
+err_lock_bucket:
 	if (ret)
 		htab_lru_push_free(htab, l_new);
 	else if (l_old)
@@ -1320,7 +1321,7 @@ static long __htab_lru_percpu_map_update
 
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1343,6 +1344,7 @@ static long __htab_lru_percpu_map_update
 	ret = 0;
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
+err_lock_bucket:
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;