Introduce BPF_F_CPU and BPF_F_ALL_CPUS flags and check them for the following APIs:
* 'map_lookup_elem()'
* 'map_update_elem()'
* 'generic_map_lookup_batch()'
* 'generic_map_update_batch()'
Also make these APIs use the correct value size when the new flags are set.
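
As an illustration only (not part of this patch): user space encodes the
target CPU in the upper 32 bits of the flags. A minimal sketch, assuming a
percpu map type that a later patch opts into these flags via
bpf_map_supports_cpu_flags(), and using libbpf's existing
bpf_map_lookup_elem_flags() wrapper; lookup_on_cpu() is a hypothetical
helper:

  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  /* Read only @cpu's slot of a percpu map: with BPF_F_CPU set and the CPU
   * number in the upper 32 bits of the flags, the kernel copies
   * map->value_size bytes rather than one value per possible CPU.
   */
  static int lookup_on_cpu(int map_fd, const void *key, void *value, __u32 cpu)
  {
          __u64 flags = ((__u64)cpu << 32) | BPF_F_CPU;

          return bpf_map_lookup_elem_flags(map_fd, key, value, flags);
  }
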
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
v10 -> v11:
- Use '(BPF_F_ALL_CPUS << 1) - 1' as allowed_flags in map_update_elem().
- Add BPF_EXIST to allowed_flags in generic_map_update_batch().
---
 include/linux/bpf.h            | 23 +++++++++++++++++++++-
 include/uapi/linux/bpf.h       |  2 ++
 kernel/bpf/syscall.c           | 36 ++++++++++++++++++++--------------
 tools/include/uapi/linux/bpf.h |  2 ++
 4 files changed, 47 insertions(+), 16 deletions(-)
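
Note for reviewers (illustration only, discarded by git am): the batch path
accepts BPF_F_ALL_CPUS through batch.elem_flags, so a single value_size-sized
value per key can be replicated to every CPU. A sketch using libbpf's
bpf_map_update_batch(); update_all_cpus() is a hypothetical helper and
assumes a percpu map type that a later patch opts into the new flags:

  #include <linux/bpf.h>
  #include <bpf/bpf.h>

  /* Update every CPU's copy of each key from a single value_size-sized
   * buffer per key, instead of preparing num_possible_cpus() copies.
   */
  static int update_all_cpus(int map_fd, const void *keys,
                             const void *values, __u32 *count)
  {
          LIBBPF_OPTS(bpf_map_batch_opts, opts,
                  .elem_flags = BPF_F_ALL_CPUS,
          );

          return bpf_map_update_batch(map_fd, keys, values, count, &opts);
  }
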
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6498be4c44f8..d84af3719b59 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3829,14 +3829,35 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 }
 #endif
 
+static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
+{
+	return false;
+}
+
 static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags,
					  u64 allowed_flags)
 {
-	if (flags & ~allowed_flags)
+	u32 cpu;
+
+	if ((u32)flags & ~allowed_flags)
 		return -EINVAL;
 
 	if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
 		return -EINVAL;
 
+	if (!(flags & BPF_F_CPU) && flags >> 32)
+		return -EINVAL;
+
+	if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+		if (!bpf_map_supports_cpu_flags(map->map_type))
+			return -EINVAL;
+		if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS))
+			return -EINVAL;
+
+		cpu = flags >> 32;
+		if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
+			return -ERANGE;
+	}
+
 	return 0;
 }
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f5713f59ac10..8b6279ca6e66 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1373,6 +1373,8 @@ enum {
 	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
 	BPF_EXIST	= 2, /* update existing element */
 	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
+	BPF_F_CPU	= 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
+	BPF_F_ALL_CPUS	= 16, /* update value across all CPUs for percpu maps */
 };
 
 /* flags for BPF_MAP_CREATE command */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cef8963d69f9..3c3e3b4095b9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -133,12 +133,14 @@ bool bpf_map_write_active(const struct bpf_map *map)
 	return atomic64_read(&map->writecnt) != 0;
 }
 
-static u32 bpf_map_value_size(const struct bpf_map *map)
-{
-	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+static u32 bpf_map_value_size(const struct bpf_map *map, u64 flags)
+{
+	if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS))
+		return map->value_size;
+	else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+		 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+		 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 		return round_up(map->value_size, 8) * num_possible_cpus();
 	else if (IS_FD_MAP(map))
 		return sizeof(u32);
@@ -1732,7 +1734,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
 		return -EPERM;
 
-	err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK);
+	err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK | BPF_F_CPU);
 	if (err)
 		return err;
 
@@ -1740,7 +1742,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (IS_ERR(key))
 		return PTR_ERR(key);
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->flags);
 
 	err = -ENOMEM;
 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
@@ -1781,6 +1783,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
 	struct bpf_map *map;
 	void *key, *value;
+	u64 allowed_flags;
 	u32 value_size;
 	int err;
 
@@ -1797,7 +1800,8 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 		goto err_put;
 	}
 
-	err = bpf_map_check_op_flags(map, attr->flags, ~0);
+	allowed_flags = (BPF_F_ALL_CPUS << 1) - 1;
+	err = bpf_map_check_op_flags(map, attr->flags, allowed_flags);
 	if (err)
 		goto err_put;
 
@@ -1807,7 +1811,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 		goto err_put;
 	}
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->flags);
 	value = kvmemdup_bpfptr(uvalue, value_size);
 	if (IS_ERR(value)) {
 		err = PTR_ERR(value);
@@ -2001,13 +2005,15 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
 	u32 value_size, cp, max_count;
 	void *key, *value;
+	u64 allowed_flags;
 	int err = 0;
 
-	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+	allowed_flags = BPF_EXIST | BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, allowed_flags);
 	if (err)
 		return err;
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->batch.elem_flags);
 
 	max_count = attr->batch.count;
 	if (!max_count)
@@ -2062,11 +2068,11 @@ int generic_map_lookup_batch(struct bpf_map *map,
 	u32 value_size, cp, max_count;
 	int err;
 
-	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK | BPF_F_CPU);
 	if (err)
 		return err;
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->batch.elem_flags);
 
 	max_count = attr->batch.count;
 	if (!max_count)
@@ -2188,7 +2194,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
 		goto err_put;
 	}
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, 0);
 
 	err = -ENOMEM;
 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f5713f59ac10..8b6279ca6e66 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1373,6 +1373,8 @@ enum {
 	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
 	BPF_EXIST	= 2, /* update existing element */
 	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
+	BPF_F_CPU	= 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
+	BPF_F_ALL_CPUS	= 16, /* update value across all CPUs for percpu maps */
 };
 
 /* flags for BPF_MAP_CREATE command */