bpf: Add BPF_F_CPU and BPF_F_ALL_CPUS flags support for percpu_array maps
Introduce support for the BPF_F_ALL_CPUS flag in percpu_array maps to
allow updating values for all CPUs with a single value, for both the
update_elem and update_batch APIs.

Introduce support for the BPF_F_CPU flag in percpu_array maps to allow:

* updating the value for a specified CPU, for both the update_elem and
  update_batch APIs.
* looking up the value for a specified CPU, for both the lookup_elem and
  lookup_batch APIs.

The BPF_F_CPU flag is passed via:

* map_flags of the lookup_elem and update_elem APIs, along with the
  embedded cpu info.
* elem_flags of the lookup_batch and update_batch APIs, along with the
  embedded cpu info.

Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
Link: https://lore.kernel.org/r/20260107022022.12843-3-leon.hwang@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 8eb76cb03f (parent 2b421662c7)
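For orientation before the diff: a minimal user-space sketch of the flag
encoding, assuming the BPF_F_CPU/BPF_F_ALL_CPUS definitions from this
series' uapi change and using the existing libbpf wrapper
bpf_map_update_elem(). The helper name and variables are illustrative
only; the target CPU rides in the upper 32 bits of the flags, matching
the kernel-side "cpu = map_flags >> 32" extraction visible below.

    #include <bpf/bpf.h>

    /* Illustrative sketch, not part of this patch. */
    static int update_percpu_slot(int map_fd, __u32 key, const __u64 *val, __u32 cpu)
    {
    	/* Update only @cpu's copy of the element. */
    	__u64 flags = BPF_F_CPU | ((__u64)cpu << 32);
    	int err = bpf_map_update_elem(map_fd, &key, val, flags);

    	if (err)
    		return err;

    	/* Replicate one value to every possible CPU's copy. */
    	return bpf_map_update_elem(map_fd, &key, val, BPF_F_ALL_CPUS);
    }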
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2848,7 +2848,7 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
 					struct bpf_func_state *callee);
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 			   u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3917,7 +3917,12 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 
 static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
 {
-	return false;
+	switch (map_type) {
+	case BPF_MAP_TYPE_PERCPU_ARRAY:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
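The trailing context line shows the intended consumer,
bpf_map_check_op_flags(). As a hedged sketch only (this body is an
assumption for illustration, not the actual implementation), the gate
presumably composes along these lines:

    /* Illustrative only: how a caller such as bpf_map_check_op_flags()
     * can gate the new flags on map types that opted in above.
     */
    static int check_cpu_flags(struct bpf_map *map, u64 flags)
    {
    	if ((flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) &&
    	    !bpf_map_supports_cpu_flags(map->map_type))
    		return -EINVAL;	/* flags unsupported for this map type */

    	/* Upper 32 bits embed the target CPU when BPF_F_CPU is set. */
    	if ((flags & BPF_F_CPU) && (u32)(flags >> 32) >= num_possible_cpus())
    		return -ERANGE;	/* embedded CPU out of range */

    	return 0;
    }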
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -307,7 +307,7 @@ static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key,
 	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
 }
 
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
@@ -325,11 +325,18 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
+		check_and_init_map_value(map, value);
+		goto unlock;
+	}
 	for_each_possible_cpu(cpu) {
 		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
 		check_and_init_map_value(map, value + off);
 		off += size;
 	}
+unlock:
 	rcu_read_unlock();
 	return 0;
 }
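Note the behavioral consequence of the new branch: with BPF_F_CPU set,
only one element's worth of memory is written, not one slot per possible
CPU. A user-space sketch using libbpf's existing
bpf_map_lookup_elem_flags() wrapper (helper name and buffer are
illustrative):

    #include <bpf/bpf.h>

    /* Illustrative sketch: read only CPU @cpu's copy of element @key.
     * @value needs room for a single element here, not for
     * num_possible_cpus() elements as in the plain per-CPU lookup.
     */
    static int lookup_one_cpu(int map_fd, __u32 key, __u64 *value, __u32 cpu)
    {
    	__u64 flags = BPF_F_CPU | ((__u64)cpu << 32);

    	return bpf_map_lookup_elem_flags(map_fd, &key, value, flags);
    }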
@@ -398,10 +405,11 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
 	void __percpu *pptr;
-	int cpu, off = 0;
+	void *ptr, *val;
 	u32 size;
+	int cpu;
 
-	if (unlikely(map_flags > BPF_EXIST))
+	if (unlikely((map_flags & BPF_F_LOCK) || (u32)map_flags > BPF_F_ALL_CPUS))
 		/* unknown flags */
 		return -EINVAL;
 
@@ -422,11 +430,20 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
-		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
-		off += size;
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		ptr = per_cpu_ptr(pptr, cpu);
+		copy_map_value(map, ptr, value);
+		bpf_obj_free_fields(array->map.record, ptr);
+		goto unlock;
+	}
+	for_each_possible_cpu(cpu) {
+		ptr = per_cpu_ptr(pptr, cpu);
+		val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
+		copy_map_value(map, ptr, val);
+		bpf_obj_free_fields(array->map.record, ptr);
 	}
+unlock:
 	rcu_read_unlock();
 	return 0;
 }
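Per the commit message, the same flags also travel through elem_flags of
the batch APIs. A hedged sketch with libbpf's existing
bpf_map_update_batch() wrapper (the opts struct and wrapper are existing
libbpf API; the helper and buffer sizing are illustrative): with
BPF_F_ALL_CPUS, the values buffer holds one element per key rather than
one element per key per CPU.

    #include <bpf/bpf.h>

    /* Illustrative sketch: replicate one value per key to all CPUs in a
     * single batch call.
     */
    static int batch_update_all_cpus(int map_fd, __u32 *keys, __u64 *values,
    				 __u32 count)
    {
    	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
    		.elem_flags = BPF_F_ALL_CPUS,
    	);

    	return bpf_map_update_batch(map_fd, keys, values, &count, &opts);
    }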
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -318,7 +318,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-		err = bpf_percpu_array_copy(map, key, value);
+		err = bpf_percpu_array_copy(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {