Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to
allow updating the values for all CPUs with a single value via the
update_elem API.

Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to
allow:

* updating the value for a specified CPU via the update_elem API.
* looking up the value for a specified CPU via the lookup_elem API.

The BPF_F_CPU flag is passed via map_flags, with the target CPU number
embedded in the upper 32 bits of map_flags.
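
For illustration only (not part of this patch), a minimal user-space
sketch using libbpf's existing bpf_map_update_elem() and
bpf_map_lookup_elem_flags() wrappers; 'fd', 'key' and 'struct val' are
placeholders, and BPF_F_CPU/BPF_F_ALL_CPUS are assumed to come from the
UAPI header as extended earlier in this series:

  struct val v = {};  /* matches the map's value layout */
  __u64 flags;
  int err;

  /* Update every possible CPU's slot from one value. */
  err = bpf_map_update_elem(fd, &key, &v, BPF_F_ALL_CPUS);

  /* Update and read back CPU 3 only; the target CPU is carried
   * in the upper 32 bits of map_flags.
   */
  flags = BPF_F_CPU | ((__u64)3 << 32);
  err = bpf_map_update_elem(fd, &key, &v, flags);
  err = bpf_map_lookup_elem_flags(fd, &key, &v, flags);

With either flag set, the user buffer holds a single value instead of
one value per possible CPU.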

Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf-cgroup.h |  4 ++--
 include/linux/bpf.h        |  1 +
 kernel/bpf/local_storage.c | 23 ++++++++++++++++++-----
 kernel/bpf/syscall.c       |  2 +-
 4 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index aedf573bdb426..013f4db9903fd 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
 
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);
 
@@ -467,7 +467,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
 static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
 static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
-                                                void *value) {
+                                                void *value, u64 flags) {
        return 0;
 }
 static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1fc0afb7f547f..e87779d3c5927 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3819,6 +3819,7 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
        case BPF_MAP_TYPE_PERCPU_ARRAY:
        case BPF_MAP_TYPE_PERCPU_HASH:
        case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+       case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
                return true;
        default:
                return false;
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 2ab4b60ffe61f..1ccbf28b2ad9f 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -180,7 +180,7 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
 }
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
-                                  void *value)
+                                  void *value, u64 map_flags)
 {
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
@@ -198,11 +198,17 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(_map, value, per_cpu_ptr(storage->percpu_buf, cpu));
+               goto unlock;
+       }
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
                copy_map_value_long(_map, value + off, per_cpu_ptr(storage->percpu_buf, cpu));
                off += size;
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
@@ -212,10 +218,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 {
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
-       int cpu, off = 0;
+       void *val;
        u32 size;
+       int cpu;
 
-       if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+       if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
                return -EINVAL;
 
        rcu_read_lock();
@@ -231,11 +238,17 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
         * returned or zeros which were zero-filled by percpu_alloc,
         * so no kernel data leaks possible
         */
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), value);
+               goto unlock;
+       }
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
-               copy_map_value_long(_map, per_cpu_ptr(storage->percpu_buf, cpu), value + off);
-               off += size;
+               val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
+               copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), val);
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3cff19a4be527..643e589c10231 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -320,7 +320,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-               err = bpf_percpu_cgroup_storage_copy(map, key, value);
+               err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_extract(map, key, value, false);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
-- 
2.51.2

