Introduce BPF_F_ALL_CPUS flag support for percpu_hash and lru_percpu_hash
maps to allow updating the value for all CPUs with a single supplied
value, for both the update_elem and update_batch APIs.

Introduce BPF_F_CPU flag support for percpu_hash and lru_percpu_hash
maps to allow:

* updating the value for a specified CPU, for both the update_elem and
  update_batch APIs.
* looking up the value for a specified CPU, for both the lookup_elem and
  lookup_batch APIs.

The BPF_F_CPU flag is passed via:

* map_flags of the regular lookup/update APIs, with the target CPU
  embedded in the upper 32 bits.
* elem_flags of the batch lookup/update APIs, with the target CPU
  embedded in the upper 32 bits.
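
For reference, a minimal userspace sketch of how the new flags can be
used against a percpu_hash map via libbpf. map_fd, key, target_cpu and
the 8-byte value layout are illustrative assumptions, and BPF_F_CPU /
BPF_F_ALL_CPUS refer to the uapi flags added earlier in this series:

  #include <bpf/bpf.h>
  #include <linux/bpf.h>

  static int demo(int map_fd, __u32 key, int target_cpu)
  {
          /* The target CPU rides in the upper 32 bits of the flags. */
          __u64 cpu_flags = BPF_F_CPU | ((__u64)target_cpu << 32);
          __u64 val = 1;
          int err;

          /* Write one value into every CPU's slot at once. */
          err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);
          if (err)
                  return err;

          /* Update only target_cpu's slot. */
          val = 2;
          err = bpf_map_update_elem(map_fd, &key, &val, cpu_flags);
          if (err)
                  return err;

          /* Read back only target_cpu's slot instead of filling a
           * num_possible_cpus()-sized buffer.
           */
          return bpf_map_lookup_elem_flags(map_fd, &key, &val, cpu_flags);
  }

The batch APIs take the same encoding via opts.elem_flags, e.g.
LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = cpu_flags) for
bpf_map_lookup_batch() and bpf_map_update_batch().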

Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf.h  |  4 +-
 kernel/bpf/hashtab.c | 94 ++++++++++++++++++++++++++++++--------------
 kernel/bpf/syscall.c |  2 +-
 3 files changed, 68 insertions(+), 32 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c43cdd90bfb12..1fc0afb7f547f 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2747,7 +2747,7 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
                                   struct bpf_func_state *caller,
                                   struct bpf_func_state *callee);
 
-int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 flags);
@@ -3817,6 +3817,8 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
 {
        switch (map_type) {
        case BPF_MAP_TYPE_PERCPU_ARRAY:
+       case BPF_MAP_TYPE_PERCPU_HASH:
+       case BPF_MAP_TYPE_LRU_PERCPU_HASH:
                return true;
        default:
                return false;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index c8a9b27f8663b..441ff5bc54ac2 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -932,7 +932,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 }
 
 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
-                           void *value, bool onallcpus)
+                           void *value, bool onallcpus, u64 map_flags)
 {
        void *ptr;
 
@@ -943,19 +943,28 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
                bpf_obj_free_fields(htab->map.record, ptr);
        } else {
                u32 size = round_up(htab->map.value_size, 8);
-               int off = 0, cpu;
+               void *val;
+               int cpu;
+
+               if (map_flags & BPF_F_CPU) {
+                       cpu = map_flags >> 32;
+                       ptr = per_cpu_ptr(pptr, cpu);
+                       copy_map_value(&htab->map, ptr, value);
+                       bpf_obj_free_fields(htab->map.record, ptr);
+                       return;
+               }
 
                for_each_possible_cpu(cpu) {
                        ptr = per_cpu_ptr(pptr, cpu);
-                       copy_map_value_long(&htab->map, ptr, value + off);
+                       val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
+                       copy_map_value(&htab->map, ptr, val);
                        bpf_obj_free_fields(htab->map.record, ptr);
-                       off += size;
                }
        }
 }
 
 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
-                           void *value, bool onallcpus)
+                           void *value, bool onallcpus, u64 map_flags)
 {
        /* When not setting the initial value on all cpus, zero-fill element
         * values for other cpus. Otherwise, bpf program has no way to ensure
@@ -973,7 +982,7 @@ static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
                                zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
                }
        } else {
-               pcpu_copy_value(htab, pptr, value, onallcpus);
+               pcpu_copy_value(htab, pptr, value, onallcpus, map_flags);
        }
 }
 
@@ -985,7 +994,7 @@ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
-                                        struct htab_elem *old_elem)
+                                        struct htab_elem *old_elem, u64 map_flags)
 {
        u32 size = htab->map.value_size;
        bool prealloc = htab_is_prealloc(htab);
@@ -1043,7 +1052,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                        pptr = *(void __percpu **)ptr;
                }
 
-               pcpu_init_value(htab, pptr, value, onallcpus);
+               pcpu_init_value(htab, pptr, value, onallcpus, map_flags);
 
                if (!prealloc)
                        htab_elem_set_ptr(l_new, key_size, pptr);
@@ -1147,7 +1156,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        }
 
        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
-                               l_old);
+                               l_old, map_flags);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
@@ -1249,6 +1258,15 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
        return ret;
 }
 
+static int htab_map_check_update_flags(bool onallcpus, u64 map_flags)
+{
+       if (unlikely(!onallcpus && map_flags > BPF_EXIST))
+               return -EINVAL;
+       if (unlikely(onallcpus && ((map_flags & BPF_F_LOCK) || (u32)map_flags > BPF_F_ALL_CPUS)))
+               return -EINVAL;
+       return 0;
+}
+
 static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
                                          void *value, u64 map_flags,
                                          bool percpu, bool onallcpus)
@@ -1262,9 +1280,9 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
        u32 key_size, hash;
        int ret;
 
-       if (unlikely(map_flags > BPF_EXIST))
-               /* unknown flags */
-               return -EINVAL;
+       ret = htab_map_check_update_flags(onallcpus, map_flags);
+       if (unlikely(ret))
+               return ret;
 
        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
@@ -1289,7 +1307,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
                /* Update value in-place */
                if (percpu) {
                        pcpu_copy_value(htab, htab_elem_get_ptr(l_old, 
key_size),
-                                       value, onallcpus);
+                                       value, onallcpus, map_flags);
                } else {
                        void **inner_map_pptr = htab_elem_value(l_old, key_size);
 
@@ -1298,7 +1316,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
                }
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
-                                       hash, percpu, onallcpus, NULL);
+                                       hash, percpu, onallcpus, NULL, map_flags);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
@@ -1324,9 +1342,9 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
        u32 key_size, hash;
        int ret;
 
-       if (unlikely(map_flags > BPF_EXIST))
-               /* unknown flags */
-               return -EINVAL;
+       ret = htab_map_check_update_flags(onallcpus, map_flags);
+       if (unlikely(ret))
+               return ret;
 
        WARN_ON_ONCE(!bpf_rcu_lock_held());
 
@@ -1363,10 +1381,10 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
                /* per-cpu hash map can update value in-place */
                pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
-                               value, onallcpus);
+                               value, onallcpus, map_flags);
        } else {
                pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
-                               value, onallcpus);
+                               value, onallcpus, map_flags);
                hlist_nulls_add_head_rcu(&l_new->hash_node, head);
                l_new = NULL;
        }
@@ -1678,9 +1696,9 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
        void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
        void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        u32 batch, max_count, size, bucket_size, map_id;
+       u64 elem_map_flags, map_flags, allowed_flags;
        u32 bucket_cnt, total, key_size, value_size;
        struct htab_elem *node_to_free = NULL;
-       u64 elem_map_flags, map_flags;
        struct hlist_nulls_head *head;
        struct hlist_nulls_node *n;
        unsigned long flags = 0;
@@ -1690,9 +1708,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
        int ret = 0;
 
        elem_map_flags = attr->batch.elem_flags;
-       if ((elem_map_flags & ~BPF_F_LOCK) ||
-           ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
-               return -EINVAL;
+       allowed_flags = BPF_F_LOCK;
+       if (!do_delete && is_percpu)
+               allowed_flags |= BPF_F_CPU;
+       ret = bpf_map_check_op_flags(map, elem_map_flags, allowed_flags);
+       if (ret)
+               return ret;
 
        map_flags = attr->batch.flags;
        if (map_flags)
@@ -1715,7 +1736,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
        key_size = htab->map.key_size;
        value_size = htab->map.value_size;
        size = round_up(value_size, 8);
-       if (is_percpu)
+       if (is_percpu && !(elem_map_flags & BPF_F_CPU))
                value_size = size * num_possible_cpus();
        total = 0;
        /* while experimenting with hash tables with sizes ranging from 10 to
@@ -1798,10 +1819,17 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
                        void __percpu *pptr;
 
                        pptr = htab_elem_get_ptr(l, map->key_size);
-                       for_each_possible_cpu(cpu) {
-                               copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
-                               check_and_init_map_value(&htab->map, dst_val + off);
-                               off += size;
+                       if (elem_map_flags & BPF_F_CPU) {
+                               cpu = elem_map_flags >> 32;
+                               copy_map_value(&htab->map, dst_val, per_cpu_ptr(pptr, cpu));
+                               check_and_init_map_value(&htab->map, dst_val);
+                       } else {
+                               for_each_possible_cpu(cpu) {
+                                       copy_map_value_long(&htab->map, dst_val + off,
+                                                           per_cpu_ptr(pptr, cpu));
+                                       check_and_init_map_value(&htab->map, dst_val + off);
+                                       off += size;
+                               }
                        }
                } else {
                        value = htab_elem_value(l, key_size);
@@ -2357,7 +2385,7 @@ static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *k
        return NULL;
 }
 
-int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
 {
        struct htab_elem *l;
        void __percpu *pptr;
@@ -2374,16 +2402,22 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
        l = __htab_map_lookup_elem(map, key);
        if (!l)
                goto out;
+       ret = 0;
        /* We do not mark LRU map element here in order to not mess up
         * eviction heuristics when user space does a map walk.
         */
        pptr = htab_elem_get_ptr(l, map->key_size);
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
+               check_and_init_map_value(map, value);
+               goto out;
+       }
        for_each_possible_cpu(cpu) {
                copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
                check_and_init_map_value(map, value + off);
                off += size;
        }
-       ret = 0;
 out:
        rcu_read_unlock();
        return ret;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index edb7462a34f13..3cff19a4be527 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -316,7 +316,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
-               err = bpf_percpu_hash_copy(map, key, value);
+               err = bpf_percpu_hash_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-- 
2.51.2

