Add libbpf support for the BPF_F_CPU and BPF_F_ALL_CPUS flags for percpu
maps. When BPF_F_CPU is used, the target CPU is embedded in the high 32 bits
of:

1. **flags**: bpf_map_lookup_elem_flags(), bpf_map__lookup_elem(),
   bpf_map_update_elem() and bpf_map__update_elem()
2. **opts->elem_flags**: bpf_map_lookup_batch() and
   bpf_map_update_batch()

The flag may instead be BPF_F_ALL_CPUS; the two flags are mutually
exclusive, so 'BPF_F_CPU | BPF_F_ALL_CPUS' is rejected.
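
For illustration only (not part of this patch), a minimal sketch of the
per-element API, assuming 'm' is a percpu map whose value_size is
sizeof(__u64) and that BPF_F_CPU is provided by the kernel side of this
series:

  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  /* hypothetical helper: write 'value' to one CPU, then read it back */
  static int update_then_lookup_on_cpu(struct bpf_map *m, __u32 key, int cpu)
  {
          /* cpu number goes into the high 32 bits, BPF_F_CPU into the low 32 bits */
          __u64 flags = BPF_F_CPU | ((__u64)cpu << 32);
          __u64 value = 42, out = 0;
          int err;

          /* with BPF_F_CPU, value_sz is the map's value_size, not value_size * nr_cpus */
          err = bpf_map__update_elem(m, &key, sizeof(key), &value, sizeof(value), flags);
          if (err)
                  return err;

          return bpf_map__lookup_elem(m, &key, sizeof(key), &out, sizeof(out), flags);
  }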

Behavior:

* With BPF_F_ALL_CPUS, an update applies the same value to all CPUs (see
  the batch sketch after this list).
* With BPF_F_CPU, an update writes the value only to the specified CPU.
* With BPF_F_CPU, a lookup reads the value only from the specified CPU.
* Lookup does not support BPF_F_ALL_CPUS.
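
A similarly illustrative sketch of the batch API (same headers as above;
'map_fd', 'keys' and 'values' are assumed to be prepared by the caller, and
the one-value-per-key layout is an assumption based on this series):

  /* hypothetical helper: copy one value per key to every CPU. Because
   * BPF_F_ALL_CPUS is set in opts.elem_flags, 'values' is assumed to hold
   * count * value_size bytes, not count * value_size * nr_cpus.
   */
  static int update_batch_all_cpus(int map_fd, const void *keys,
                                   const void *values, __u32 count)
  {
          LIBBPF_OPTS(bpf_map_batch_opts, opts,
                  .elem_flags = BPF_F_ALL_CPUS,
          );

          return bpf_map_update_batch(map_fd, keys, values, &count, &opts);
  }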

Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Leon Hwang <[email protected]>
---
 tools/lib/bpf/bpf.h    |  8 ++++++++
 tools/lib/bpf/libbpf.c | 26 ++++++++++++++++++++------
 tools/lib/bpf/libbpf.h | 21 ++++++++-------------
 3 files changed, 36 insertions(+), 19 deletions(-)

diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index e983a3e40d612..ffd93feffd71d 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -289,6 +289,14 @@ LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
  *    Update spin_lock-ed map elements. This must be
  *    specified if the map value contains a spinlock.
  *
+ * **BPF_F_CPU**
+ *    For percpu maps, update the value only on the specified CPU. The CPU
+ *    number is embedded into the high 32 bits of **opts->elem_flags**.
+ *
+ * **BPF_F_ALL_CPUS**
+ *    For percpu maps, update the value across all CPUs. This flag cannot
+ *    be used together with BPF_F_CPU.
+ *
  * @param fd BPF map file descriptor
  * @param keys pointer to an array of *count* keys
  * @param values pointer to an array of *count* values
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 706e7481bdf6b..65b9b5e955449 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10913,7 +10913,7 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
 }
 
 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
-                          size_t value_sz, bool check_value_sz)
+                          size_t value_sz, bool check_value_sz, __u64 flags)
 {
        if (!map_is_created(map)) /* map is not yet created */
                return -ENOENT;
@@ -10940,6 +10940,20 @@ static int validate_map_op(const struct bpf_map *map, size_t key_sz,
                int num_cpu = libbpf_num_possible_cpus();
                size_t elem_sz = roundup(map->def.value_size, 8);
 
+               if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+                       if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS)) {
+                               pr_warn("map '%s': BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive\n",
+                                       map->name);
+                               return -EINVAL;
+                       }
+                       if (map->def.value_size != value_sz) {
+                               pr_warn("map '%s': unexpected value size %zu provided for either BPF_F_CPU or BPF_F_ALL_CPUS, expected %u\n",
+                                       map->name, value_sz, map->def.value_size);
+                               return -EINVAL;
+                       }
+                       break;
+               }
+
                if (value_sz != num_cpu * elem_sz) {
                        pr_warn("map '%s': unexpected value size %zu provided 
for per-CPU map, expected %d * %zu = %zd\n",
                                map->name, value_sz, num_cpu, elem_sz, num_cpu 
* elem_sz);
@@ -10964,7 +10978,7 @@ int bpf_map__lookup_elem(const struct bpf_map *map,
 {
        int err;
 
-       err = validate_map_op(map, key_sz, value_sz, true);
+       err = validate_map_op(map, key_sz, value_sz, true, flags);
        if (err)
                return libbpf_err(err);
 
@@ -10977,7 +10991,7 @@ int bpf_map__update_elem(const struct bpf_map *map,
 {
        int err;
 
-       err = validate_map_op(map, key_sz, value_sz, true);
+       err = validate_map_op(map, key_sz, value_sz, true, flags);
        if (err)
                return libbpf_err(err);
 
@@ -10989,7 +11003,7 @@ int bpf_map__delete_elem(const struct bpf_map *map,
 {
        int err;
 
-       err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+       err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, flags);
        if (err)
                return libbpf_err(err);
 
@@ -11002,7 +11016,7 @@ int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
 {
        int err;
 
-       err = validate_map_op(map, key_sz, value_sz, true);
+       err = validate_map_op(map, key_sz, value_sz, true, flags);
        if (err)
                return libbpf_err(err);
 
@@ -11014,7 +11028,7 @@ int bpf_map__get_next_key(const struct bpf_map *map,
 {
        int err;
 
-       err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+       err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0);
        if (err)
                return libbpf_err(err);
 
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5118d0a90e243..7c38b2e546080 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1196,12 +1196,13 @@ LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
  * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
  * @param value pointer to memory in which looked up value will be stored
  * @param value_sz size in byte of value data memory; it has to match BPF map
- * definition's **value_size**. For per-CPU BPF maps value size has to be
- * a product of BPF map value size and number of possible CPUs in the system
- * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
- * per-CPU values value size has to be aligned up to closest 8 bytes for
- * alignment reasons, so expected size is: `round_up(value_size, 8)
- * * libbpf_num_possible_cpus()`.
+ * definition's **value_size**. For per-CPU BPF maps, value size can be
+ * `value_size` if either **BPF_F_CPU** or **BPF_F_ALL_CPUS** is specified
+ * in **flags**, otherwise a product of BPF map value size and number of
+ * possible CPUs in the system (could be fetched with
+ * **libbpf_num_possible_cpus()**). Note also that for per-CPU values value
+ * size has to be aligned up to closest 8 bytes, so expected size is:
+ * `round_up(value_size, 8) * libbpf_num_possible_cpus()`.
  * @flags extra flags passed to kernel for this operation
  * @return 0, on success; negative error, otherwise
  *
@@ -1219,13 +1220,7 @@ LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
  * @param key pointer to memory containing bytes of the key
  * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
  * @param value pointer to memory containing bytes of the value
- * @param value_sz size in byte of value data memory; it has to match BPF map
- * definition's **value_size**. For per-CPU BPF maps value size has to be
- * a product of BPF map value size and number of possible CPUs in the system
- * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
- * per-CPU values value size has to be aligned up to closest 8 bytes for
- * alignment reasons, so expected size is: `round_up(value_size, 8)
- * * libbpf_num_possible_cpus()`.
+ * @param value_sz refer to **bpf_map__lookup_elem**'s description.
  * @flags extra flags passed to kernel for this operation
  * @return 0, on success; negative error, otherwise
  *
-- 
2.51.2

