Export the bpf_cgroup_storages_alloc() and bpf_cgroup_storages_free()
helpers to include/linux/bpf-cgroup.h and reuse them in bpf_test_run().
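
With the helpers in the header, the call site in bpf_test_run() reduces to the
pattern below (illustrative sketch only, not part of the diff; "run the
program" stands in for the test loop):

	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	int ret;

	/* One storage area per cgroup storage type; on failure the helper
	 * frees whatever it already allocated and returns -ENOMEM.
	 */
	ret = bpf_cgroup_storages_alloc(storage, prog);
	if (ret)
		return ret;

	/* ... run the program ... */

	/* NULL entries are fine: bpf_cgroup_storage_free() ignores them. */
	bpf_cgroup_storages_free(storage);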

v2:
  - fix build without CONFIG_CGROUP_BPF (kernel test robot <l...@intel.com>)

Signed-off-by: Dmitry Yakunin <z...@yandex-team.ru>
---
 include/linux/bpf-cgroup.h | 36 ++++++++++++++++++++++++++++++++++++
 kernel/bpf/cgroup.c        | 25 -------------------------
 net/bpf/test_run.c         | 16 ++++------------
 3 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 2c6f266..5c10fe6 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -175,6 +175,33 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);
 
+static inline void bpf_cgroup_storages_free(struct bpf_cgroup_storage
+                                           *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
+{
+       enum bpf_cgroup_storage_type stype;
+
+       for_each_cgroup_storage_type(stype)
+               bpf_cgroup_storage_free(storage[stype]);
+}
+
+static inline int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage
+                                           *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
+                                           struct bpf_prog *prog)
+{
+       enum bpf_cgroup_storage_type stype;
+
+       for_each_cgroup_storage_type(stype) {
+               storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
+               if (IS_ERR(storage[stype])) {
+                       storage[stype] = NULL;
+                       bpf_cgroup_storages_free(storage);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                            \
 ({                                                                           \
@@ -398,6 +425,15 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
        return 0;
 }
 
+static inline void bpf_cgroup_storages_free(
+       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
+
+static inline int bpf_cgroup_storages_alloc(
+       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
+       struct bpf_prog *prog) {
+       return 0;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index ac53102..e4c2792 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -28,31 +28,6 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
        percpu_ref_kill(&cgrp->bpf.refcnt);
 }
 
-static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
-{
-       enum bpf_cgroup_storage_type stype;
-
-       for_each_cgroup_storage_type(stype)
-               bpf_cgroup_storage_free(storages[stype]);
-}
-
-static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
-                                    struct bpf_prog *prog)
-{
-       enum bpf_cgroup_storage_type stype;
-
-       for_each_cgroup_storage_type(stype) {
-               storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
-               if (IS_ERR(storages[stype])) {
-                       storages[stype] = NULL;
-                       bpf_cgroup_storages_free(storages);
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-
 static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
                                       struct bpf_cgroup_storage *src[])
 {
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 1e10a7e..5c4835c 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -19,20 +19,13 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time, bool xdp)
 {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
-       enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
        int ret = 0;
        u32 i;
 
-       for_each_cgroup_storage_type(stype) {
-               storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
-               if (IS_ERR(storage[stype])) {
-                       storage[stype] = NULL;
-                       for_each_cgroup_storage_type(stype)
-                               bpf_cgroup_storage_free(storage[stype]);
-                       return -ENOMEM;
-               }
-       }
+       ret = bpf_cgroup_storages_alloc(storage, prog);
+       if (ret)
+               return ret;
 
        if (!repeat)
                repeat = 1;
@@ -72,8 +65,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
-       for_each_cgroup_storage_type(stype)
-               bpf_cgroup_storage_free(storage[stype]);
+       bpf_cgroup_storages_free(storage);
 
        return ret;
 }
-- 
2.7.4
