On 4/26/19 10:11 AM, Martin KaFai Lau wrote:
> This patch supports probing for the new BPF_MAP_TYPE_SK_STORAGE.
> BPF_MAP_TYPE_SK_STORAGE enforces BTF usage, so the new probe
> also requires creating and loading a BTF.
> 
> Signed-off-by: Martin KaFai Lau <ka...@fb.com>
> ---
>   tools/bpf/bpftool/map.c       |  1 +
>   tools/lib/bpf/libbpf_probes.c | 74 ++++++++++++++++++++++++++++++++++-
>   2 files changed, 74 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
> index 10b6c9d3e525..693ab3da37de 100644
> --- a/tools/bpf/bpftool/map.c
> +++ b/tools/bpf/bpftool/map.c
> @@ -46,6 +46,7 @@ const char * const map_type_name[] = {
>       [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]    = "percpu_cgroup_storage",
>       [BPF_MAP_TYPE_QUEUE]                    = "queue",
>       [BPF_MAP_TYPE_STACK]                    = "stack",
> +     [BPF_MAP_TYPE_SK_STORAGE]               = "sk_storage",
>   };
>   
>   const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
> diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
> index 0f25541632e3..2e26ddb005e4 100644
> --- a/tools/lib/bpf/libbpf_probes.c
> +++ b/tools/lib/bpf/libbpf_probes.c
> @@ -9,6 +9,7 @@
>   #include <net/if.h>
>   #include <sys/utsname.h>
>   
> +#include <linux/btf.h>
>   #include <linux/filter.h>
>   #include <linux/kernel.h>
>   
> @@ -130,11 +131,65 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
>       return errno != EINVAL && errno != EOPNOTSUPP;
>   }
>   
> +static int load_btf(void)
> +{
> +#define BTF_INFO_ENC(kind, kind_flag, vlen) \
> +     ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
> +#define BTF_TYPE_ENC(name, info, size_or_type) \
> +     (name), (info), (size_or_type)
> +#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
> +     ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
> +#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
> +     BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
> +     BTF_INT_ENC(encoding, bits_offset, bits)
> +#define BTF_MEMBER_ENC(name, type, bits_offset) \
> +     (name), (type), (bits_offset)
> +
> +     const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
> +     /* struct bpf_spin_lock {
> +      *   int val;
> +      * };
> +      * struct val {
> +      *   int cnt;
> +      *   struct bpf_spin_lock l;
> +      * };
> +      */
> +     __u32 btf_raw_types[] = {
> +             /* int */
> +             BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
> +             /* struct bpf_spin_lock */                      /* [2] */
> +             BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
> +             BTF_MEMBER_ENC(15, 1, 0), /* int val; */
> +             /* struct val */                                /* [3] */
> +             BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
> +             BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
> +             BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
> +     };
> +     struct btf_header btf_hdr = {
> +             .magic = BTF_MAGIC,
> +             .version = BTF_VERSION,
> +             .hdr_len = sizeof(struct btf_header),
> +             .type_len = sizeof(btf_raw_types),
> +             .str_off = sizeof(btf_raw_types),
> +             .str_len = sizeof(btf_str_sec),
> +     };
> +     __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
> +                  sizeof(btf_str_sec)];
> +
> +     memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
> +     memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
> +     memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
> +            btf_str_sec, sizeof(btf_str_sec));
> +
> +     return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
> +}

In the future, different map types may need to probe different type
configurations. Maybe btf_raw_types and btf_str_sec could be passed
as parameters to load_btf()?
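
Something along these lines, perhaps (untested sketch; the parameter
names are just a suggestion):

static int load_btf(const __u32 *raw_types, __u32 types_len,
		    const char *str_sec, __u32 str_len)
{
	struct btf_header btf_hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	/* header, then the caller-provided type and string sections */
	__u8 raw_btf[sizeof(btf_hdr) + types_len + str_len];

	memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
	memcpy(raw_btf + sizeof(btf_hdr), raw_types, types_len);
	memcpy(raw_btf + sizeof(btf_hdr) + types_len, str_sec, str_len);

	return bpf_load_btf(raw_btf, sizeof(raw_btf), NULL, 0, false);
}

The sk_storage case would then pass btf_raw_types/btf_str_sec (and
their sizeof()s) at the call site, and the BTF_*_ENC() macros could
move somewhere shareable.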

> +
>   bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
>   {
>       int key_size, value_size, max_entries, map_flags;
> +     __u32 btf_key_type_id = 0, btf_value_type_id = 0;
>       struct bpf_create_map_attr attr = {};
> -     int fd = -1, fd_inner;
> +     int fd = -1, btf_fd = -1, fd_inner;
>   
>       key_size        = sizeof(__u32);
>       value_size      = sizeof(__u32);
> @@ -160,6 +215,16 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
>       case BPF_MAP_TYPE_STACK:
>               key_size        = 0;
>               break;
> +     case BPF_MAP_TYPE_SK_STORAGE:
> +             btf_key_type_id = 1;
> +             btf_value_type_id = 3;
> +             value_size = 8;
> +             max_entries = 0;
> +             map_flags = BPF_F_NO_PREALLOC;
> +             btf_fd = load_btf();
> +             if (btf_fd < 0)
> +                     return false;
> +             break;
>       case BPF_MAP_TYPE_UNSPEC:
>       case BPF_MAP_TYPE_HASH:
>       case BPF_MAP_TYPE_ARRAY:
> @@ -205,11 +270,18 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
>               attr.max_entries = max_entries;
>               attr.map_flags = map_flags;
>               attr.map_ifindex = ifindex;
> +             if (btf_fd >= 0) {
> +                     attr.btf_fd = btf_fd;
> +                     attr.btf_key_type_id = btf_key_type_id;
> +                     attr.btf_value_type_id = btf_value_type_id;
> +             }
>   
>               fd = bpf_create_map_xattr(&attr);
>       }
>       if (fd >= 0)
>               close(fd);
> +     if (btf_fd >= 0)
> +             close(btf_fd);
>   
>       return fd >= 0;
>   }
> 
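
For context, once this lands a caller can feature-detect the new map
type with a one-liner, e.g. (minimal sketch, assuming the installed
libbpf header path):

#include <stdio.h>
#include <bpf/libbpf.h>	/* assumed install path; in-tree it is "libbpf.h" */

int main(void)
{
	/* ifindex 0: probe the host kernel, not an offload device */
	if (bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0))
		printf("sk_storage maps: supported\n");
	else
		printf("sk_storage maps: not supported\n");
	return 0;
}

which is presumably what the map_type_name[] addition in bpftool pairs
with, so the probe result can be reported under the "sk_storage" name.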
