These hooks mirror the existing BPF_CGROUP_INET_SOCK_CREATE hook (which triggers upon socket creation), but trigger per address family (INET4/INET6) when the socket is closed.
Signed-off-by: Stanislav Fomichev <s...@google.com> --- include/linux/bpf-cgroup.h | 6 ++++++ include/net/inet_common.h | 1 + include/uapi/linux/bpf.h | 2 ++ kernel/bpf/syscall.c | 8 ++++++++ net/core/filter.c | 7 +++++++ net/ipv4/af_inet.c | 13 ++++++++++++- net/ipv6/af_inet6.c | 5 ++++- 7 files changed, 40 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 588dd5f0bd85..31fcfe215d80 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -176,6 +176,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) +#define BPF_CGROUP_RUN_PROG_INET4_SOCK_RELEASE(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_SOCK_RELEASE) + +#define BPF_CGROUP_RUN_PROG_INET6_SOCK_RELEASE(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_SOCK_RELEASE) + #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 975901a95c0f..0e64046afe30 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -17,6 +17,7 @@ struct sockaddr; struct socket; int inet_release(struct socket *sock); +int __inet_release(struct socket *sock); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 91c43884f295..8e78aa28a42e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -186,6 +186,8 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, + BPF_CGROUP_INET4_SOCK_RELEASE, + BPF_CGROUP_INET6_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b155cd17c1bd..6fa113448580 100644 --- a/kernel/bpf/syscall.c +++ 
b/kernel/bpf/syscall.c @@ -1417,6 +1417,8 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_CGROUP_SOCK: switch (expected_attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return 0; @@ -1709,6 +1711,8 @@ static int bpf_prog_attach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_SKB; break; case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: ptype = BPF_PROG_TYPE_CGROUP_SOCK; @@ -1791,6 +1795,8 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_SKB; break; case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: ptype = BPF_PROG_TYPE_CGROUP_SOCK; @@ -1841,6 +1847,8 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET_INGRESS: case BPF_CGROUP_INET_EGRESS: case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_POST_BIND: diff --git a/net/core/filter.c b/net/core/filter.c index 2b3b436ef545..b4da6793fdbc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5885,12 +5885,16 @@ static bool __sock_filter_check_attach_type(int off, switch (attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: goto full_access; + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: + goto read_only; default: return false; } case bpf_ctx_range(struct bpf_sock, src_ip4): switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: + case BPF_CGROUP_INET4_SOCK_RELEASE: goto read_only; default: return false; @@ -5898,6 +5902,7 @@ static bool 
__sock_filter_check_attach_type(int off, case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): switch (attach_type) { case BPF_CGROUP_INET6_POST_BIND: + case BPF_CGROUP_INET6_SOCK_RELEASE: goto read_only; default: return false; @@ -5906,6 +5911,8 @@ static bool __sock_filter_check_attach_type(int off, switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: + case BPF_CGROUP_INET4_SOCK_RELEASE: + case BPF_CGROUP_INET6_SOCK_RELEASE: goto read_only; default: return false; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0dfb72c46671..b703ad242365 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -403,7 +403,7 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, * function we are destroying the object and from then on nobody * should refer to it. */ -int inet_release(struct socket *sock) +int __inet_release(struct socket *sock) { struct sock *sk = sock->sk; @@ -429,6 +429,17 @@ int inet_release(struct socket *sock) } return 0; } +EXPORT_SYMBOL(__inet_release); + +int inet_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (sk && !sk->sk_kern_sock) + BPF_CGROUP_RUN_PROG_INET4_SOCK_RELEASE(sk); + + return __inet_release(sock); +} EXPORT_SYMBOL(inet_release); int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d99753b5e39b..44c86595eba8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -464,13 +464,16 @@ int inet6_release(struct socket *sock) if (!sk) return -EINVAL; + if (!sk->sk_kern_sock) + BPF_CGROUP_RUN_PROG_INET6_SOCK_RELEASE(sock->sk); + /* Free mc lists */ ipv6_sock_mc_close(sk); /* Free ac lists */ ipv6_sock_ac_close(sk); - return inet_release(sock); + return __inet_release(sock); } EXPORT_SYMBOL(inet6_release); -- 2.20.1.321.g9e740568ce-goog