This patch adds a member to struct netns_core: a per-net-namespace
counter of sockets in use. The counter is incremented in sk_alloc()
and decremented in sk_free(); because every socket is paired with a
sock, this keeps the accounting simple to maintain and easy to
review. More importantly, it avoids having to take an additional
reference on the _net_ namespace.

Signed-off-by: Martin Zhang <zhangjunweimar...@didichuxing.com>
Signed-off-by: Tonghao Zhang <zhangtong...@didichuxing.com>
---
v3 --> v4:
1. add a no-op function for the !CONFIG_PROC_FS case.
2. change __this_cpu_add to this_cpu_add, as reported by lkp.
reported at: http://patchwork.ozlabs.org/patch/837424/
---
 include/net/netns/core.h |  1 +
 include/net/sock.h       |  1 +
 net/core/sock.c          | 40 +++++++++++++++++++++++++++++++++++++++-
 net/socket.c             | 13 ++-----------
 4 files changed, 43 insertions(+), 12 deletions(-)

diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 6490b79..1de41f3 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,6 +11,7 @@ struct netns_core {
        int     sysctl_somaxconn;
 
        struct prot_inuse __percpu *prot_inuse;
+       int __percpu *sock_inuse;
 };
 
 #endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 6f1be97..169a26f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1263,6 +1263,7 @@ static inline void sk_sockets_allocated_inc(struct sock 
*sk)
 /* Called with local bh disabled */
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
                int inc)
diff --git a/net/core/sock.c b/net/core/sock.c
index b899d86..04bbab1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,8 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_inuse_add(struct net *net, int val);
+
 /**
  * sk_ns_capable - General socket capability test
  * @sk: Socket to use a capability on or through
@@ -1536,6 +1538,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t 
priority,
                if (likely(sk->sk_net_refcnt))
                        get_net(net);
                sock_net_set(sk, net);
+               sock_inuse_add(net, 1);
                refcount_set(&sk->sk_wmem_alloc, 1);
 
                mem_cgroup_sk_alloc(sk);
@@ -1597,6 +1600,8 @@ void sk_destruct(struct sock *sk)
 
 static void __sk_free(struct sock *sk)
 {
+       sock_inuse_add(sock_net(sk), -1);
+
        if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
                sock_diag_broadcast_destroy(sk);
        else
@@ -1665,6 +1670,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const 
gfp_t priority)
                newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
                newsk->sk_backlog.len = 0;
 
+               sock_inuse_add(sock_net(newsk), 1);
                atomic_set(&newsk->sk_rmem_alloc, 0);
                /*
                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
@@ -3060,15 +3066,43 @@ int sock_prot_inuse_get(struct net *net, struct proto 
*prot)
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
 
+static void sock_inuse_add(struct net *net, int val)
+{
+       this_cpu_add(*net->core.sock_inuse, val);
+}
+
+int sock_inuse_get(struct net *net)
+{
+       int cpu, res = 0;
+
+       for_each_possible_cpu(cpu)
+               res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+
+       return res >= 0 ? res : 0;
+}
+
+EXPORT_SYMBOL_GPL(sock_inuse_get);
+
 static int __net_init sock_inuse_init_net(struct net *net)
 {
        net->core.prot_inuse = alloc_percpu(struct prot_inuse);
-       return net->core.prot_inuse ? 0 : -ENOMEM;
+       if (!net->core.prot_inuse)
+               return -ENOMEM;
+
+       net->core.sock_inuse = alloc_percpu(int);
+       if (!net->core.sock_inuse)
+               goto out;
+
+       return 0;
+out:
+       free_percpu(net->core.prot_inuse);
+       return -ENOMEM;
 }
 
 static void __net_exit sock_inuse_exit_net(struct net *net)
 {
        free_percpu(net->core.prot_inuse);
+       free_percpu(net->core.sock_inuse);
 }
 
 static struct pernet_operations net_inuse_ops = {
@@ -3111,6 +3145,10 @@ static inline void assign_proto_idx(struct proto *prot)
 static inline void release_proto_idx(struct proto *prot)
 {
 }
+
+static void sock_inuse_add(struct net *net, int val)
+{
+}
 #endif
 
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
diff --git a/net/socket.c b/net/socket.c
index b085f14..1bdf364 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2646,17 +2646,8 @@ static int __init sock_init(void)
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
-       int cpu;
-       int counter = 0;
-
-       for_each_possible_cpu(cpu)
-           counter += per_cpu(sockets_in_use, cpu);
-
-       /* It can be negative, by the way. 8) */
-       if (counter < 0)
-               counter = 0;
-
-       seq_printf(seq, "sockets: used %d\n", counter);
+       seq_printf(seq, "sockets: used %d\n",
+                  sock_inuse_get(seq->private));
 }
 #endif                         /* CONFIG_PROC_FS */
 
-- 
1.8.3.1

Reply via email to