Since a struct net can be obtained from RCU lists, and there is a race
with net destruction, this patch converts net::count to refcount_t.

This provides a sanity check for the case where the counter of an
already-dead net is incremented, i.e. where maybe_get_net() has to be
used instead of get_net().

Drivers: allyesconfig and allmodconfig builds are OK.
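
For illustration, a minimal sketch of the pattern in question (the lookup
helper below is hypothetical and not part of this patch): a net found on the
RCU-protected list may already be dead, so maybe_get_net() must be used; with
refcount_t, a mistaken get_net() on such a net now triggers a refcount warning
instead of silently resurrecting it.

	/* Hypothetical example, not part of this patch: RCU lookup of a net. */
	#include <linux/rcupdate.h>
	#include <net/net_namespace.h>

	static struct net *find_live_net(void)
	{
		struct net *net;

		rcu_read_lock();
		for_each_net_rcu(net) {
			/*
			 * The net may be concurrently shut down, so its count
			 * can already be zero; maybe_get_net() (now backed by
			 * refcount_inc_not_zero()) fails instead of reviving it.
			 */
			if (maybe_get_net(net)) {
				rcu_read_unlock();
				return net;	/* caller must put_net() */
			}
		}
		rcu_read_unlock();
		return NULL;
	}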

Suggested-by: Eric Dumazet <eric.duma...@gmail.com>
Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 include/net/net_namespace.h   |    8 ++++----
 net/core/net-sysfs.c          |    6 +++---
 net/core/net_namespace.c      |    8 ++++----
 net/ipv4/inet_timewait_sock.c |    4 ++--
 net/ipv4/tcp_metrics.c        |    2 +-
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 10f99dafd5ac..f8a84a2c2341 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -51,7 +51,7 @@ struct net {
        refcount_t              passive;        /* To decided when the network
                                                 * namespace should be freed.
                                                 */
-       atomic_t                count;          /* To decided when the network
+       refcount_t              count;          /* To decided when the network
                                                 *  namespace should be shut down.
                                                 */
        spinlock_t              rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
-       atomic_inc(&net->count);
+       refcount_inc(&net->count);
        return net;
 }
 
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
         * exists.  If the reference count is zero this
         * function fails and returns NULL.
         */
-       if (!atomic_inc_not_zero(&net->count))
+       if (!refcount_inc_not_zero(&net->count))
                net = NULL;
        return net;
 }
 
 static inline void put_net(struct net *net)
 {
-       if (atomic_dec_and_test(&net->count))
+       if (refcount_dec_and_test(&net->count))
                __put_net(net);
 }
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 799b75268291..7bf8b85ade16 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -961,7 +961,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct kobject *kobj = &dev->_rx[i].kobj;
 
-               if (!atomic_read(&dev_net(dev)->count))
+               if (!refcount_read(&dev_net(dev)->count))
                        kobj->uevent_suppress = 1;
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1367,7 +1367,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;
 
-               if (!atomic_read(&dev_net(dev)->count))
+               if (!refcount_read(&dev_net(dev)->count))
                        queue->kobj.uevent_suppress = 1;
 #ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 {
        struct device *dev = &ndev->dev;
 
-       if (!atomic_read(&dev_net(ndev)->count))
+       if (!refcount_read(&dev_net(ndev)->count))
                dev_set_uevent_suppress(dev, 1);
 
        kobject_get(&dev->kobj);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 60a71be75aea..2213d45fcafd 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
 struct net init_net = {
-       .count          = ATOMIC_INIT(1),
+       .count          = REFCOUNT_INIT(1),
        .dev_base_head  = LIST_HEAD_INIT(init_net.dev_base_head),
 };
 EXPORT_SYMBOL(init_net);
@@ -224,10 +224,10 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        bool alloc;
        int id;
 
-       if (atomic_read(&net->count) == 0)
+       if (refcount_read(&net->count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_bh(&net->nsid_lock);
-       alloc = atomic_read(&peer->count) == 0 ? false : true;
+       alloc = refcount_read(&peer->count) == 0 ? false : true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
@@ -284,7 +284,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        int error = 0;
        LIST_HEAD(net_exit_list);
 
-       atomic_set(&net->count, 1);
+       refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 277ff69a312d..c3ea4906d237 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -270,14 +270,14 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
                                continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
-                               atomic_read(&twsk_net(tw)->count))
+                               refcount_read(&twsk_net(tw)->count))
                                continue;
 
                        if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
                                continue;
 
                        if (unlikely((tw->tw_family != family) ||
-                                    atomic_read(&twsk_net(tw)->count))) {
+                                    refcount_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 759e6bc8327b..03b51cdcc731 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -892,7 +892,7 @@ static void tcp_metrics_flush_all(struct net *net)
                pp = &hb->chain;
                for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                        match = net ? net_eq(tm_net(tm), net) :
-                               !atomic_read(&tm_net(tm)->count);
+                               !refcount_read(&tm_net(tm)->count);
                        if (match) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);
