On Tue, Nov 14, 2017 at 5:53 AM, Kirill Tkhai <ktk...@virtuozzo.com> wrote:
> @@ -406,7 +406,7 @@ struct net *copy_net_ns(unsigned long flags,
>
>         get_user_ns(user_ns);
>
> -       rv = mutex_lock_killable(&net_mutex);
> +       rv = down_read_killable(&net_sem);
>         if (rv < 0) {
>                 net_free(net);
>                 dec_net_namespaces(ucounts);
> @@ -421,7 +421,7 @@ struct net *copy_net_ns(unsigned long flags,
>                 list_add_tail_rcu(&net->list, &net_namespace_list);
>                 rtnl_unlock();
>         }
> -       mutex_unlock(&net_mutex);
> +       up_read(&net_sem);
>         if (rv < 0) {
>                 dec_net_namespaces(ucounts);
>                 put_user_ns(user_ns);
> @@ -446,7 +446,7 @@ static void cleanup_net(struct work_struct *work)
>         list_replace_init(&cleanup_list, &net_kill_list);
>         spin_unlock_irq(&cleanup_list_lock);
>
> -       mutex_lock(&net_mutex);
> +       down_read(&net_sem);
>
>         /* Don't let anyone else find us. */
>         rtnl_lock();
> @@ -486,7 +486,7 @@ static void cleanup_net(struct work_struct *work)
>         list_for_each_entry_reverse(ops, &pernet_list, list)
>                 ops_free_list(ops, &net_exit_list);
>
> -       mutex_unlock(&net_mutex);
> +       up_read(&net_sem);
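
(For context: both paths now take net_sem for reading, and read holders
of an rwsem do not exclude one another. Below is a toy userspace
illustration of that property, using a pthread rwlock to stand in for
the kernel rwsem; purely illustrative, not from the patch.)

/* Two "readers" of an rwlock can hold it at the same time, unlike the
 * old mutex, which serialized copy_net_ns() against cleanup_net(). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t net_sem_like = PTHREAD_RWLOCK_INITIALIZER;

static void *reader(void *name)
{
	pthread_rwlock_rdlock(&net_sem_like);	/* ~ down_read(&net_sem) */
	printf("%s: inside critical section\n", (char *)name);
	sleep(1);				/* both threads overlap here */
	pthread_rwlock_unlock(&net_sem_like);	/* ~ up_read(&net_sem) */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, reader, "setup_net-like path");
	pthread_create(&b, NULL, reader, "cleanup_net-like path");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}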

After your patch, setup_net() can run concurrently with cleanup_net().
Given that ops_exit_list() is also called on the error path of
setup_net(), an ops->exit() method can now be invoked concurrently
unless it takes its own lock. I'm not sure whether this breaks any
existing user.
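
If it does, the fix on the ops side would be a private lock around that
subsystem's own init/exit work. A hypothetical sketch (the foo_* names
and helpers are made up, not taken from the patch or from any existing
user):

#include <linux/module.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>

/* Hypothetical pernet subsystem that serializes its own ->init()/->exit()
 * with a private mutex, since net_sem taken for reading no longer
 * provides that exclusion. */
static DEFINE_MUTEX(foo_pernet_lock);

static int __net_init foo_net_init(struct net *net)
{
	int err;

	mutex_lock(&foo_pernet_lock);
	err = foo_setup(net);		/* made-up per-net setup helper */
	mutex_unlock(&foo_pernet_lock);
	return err;
}

static void __net_exit foo_net_exit(struct net *net)
{
	mutex_lock(&foo_pernet_lock);
	foo_teardown(net);		/* made-up per-net teardown helper */
	mutex_unlock(&foo_pernet_lock);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

It would be registered as usual with register_pernet_subsys(&foo_net_ops).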
