As long as most of the Network Stack runs under the NET_LOCK() we want to
avoid grabbing the lock from different contexts, because when multiple
threads are waiting for the lock, all of them are awakened upon rw_exit().

This not only leads to wasted cycles, it also creates userland starvation
since kernel threads are preferred by the scheduler.  Making the scheduler
more clever is not worth it, as the stack is still single threaded.

So I'd like to move as many tasks as possible that grab the NET_LOCK()
from `systq' to `softnettq'.

Diff below does that for the linkstate and watchdog tasks.

Index: net/if.c
===================================================================
RCS file: /cvs/src/sys/net/if.c,v
retrieving revision 1.505
diff -u -p -r1.505 if.c
--- net/if.c    11 Jul 2017 12:51:05 -0000      1.505
+++ net/if.c    18 Jul 2017 13:35:26 -0000
@@ -1057,10 +1057,10 @@ if_detach(struct ifnet *ifp)
 
        /* Remove the watchdog timeout & task */
        timeout_del(ifp->if_slowtimo);
-       task_del(systq, ifp->if_watchdogtask);
+       task_del(softnettq, ifp->if_watchdogtask);
 
        /* Remove the link state task */
-       task_del(systq, ifp->if_linkstatetask);
+       task_del(softnettq, ifp->if_linkstatetask);
 
 #if NBPFILTER > 0
        bpfdetach(ifp);
@@ -1587,6 +1587,7 @@ if_linkstate_task(void *xifidx)
        struct ifnet *ifp;
        int s;
 
+       KERNEL_LOCK();
        NET_LOCK(s);
 
        ifp = if_get(ifidx);
@@ -1595,6 +1596,7 @@ if_linkstate_task(void *xifidx)
        if_put(ifp);
 
        NET_UNLOCK(s);
+       KERNEL_UNLOCK();
 }
 
 void
@@ -1615,7 +1617,7 @@ if_linkstate(struct ifnet *ifp)
 void
 if_link_state_change(struct ifnet *ifp)
 {
-       task_add(systq, ifp->if_linkstatetask);
+       task_add(softnettq, ifp->if_linkstatetask);
 }
 
 /*
@@ -1631,7 +1633,7 @@ if_slowtimo(void *arg)
 
        if (ifp->if_watchdog) {
                if (ifp->if_timer > 0 && --ifp->if_timer == 0)
-                       task_add(systq, ifp->if_watchdogtask);
+                       task_add(softnettq, ifp->if_watchdogtask);
                timeout_add(ifp->if_slowtimo, hz / IFNET_SLOWHZ);
        }
        splx(s);
@@ -1648,10 +1650,12 @@ if_watchdog_task(void *xifidx)
        if (ifp == NULL)
                return;
 
+       KERNEL_LOCK();
        s = splnet();
        if (ifp->if_watchdog)
                (*ifp->if_watchdog)(ifp);
        splx(s);
+       KERNEL_UNLOCK();
 
        if_put(ifp);
 }

Reply via email to