Since these paths are now serialized by the netlock, we don't need to
handle side effects caused by context switches.
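
To illustrate the point, here is a minimal sketch of the lazy task
queue creation this allows (it mirrors the first sosplice() hunk in
the diff below; everything around it is elided):

	soassertlocked(so);

	if (sosplice_taskq == NULL) {
		/*
		 * Sketch only: sosplice() is serialized by the netlock,
		 * so at most one thread can see the NULL pointer here.
		 * Even if taskq_create(9) sleeps, the lock is held across
		 * the sleep, so no rwlock and no membar_producer() are
		 * needed to publish the queue.
		 */
		sosplice_taskq = taskq_create("sosplice", 1, IPL_SOFTNET,
		    TASKQ_MPSAFE);
		if (sosplice_taskq == NULL)
			return (ENOMEM);
	}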

The somove() return type is changed to void because we can't have a
concurrent somove() while we sleep. The SB_SPLICE bits we now set
before the somove() call will be cleared within somove() if splicing
is finished.
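
For reference, the clearing happens in sounsplice(), which somove()
calls once splicing is done. A simplified sketch of that function
follows; it is not the verbatim source, and the wakeup handling and
the exact meaning of the third argument are left out:

	void
	sounsplice(struct socket *so, struct socket *sosp, int freeing)
	{
		soassertlocked(so);

		task_del(sosplice_taskq, &so->so_splicetask);
		timeout_del(&so->so_idleto);
		/* Take back the bits sosplice() set before calling somove(). */
		sosp->so_snd.sb_flags &= ~SB_SPLICE;
		so->so_rcv.sb_flags &= ~SB_SPLICE;
		so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
		/* read/write wakeups elided */
	}

A buffer wakeup that fires while sosplice() sleeps in somove() can
only schedule so_splicetask; the somove() it eventually runs is
behind the netlock as well, so it is serialized after us and the
return value is no longer needed to defer setting SB_SPLICE.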

Index: sys/kern/uipc_socket.c
===================================================================
RCS file: /cvs/src/sys/kern/uipc_socket.c,v
retrieving revision 1.298
diff -u -p -r1.298 uipc_socket.c
--- sys/kern/uipc_socket.c      27 Jan 2023 18:46:34 -0000      1.298
+++ sys/kern/uipc_socket.c      27 Jan 2023 20:41:10 -0000
@@ -64,7 +64,7 @@ void  soidle(void *);
 void   sotask(void *);
 void   soreaper(void *);
 void   soput(void *);
-int    somove(struct socket *, int);
+void   somove(struct socket *, int);
 void   sorflush(struct socket *);
 
 void   filt_sordetach(struct knote *kn);
@@ -123,7 +123,6 @@ struct pool socket_pool;
 #ifdef SOCKET_SPLICE
 struct pool sosplice_pool;
 struct taskq *sosplice_taskq;
-struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
 #endif
 
 void
@@ -1239,25 +1238,16 @@ sosplice(struct socket *so, int fd, off_
 {
        struct file     *fp;
        struct socket   *sosp;
-       struct sosplice *sp;
-       struct taskq    *tq;
        int              error = 0;
 
        soassertlocked(so);
 
        if (sosplice_taskq == NULL) {
-               rw_enter_write(&sosplice_lock);
-               if (sosplice_taskq == NULL) {
-                       tq = taskq_create("sosplice", 1, IPL_SOFTNET,
-                           TASKQ_MPSAFE);
-                       /* Ensure the taskq is fully visible to other CPUs. */
-                       membar_producer();
-                       sosplice_taskq = tq;
-               }
-               rw_exit_write(&sosplice_lock);
+               sosplice_taskq = taskq_create("sosplice", 1, IPL_SOFTNET,
+                   TASKQ_MPSAFE);
+               if (sosplice_taskq == NULL)
+                       return (ENOMEM);
        }
-       if (sosplice_taskq == NULL)
-               return (ENOMEM);
 
        if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
                return (EPROTONOSUPPORT);
@@ -1266,13 +1256,8 @@ sosplice(struct socket *so, int fd, off_
        if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
            (so->so_proto->pr_flags & PR_CONNREQUIRED))
                return (ENOTCONN);
-       if (so->so_sp == NULL) {
-               sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
-               if (so->so_sp == NULL)
-                       so->so_sp = sp;
-               else
-                       pool_put(&sosplice_pool, sp);
-       }
+       if (so->so_sp == NULL)
+               so->so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
 
        /* If no fd is given, unsplice by removing existing link. */
        if (fd < 0) {
@@ -1301,13 +1286,8 @@ sosplice(struct socket *so, int fd, off_
                error = EPROTONOSUPPORT;
                goto frele;
        }
-       if (sosp->so_sp == NULL) {
-               sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
-               if (sosp->so_sp == NULL)
-                       sosp->so_sp = sp;
-               else
-                       pool_put(&sosplice_pool, sp);
-       }
+       if (sosp->so_sp == NULL)
+               sosp->so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
 
        /* Lock both receive and send buffer. */
        if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
@@ -1320,7 +1300,6 @@ sosplice(struct socket *so, int fd, off_
 
        if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
                error = EBUSY;
-               goto release;
        }
        if (sosp->so_options & SO_ACCEPTCONN) {
                error = EOPNOTSUPP;
@@ -1334,6 +1313,8 @@ sosplice(struct socket *so, int fd, off_
        /* Splice so and sosp together. */
        so->so_sp->ssp_socket = sosp;
        sosp->so_sp->ssp_soback = so;
+       so->so_rcv.sb_flags |= SB_SPLICE;
+       sosp->so_snd.sb_flags |= SB_SPLICE;
        so->so_splicelen = 0;
        so->so_splicemax = max;
        if (tv)
@@ -1343,14 +1324,7 @@ sosplice(struct socket *so, int fd, off_
        timeout_set_proc(&so->so_idleto, soidle, so);
        task_set(&so->so_splicetask, sotask, so);
 
-       /*
-        * To prevent softnet interrupt from calling somove() while
-        * we sleep, the socket buffers are not marked as spliced yet.
-        */
-       if (somove(so, M_WAIT)) {
-               so->so_rcv.sb_flags |= SB_SPLICE;
-               sosp->so_snd.sb_flags |= SB_SPLICE;
-       }
+       somove(so, M_WAIT);
 
  release:
        sbunlock(sosp, &sosp->so_snd);
@@ -1448,9 +1422,8 @@ soput(void *arg)
  * Move data from receive buffer of spliced source socket to send
  * buffer of drain socket.  Try to move as much as possible in one
  * big chunk.  It is a TCP only implementation.
- * Return value 0 means splicing has been finished, 1 continue.
  */
-int
+void
 somove(struct socket *so, int wait)
 {
        struct socket   *sosp = so->so_sp->ssp_socket;
@@ -1721,11 +1694,10 @@ somove(struct socket *so, int wait)
            (sosp->so_snd.sb_state & SS_CANTSENDMORE) ||
            maxreached || error) {
                sounsplice(so, sosp, 0);
-               return (0);
+       } else {
+               if (timerisset(&so->so_idletv))
+                       timeout_add_tv(&so->so_idleto, &so->so_idletv);
        }
-       if (timerisset(&so->so_idletv))
-               timeout_add_tv(&so->so_idleto, &so->so_idletv);
-       return (1);
 }
 
 #endif /* SOCKET_SPLICE */
