I got inspired earlier today, and found that it might be easy
to kill off the 'list' member of struct sk_buff without changing
sk_buff_head at all.

I got very far.  Nearly every single piece of code was
easy to change to pass in an explicit SKB list instead
of skb->list to the SKB queue management functions.
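
To give a feel for it, the caller-side change is usually just this
(a sketch; the caller already knows which sk_buff_head it owns, so it
simply names it):

	/* Before: skb_unlink() chases skb->list internally. */
	skb_unlink(skb);

	/* After: the caller passes the queue explicitly;
	 * bfusb->pending_q below is one real example from the patch.
	 */
	skb_unlink(skb, &bfusb->pending_q);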

The big exception was SCTP.  I can't believe that, after being in
the kernel for several years, it still has all of this complicated
list handling, SKB structure overlaying, and casting all over the
place.  It was a big downer after a very positive day of coding.

First, it casts "struct sctp_chunk *" pointers to
"struct sk_buff *" so that it can "borrow" the SKB list
handling functions.  I just copied over the skb_*() routines
it used in this way to be sctp_chunk_*(), and used them
throughout and eliminated the ugly casts.  This can be
simplified a lot further, since it really doesn't care about
'qlen'.  In fact, what it wants is just the most basic list
handling, a la linux/list.h.  So just sticking a list_head
into sctp_chunk and replacing sctp_chunk_list with a list_head
as well should do the trick.
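
Purely as an untested sketch of that end state (this is NOT in the
patch below; member and helper names are illustrative):

	#include <linux/list.h>

	struct sctp_chunk {
		struct list_head list;	/* replaces the next/prev/list triple */
		atomic_t refcnt;
		/* ... everything else unchanged ... */
	};

	/* Chunk queues shrink to bare list heads; no qlen and no
	 * private lock, since the SCTP code doesn't really use them.
	 */
	struct sctp_outq {
		struct sctp_association *asoc;
		struct list_head out;
		struct list_head control;
		/* ... */
	};

	/* Enqueue/dequeue become the stock list.h primitives: */
	static inline void sctp_outq_tail_chunk(struct sctp_outq *q,
						struct sctp_chunk *ch)
	{
		list_add_tail(&ch->list, &q->out);
	}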

Some of the rest of the SCTP stuff was transformable without too
much effort.

But then I really got stymied by the reassembly and partial queue
handling.  These SCTP ulp event things add a layer of abstraction in
front of the skb_unlink() call such that you can't know what list the
SKB is on.  One way to deal with this is to store the list pointer in
the event struct, and that's likely what will happen at first.  This
isn't trivial because you have to make sure the assignment is done at
all of the receive packet list insertion points, and some places even
use sk_buff_head lists on the local stack, making this chore even more
"exciting" :(
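
A sketch of that first cut, with hypothetical helper names (again,
none of this is in the patch below):

	/* The event remembers which queue its skb currently sits on.
	 * Every insertion point must keep ->list up to date, which is
	 * exactly the error-prone part described above.
	 */
	struct sctp_ulpevent {
		struct sctp_association *asoc;
		struct sk_buff_head *list;	/* current queue, if any */
		/* ... */
	};

	static inline void sctp_ulpevent_enqueue(struct sk_buff_head *q,
						 struct sctp_ulpevent *event)
	{
		event->list = q;
		__skb_queue_tail(q, sctp_event2skb(event));
	}

	static inline void sctp_ulpevent_unlink(struct sctp_ulpevent *event)
	{
		__skb_unlink(sctp_event2skb(event), event->list);
		event->list = NULL;
	}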

But I didn't try to do that properly for now, so SCTP needs to be
disabled in the config to play with the patch below.

Another case that needs some careful study and review by others
is the usbnet.c driver.  Man, that is another piece of networking
code that could use some serious cleanups.  I bet it would be
a simpler driver if it did things NAPI style too.  But of particular
concern is all of the SKB data area mangling it does to work around
restrictions in various USB net device implementations.

This patch goes on top of the skb_queue_empty() diff I sent earlier
today.  I know I may have missed some skb_unlink() et al. fixups
for things that I didn't enable in my config, so patches to
cure that would be appreciated.  It's usually a very simplistic
transformation.  Even TCP, my biggest fear, only needed some minor
modifications to tcp_collapse(); the rest was straightforward.

Frankly, other than the SCTP parts, this is not very invasive at all.
But it needs a lot of testing and review before I'd feel comfortable
sending it along.

diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb 
        if (err) {
                BT_ERR("%s bulk tx submit failed urb %p err %d", 
                                        bfusb->hdev->name, urb, err);
-               skb_unlink(skb);
+               skb_unlink(skb, &bfusb->pending_q);
                usb_free_urb(urb);
        } else
                atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb
 
        read_lock(&bfusb->lock);
 
-       skb_unlink(skb);
+       skb_unlink(skb, &bfusb->pending_q);
        skb_queue_tail(&bfusb->completed_q, skb);
 
        bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb 
        if (err) {
                BT_ERR("%s bulk rx submit failed urb %p err %d",
                                        bfusb->hdev->name, urb, err);
-               skb_unlink(skb);
+               skb_unlink(skb, &bfusb->pending_q);
                kfree_skb(skb);
                usb_free_urb(urb);
        }
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb
                buf   += len;
        }
 
-       skb_unlink(skb);
+       skb_unlink(skb, &bfusb->pending_q);
        kfree_skb(skb);
 
        bfusb_rx_submit(bfusb, urb);
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -678,7 +678,7 @@ static void handle_packet_response(struc
                 return;
         }
 
-       __skb_unlink(skb, skb->list);
+       __skb_unlink(skb, &host->pending_packet_queue);
 
        if (packet->state == hpsb_queued) {
                packet->sendtime = jiffies;
@@ -986,7 +986,7 @@ void abort_timedouts(unsigned long __opa
                packet = (struct hpsb_packet *)skb->data;
 
                if (time_before(packet->sendtime + expire, jiffies)) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &host->pending_packet_queue);
                        packet->state = hpsb_complete;
                        packet->ack_code = ACKX_TIMEOUT;
                        queue_packet_complete(packet);
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -179,7 +179,7 @@ static int shaper_start_xmit(struct sk_b
                         */
                        if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
                        {
-                               skb_unlink(ptr);
+                               skb_unlink(ptr, &shaper->sendq);
                                dev_kfree_skb(ptr);
                        }
                        ptr=tmp;
@@ -302,7 +302,7 @@ static void shaper_kick(struct shaper *s
                         *      Pull the frame and get interrupts back on.
                         */
                         
-                       skb_unlink(skb);
+                       skb_unlink(skb, &shaper->sendq);
                        if (shaper->recovery < 
                            SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
                                shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -2903,9 +2903,8 @@ static struct net_device_stats *usbnet_g
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh (struct usbnet *dev, struct sk_buff *skb)
+static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
 {
-       struct sk_buff_head     *list = skb->list;
        unsigned long           flags;
 
        spin_lock_irqsave (&list->lock, flags);
@@ -3120,7 +3119,7 @@ block:
                break;
        }
 
-       defer_bh (dev, skb);
+       defer_bh(dev, skb, &dev->rxq);
 
        if (urb) {
                if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb
 
        urb->dev = NULL;
        entry->state = tx_done;
-       defer_bh (dev, skb);
+       defer_bh(dev, skb, &dev->txq);
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -205,7 +205,6 @@ struct sk_buff {
        struct sk_buff          *next;
        struct sk_buff          *prev;
 
-       struct sk_buff_head     *list;
        struct sock             *sk;
        struct timeval          stamp;
        struct net_device       *dev;
@@ -596,7 +595,6 @@ static inline void __skb_queue_head(stru
 {
        struct sk_buff *prev, *next;
 
-       newsk->list = list;
        list->qlen++;
        prev = (struct sk_buff *)list;
        next = prev->next;
@@ -621,7 +619,6 @@ static inline void __skb_queue_tail(stru
 {
        struct sk_buff *prev, *next;
 
-       newsk->list = list;
        list->qlen++;
        next = (struct sk_buff *)list;
        prev = next->prev;
@@ -654,7 +651,6 @@ static inline struct sk_buff *__skb_dequ
                next->prev   = prev;
                prev->next   = next;
                result->next = result->prev = NULL;
-               result->list = NULL;
        }
        return result;
 }
@@ -663,7 +659,7 @@ static inline struct sk_buff *__skb_dequ
 /*
  *     Insert a packet on a list.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
@@ -671,24 +667,23 @@ static inline void __skb_insert(struct s
        newsk->next = next;
        newsk->prev = prev;
        next->prev  = prev->next = newsk;
-       newsk->list = list;
        list->qlen++;
 }
 
 /*
  *     Place a packet after a given packet in a list.
  */
-extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
-       __skb_insert(newsk, old, old->next, old->list);
+       __skb_insert(newsk, old, old->next, list);
 }
 
 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void       skb_unlink(struct sk_buff *skb);
+extern void       skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
        struct sk_buff *next, *prev;
@@ -697,7 +692,6 @@ static inline void __skb_unlink(struct s
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
-       skb->list  = NULL;
        next->prev = prev;
        prev->next = next;
 }
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -582,6 +582,13 @@ void sctp_datamsg_track(struct sctp_chun
 void sctp_chunk_fail(struct sctp_chunk *, int error);
 int sctp_chunk_abandoned(struct sctp_chunk *);
 
+struct sctp_chunk_list {
+       struct sctp_chunk       *next;
+       struct sctp_chunk       *prev;
+
+       __u32           qlen;
+       spinlock_t      lock;
+};
 
 /* RFC2960 1.4 Key Terms
  *
@@ -592,13 +599,9 @@ int sctp_chunk_abandoned(struct sctp_chu
  * each chunk as well as a few other header pointers...
  */
 struct sctp_chunk {
-       /* These first three elements MUST PRECISELY match the first
-        * three elements of struct sk_buff.  This allows us to reuse
-        * all the skb_* queue management functions.
-        */
        struct sctp_chunk *next;
        struct sctp_chunk *prev;
-       struct sk_buff_head *list;
+       struct sctp_chunk_list *list;
        atomic_t refcnt;
 
        /* This is our link to the per-transport transmitted list.  */
@@ -682,6 +685,114 @@ struct sctp_chunk {
        __u8 tsn_missing_report; /* Data chunk missing counter. */
 };
 
+static inline void sctp_chunk_list_init(struct sctp_chunk_list *list)
+{
+       spin_lock_init(&list->lock);
+       list->prev = list->next = (struct sctp_chunk *)list;
+       list->qlen = 0;
+}
+
+static inline int sctp_chunk_queue_empty(const struct sctp_chunk_list *list)
+{
+       return list->next == (struct sctp_chunk *)list;
+}
+
+static inline struct sctp_chunk *sctp_chunk_peek(struct sctp_chunk_list *list_)
+{
+       struct sctp_chunk *list = ((struct sctp_chunk *)list_)->next;
+       if (list == (struct sctp_chunk *)list_)
+               list = NULL;
+       return list;
+}
+
+static inline struct sctp_chunk *__sctp_chunk_dequeue(struct sctp_chunk_list *list)
+{
+       struct sctp_chunk *next, *prev, *result;
+
+       prev = (struct sctp_chunk *) list;
+       next = prev->next;
+       result = NULL;
+       if (next != prev) {
+               result       = next;
+               next         = next->next;
+               list->qlen--;
+               next->prev   = prev;
+               prev->next   = next;
+               result->next = result->prev = NULL;
+       }
+       return result;
+}
+
+static inline struct sctp_chunk *sctp_chunk_dequeue(struct sctp_chunk_list *list)
+{
+       unsigned long flags;
+       struct sctp_chunk *result;
+
+       spin_lock_irqsave(&list->lock, flags);
+       result = __sctp_chunk_dequeue(list);
+       spin_unlock_irqrestore(&list->lock, flags);
+       return result;
+}
+
+static inline void sctp_chunk_unlink(struct sctp_chunk *chunk)
+{
+       struct sctp_chunk_list *list = chunk->list;
+       struct sctp_chunk *next, *prev;
+
+       if (list) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&list->lock, flags);
+               if (chunk->list == list) {
+                       list->qlen--;
+                       next       = chunk->next;
+                       prev       = chunk->prev;
+                       chunk->next = chunk->prev = NULL;
+                       chunk->list  = NULL;
+                       next->prev = prev;
+                       prev->next = next;
+               }
+               spin_unlock_irqrestore(&list->lock, flags);
+       }
+}
+
+static inline void __sctp_chunk_queue_head(struct sctp_chunk_list *list,
+                                          struct sctp_chunk *newsk)
+{
+       struct sctp_chunk *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       prev = (struct sctp_chunk *)list;
+       next = prev->next;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+static inline void __sctp_chunk_queue_tail(struct sctp_chunk_list *list,
+                                          struct sctp_chunk *newsk)
+{
+       struct sctp_chunk *prev, *next;
+
+       newsk->list = list;
+       list->qlen++;
+       next = (struct sctp_chunk *)list;
+       prev = next->prev;
+       newsk->next = next;
+       newsk->prev = prev;
+       next->prev  = prev->next = newsk;
+}
+
+static inline void sctp_chunk_queue_tail(struct sctp_chunk_list *list, struct sctp_chunk *newsk)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list->lock, flags);
+       __sctp_chunk_queue_tail(list, newsk);
+       spin_unlock_irqrestore(&list->lock, flags);
+}
+
 void sctp_chunk_hold(struct sctp_chunk *);
 void sctp_chunk_put(struct sctp_chunk *);
 int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
@@ -717,7 +828,7 @@ struct sctp_packet {
        __u32 vtag;
 
        /* This contains the payload chunks.  */
-       struct sk_buff_head chunks;
+       struct sctp_chunk_list chunks;
 
        /* This is the overhead of the sctp and ip headers. */
        size_t overhead;
@@ -974,7 +1085,7 @@ struct sctp_inq {
        /* This is actually a queue of sctp_chunk each
         * containing a partially decoded packet.
         */
-       struct sk_buff_head in;
+       struct sctp_chunk_list in;
        /* This is the packet which is currently off the in queue and is
         * being worked on through the inbound chunk processing.
         */
@@ -1017,7 +1128,7 @@ struct sctp_outq {
        struct sctp_association *asoc;
 
        /* Data pending that has never been transmitted.  */
-       struct sk_buff_head out;
+       struct sctp_chunk_list out;
 
        unsigned out_qlen;      /* Total length of queued data chunks. */
 
@@ -1025,7 +1136,7 @@ struct sctp_outq {
        unsigned error;
 
        /* These are control chunks we want to send.  */
-       struct sk_buff_head control;
+       struct sctp_chunk_list control;
 
        /* These are chunks that have been sacked but are above the
         * CTSN, or cumulative tsn ack point.
@@ -1672,7 +1783,7 @@ struct sctp_association {
         *  which already resides in sctp_outq.  Please move this
         *  queue and its supporting logic down there.  --piggy]
         */
-       struct sk_buff_head addip_chunks;
+       struct sctp_chunk_list addip_chunks;
 
        /* ADDIP Section 4.1 ASCONF Chunk Procedures
         *
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -34,7 +34,6 @@
 
 void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 {
-       struct sk_buff *skb;
        unsigned long flags;
        struct sk_buff *skb_from = (struct sk_buff *) from;
        struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *fr
        prev->next = skb_to;
        to->prev->next = from->next;
        to->prev = from->prev;
-       for (skb = from->next; skb != skb_to; skb = skb->next)
-               skb->list = to;
        to->qlen += from->qlen;
        spin_unlock(&to->lock);
        from->prev = skb_from;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
                if (skb_prev == NULL)
                        skb_queue_head(&ax25->write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &ax25->write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-       BUG_ON(skb->list != NULL);
-
        dst_release(skb->dst);
 #ifdef CONFIG_XFRM
        secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff
 #define C(x) n->x = skb->x
 
        n->next = n->prev = NULL;
-       n->list = NULL;
        n->sk = NULL;
        C(stamp);
        C(dev);
@@ -404,7 +401,6 @@ static void copy_skb_header(struct sk_bu
         */
        unsigned long offset = new->data - old->data;
 
-       new->list       = NULL;
        new->sk         = NULL;
        new->dev        = old->dev;
        new->real_dev   = old->real_dev;
@@ -1344,50 +1340,43 @@ void skb_queue_tail(struct sk_buff_head 
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  *     skb_unlink      -       remove a buffer from a list
  *     @skb: buffer to remove
+ *     @list: list to use
  *
- *     Place a packet after a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls
+ *     Remove a packet from a list. The list locks are taken and this
+ *     function is atomic with respect to other list locked calls
  *
- *     Works even without knowing the list it is sitting on, which can be
- *     handy at times. It also means that THE LIST MUST EXIST when you
- *     unlink. Thus a list must have its contents unlinked before it is
- *     destroyed.
+ *     You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-       struct sk_buff_head *list = skb->list;
-
-       if (list) {
-               unsigned long flags;
+       unsigned long flags;
 
-               spin_lock_irqsave(&list->lock, flags);
-               if (skb->list == list)
-                       __skb_unlink(skb, skb->list);
-               spin_unlock_irqrestore(&list->lock, flags);
-       }
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_unlink(skb, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  *     skb_append      -       append a buffer
  *     @old: buffer to insert after
  *     @newsk: buffer to insert
+ *     @list: list to use
  *
  *     Place a packet after a given packet in a list. The list locks are taken
  *     and this function is atomic with respect to other list locked calls.
  *     A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_append(old, newsk);
-       spin_unlock_irqrestore(&old->list->lock, flags);
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_append(old, newsk, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1395,19 +1384,21 @@ void skb_append(struct sk_buff *old, str
  *     skb_insert      -       insert a buffer
  *     @old: buffer to insert before
  *     @newsk: buffer to insert
+ *     @list: list to use
+ *
+ *     Place a packet before a given packet in a list. The list locks are
+ *     taken and this function is atomic with respect to other list locked
+ *     calls.
  *
- *     Place a packet before a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls
  *     A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_insert(newsk, old->prev, old, old->list);
-       spin_unlock_irqrestore(&old->list->lock, flags);
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_insert(newsk, old->prev, old, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb
                nskb = skb->next;
 
                if (skb->len == 0) {
-                       skb_unlink(skb);
+                       skb_unlink(skb, queue);
                        kfree_skb(skb);
                        /* 
                         * N.B. Don't refer to skb or cb after this point
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock 
                xmit_count = cb2->xmit_count;
                segnum = cb2->segnum;
                /* Remove and drop ack'ed packet */
-               skb_unlink(ack);
+               skb_unlink(ack, list);
                kfree_skb(ack);
                ack = NULL;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -975,7 +975,7 @@ do_fault:
        if (!skb->len) {
                if (sk->sk_send_head == skb)
                        sk->sk_send_head = NULL;
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &sk->sk_write_queue);
                sk_stream_free_skb(sk, skb);
        }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct so
                        seq_rtt = now - scb->when;
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &sk->sk_write_queue);
                sk_stream_free_skb(sk, skb);
        }
 
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *s
 
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                        SOCK_DEBUG(sk, "ofo packet was already received \n");
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &tp->out_of_order_queue);
                        __kfree_skb(skb);
                        continue;
                }
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *s
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                           TCP_SKB_CB(skb)->end_seq);
 
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                if (seq == TCP_SKB_CB(skb1)->end_seq) {
-                       __skb_append(skb1, skb);
+                       __skb_append(skb1, skb, &tp->out_of_order_queue);
 
                        if (!tp->rx_opt.num_sacks ||
                            tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
                               tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
                               break;
                       }
-                      __skb_unlink(skb1, skb1->list);
+                      __skb_unlink(skb1, &tp->out_of_order_queue);
                       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
                       __kfree_skb(skb1);
                }
@@ -3088,8 +3088,9 @@ add_sack:
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
-            struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+            struct sk_buff *head, struct sk_buff *tail,
+            u32 start, u32 end)
 {
        struct sk_buff *skb;
 
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        struct sk_buff *next = skb->next;
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, list);
                        __kfree_skb(skb);
                        NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                        skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_
                nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-               __skb_insert(nskb, skb->prev, skb, skb->list);
+               __skb_insert(nskb, skb->prev, skb, list);
                sk_stream_set_owner_r(nskb, sk);
 
                /* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                                struct sk_buff *next = skb->next;
-                               __skb_unlink(skb, skb->list);
+                               __skb_unlink(skb, list);
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struc
                if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, head, skb, start, end);
+                       tcp_collapse(sk, &tp->out_of_order_queue,
+                                    head, skb, start, end);
                        head = skb;
                        if (skb == (struct sk_buff *)&tp->out_of_order_queue)
                                break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, sk->sk_receive_queue.next,
+       tcp_collapse(sk, &sk->sk_receive_queue,
+                    sk->sk_receive_queue.next,
                     (struct sk_buff*)&sk->sk_receive_queue,
                     tp->copied_seq, tp->rcv_nxt);
        sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * 
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
                tp->copied_seq++;
                if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &sk->sk_receive_queue);
                        __kfree_skb(skb);
                }
        }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -507,7 +507,7 @@ static int tcp_fragment(struct sock *sk,
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -892,7 +892,7 @@ static int tso_fragment(struct sock *sk,
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -1257,7 +1257,7 @@ static void tcp_retrans_try_collapse(str
                       tcp_skb_pcount(next_skb) != 1);
 
                /* Ok.  We will be able to collapse the packet. */
-               __skb_unlink(next_skb, next_skb->list);
+               __skb_unlink(next_skb, &sk->sk_write_queue);
 
                memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct
                        IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
                        return;
                }
-               /* Unlink tx_skb from list */
-               tx_skb->next = tx_skb->prev = NULL;
-               tx_skb->list = NULL;
 
                /* Clear old Nr field + poll bit */
                tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct 
                        IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
                        return;
                }
-               /* Unlink tx_skb from list */
-               tx_skb->next = tx_skb->prev = NULL;
-               tx_skb->list = NULL;
 
                /* Clear old Nr field + poll bit */
                tx_skb->data[1] &= 0x0f;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb 
                if (!skb_prev)
                        skb_queue_head(&lapb->write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &lapb->write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *
        if (uaddr)
                memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
        msg->msg_namelen = sizeof(*uaddr);
-       if (!skb->list) {
+       if (!skb->next) {
 dgram_free:
                kfree_skb(skb);
        }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *
 
        if (!ev->ind_prim && !ev->cfm_prim) {
                /* indicate or confirm not required */
-               if (!skb->list)
+               /* XXX this is not very pretty, perhaps we should store
+                * XXX indicate/confirm-needed state in the llc_conn_state_ev
+                * XXX control block of the SKB instead? -DaveM
+                */
+               if (!skb->next)
                        goto out_kfree_skb;
                goto out_skb_put;
        }
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
                if (skb_prev == NULL)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk
                if (skb_prev == NULL)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -203,7 +203,7 @@ static struct sctp_association *sctp_ass
         */
        asoc->addip_serial = asoc->c.initial_tsn;
 
-       skb_queue_head_init(&asoc->addip_chunks);
+       sctp_chunk_list_init(&asoc->addip_chunks);
 
        /* Make an empty list of remote transport addresses.  */
        INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -50,7 +50,7 @@
 /* Initialize an SCTP inqueue.  */
 void sctp_inq_init(struct sctp_inq *queue)
 {
-       skb_queue_head_init(&queue->in);
+       sctp_chunk_list_init(&queue->in);
        queue->in_progress = NULL;
 
        /* Create a task for delivering data.  */
@@ -65,7 +65,7 @@ void sctp_inq_free(struct sctp_inq *queu
        struct sctp_chunk *chunk;
 
        /* Empty the queue.  */
-       while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL)
+       while ((chunk = sctp_chunk_dequeue(&queue->in)) != NULL)
                sctp_chunk_free(chunk);
 
        /* If there is a packet which is currently being worked on,
@@ -92,7 +92,7 @@ void sctp_inq_push(struct sctp_inq *q, s
         * Eventually, we should clean up inqueue to not rely
         * on the BH related data structures.
         */
-       skb_queue_tail(&(q->in), (struct sk_buff *) packet);
+       sctp_chunk_queue_tail(&q->in, packet);
        q->immediate.func(q->immediate.data);
 }
 
@@ -132,11 +132,10 @@ struct sctp_chunk *sctp_inq_pop(struct s
        /* Do we need to take the next packet out of the queue to process? */
        if (!chunk) {
                /* Is the queue empty?  */
-               if (skb_queue_empty(&queue->in))
+               if (sctp_chunk_queue_empty(&queue->in))
                        return NULL;
 
-               chunk = queue->in_progress =
-                       (struct sctp_chunk *) skb_dequeue(&queue->in);
+               chunk = queue->in_progress = sctp_chunk_dequeue(&queue->in);
 
                /* This is the first chunk in the packet.  */
                chunk->singleton = 1;
diff --git a/net/sctp/output.c b/net/sctp/output.c
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -108,7 +108,7 @@ struct sctp_packet *sctp_packet_init(str
        packet->transport = transport;
        packet->source_port = sport;
        packet->destination_port = dport;
-       skb_queue_head_init(&packet->chunks);
+       sctp_chunk_list_init(&packet->chunks);
        if (asoc) {
                struct sctp_sock *sp = sctp_sk(asoc->base.sk);  
                overhead = sp->pf->af->net_header_len; 
@@ -133,7 +133,7 @@ void sctp_packet_free(struct sctp_packet
 
        SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
-        while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
+        while ((chunk = __sctp_chunk_dequeue(&packet->chunks)) != NULL)
                sctp_chunk_free(chunk);
 
        if (packet->malloced)
@@ -276,7 +276,7 @@ append:
                packet->has_sack = 1;
 
        /* It is OK to send this chunk.  */
-       __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
+       __sctp_chunk_queue_tail(&packet->chunks, chunk);
        packet->size += chunk_len;
        chunk->transport = packet->transport;
 finish:
@@ -305,7 +305,7 @@ int sctp_packet_transmit(struct sctp_pac
        SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
        /* Do NOT generate a chunkless packet. */
-       chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
+       chunk = sctp_chunk_peek(&packet->chunks);
        if (unlikely(!chunk))
                return err;
 
@@ -370,7 +370,7 @@ int sctp_packet_transmit(struct sctp_pac
         * [This whole comment explains WORD_ROUND() below.]
         */
        SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
-       while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
+       while ((chunk = __sctp_chunk_dequeue(&packet->chunks)) != NULL) {
                if (sctp_chunk_is_data(chunk)) {
 
                        if (!chunk->has_tsn) {
@@ -511,7 +511,7 @@ err:
         * will get resent or dropped later.
         */
 
-       while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
+       while ((chunk = __sctp_chunk_dequeue(&packet->chunks)) != NULL) {
                if (!sctp_chunk_is_data(chunk))
                        sctp_chunk_free(chunk);
        }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct 
 static inline void sctp_outq_head_data(struct sctp_outq *q,
                                        struct sctp_chunk *ch)
 {
-       __skb_queue_head(&q->out, (struct sk_buff *)ch);
+       __sctp_chunk_queue_head(&q->out, ch);
        q->out_qlen += ch->skb->len;
        return;
 }
@@ -84,7 +84,7 @@ static inline void sctp_outq_head_data(s
 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
 {
        struct sctp_chunk *ch;
-       ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
+       ch = __sctp_chunk_dequeue(&q->out);
        if (ch)
                q->out_qlen -= ch->skb->len;
        return ch;
@@ -93,7 +93,7 @@ static inline struct sctp_chunk *sctp_ou
 static inline void sctp_outq_tail_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
 {
-       __skb_queue_tail(&q->out, (struct sk_buff *)ch);
+       __sctp_chunk_queue_tail(&q->out, ch);
        q->out_qlen += ch->skb->len;
        return;
 }
@@ -197,8 +197,8 @@ static inline int sctp_cacc_skip(struct 
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
        q->asoc = asoc;
-       skb_queue_head_init(&q->out);
-       skb_queue_head_init(&q->control);
+       sctp_chunk_list_init(&q->out);
+       sctp_chunk_list_init(&q->control);
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
@@ -269,7 +269,7 @@ void sctp_outq_teardown(struct sctp_outq
        q->error = 0;
 
        /* Throw away any leftover control chunks. */
-       while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
+       while ((chunk = sctp_chunk_dequeue(&q->control)) != NULL)
                sctp_chunk_free(chunk);
 }
 
@@ -333,7 +333,7 @@ int sctp_outq_tail(struct sctp_outq *q, 
                        break;
                };
        } else {
-               __skb_queue_tail(&q->control, (struct sk_buff *) chunk);
+               __sctp_chunk_queue_tail(&q->control, chunk);
                SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
        }
 
@@ -650,7 +650,7 @@ int sctp_outq_flush(struct sctp_outq *q,
        __u16 sport = asoc->base.bind_addr.port;
        __u16 dport = asoc->peer.port;
        __u32 vtag = asoc->peer.i.init_tag;
-       struct sk_buff_head *queue;
+       struct sctp_chunk_list *queue;
        struct sctp_transport *transport = NULL;
        struct sctp_transport *new_transport;
        struct sctp_chunk *chunk;
@@ -676,7 +676,7 @@ int sctp_outq_flush(struct sctp_outq *q,
         */
 
        queue = &q->control;
-       while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
+       while ((chunk = sctp_chunk_dequeue(queue)) != NULL) {
                /* Pick the right transport to use. */
                new_transport = chunk->transport;
 
@@ -1149,8 +1149,9 @@ int sctp_outq_sack(struct sctp_outq *q, 
        /* See if all chunks are acked.
         * Make sure the empty queue handler will get run later.
         */
-       q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
-                       list_empty(&q->retransmit);
+       q->empty = (sctp_chunk_queue_empty(&q->out) &&
+                   sctp_chunk_queue_empty(&q->control) &&
+                   list_empty(&q->retransmit));
        if (!q->empty)
                goto finish;
 
@@ -1729,7 +1730,7 @@ static void sctp_generate_fwdtsn(struct 
                                              nskips, &ftsn_skip_arr[0]); 
 
        if (ftsn_chunk) {
-               __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
+               __sctp_chunk_queue_tail(&q->control, ftsn_chunk);
                SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
        }
 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1117,7 +1117,7 @@ static void sctp_chunk_destroy(struct sc
 void sctp_chunk_free(struct sctp_chunk *chunk)
 {
        /* Make sure that we are not on any list.  */
-       skb_unlink((struct sk_buff *) chunk);
+       sctp_chunk_unlink(chunk);
        list_del_init(&chunk->transmitted_list);
 
        /* Release our reference on the message tracker. */
@@ -2739,7 +2739,7 @@ int sctp_process_asconf_ack(struct sctp_
        asoc->addip_last_asconf = NULL;
 
        /* Send the next asconf chunk from the addip chunk queue. */
-       asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks);
+       asconf = __sctp_chunk_dequeue(&asoc->addip_chunks);
        if (asconf) {
                /* Hold the chunk until an ASCONF_ACK is received. */
                sctp_chunk_hold(asconf);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -406,7 +406,7 @@ static int sctp_send_asconf(struct sctp_
         * transmission.
         */     
        if (asoc->addip_last_asconf) {
-               __skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk);
+               __sctp_chunk_queue_tail(&asoc->addip_chunks, chunk);
                goto out;       
        }
 
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct soc
        sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                event = sctp_skb2event(skb);
                if (event->asoc == assoc) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &oldsk->sk_receive_queue);
                        __skb_queue_tail(&newsk->sk_receive_queue, skb);
                }
        }
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct soc
                sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                        event = sctp_skb2event(skb);
                        if (event->asoc == assoc) {
-                               __skb_unlink(skb, skb->list);
+                               __skb_unlink(skb, &oldsp->pd_lobby);
                                __skb_queue_tail(queue, skb);
                        }
                }
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -269,7 +269,7 @@ static inline void sctp_ulpq_store_reasm
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sctp_ulpq *ulpq, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sctp_ulpevent *event;
@@ -294,7 +294,7 @@ static struct sctp_ulpevent *sctp_make_r
                skb_shinfo(f_frag)->frag_list = pos;
 
        /* Remove the first fragment from the reassembly queue.  */
-       __skb_unlink(f_frag, f_frag->list);
+       __skb_unlink(f_frag, &ulpq->reasm);
        while (pos) {
 
                pnext = pos->next;
@@ -304,7 +304,7 @@ static struct sctp_ulpevent *sctp_make_r
                f_frag->data_len += pos->len;
 
                /* Remove the fragment from the reassembly queue.  */
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, &ulpq->reasm);
        
                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
@@ -375,7 +375,7 @@ static inline struct sctp_ulpevent *sctp
 done:
        return retval;
 found:
-       retval = sctp_make_reassembled_event(first_frag, pos);
+       retval = sctp_make_reassembled_event(ulpq, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
@@ -435,7 +435,7 @@ static inline struct sctp_ulpevent *sctp
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(first_frag, last_frag);
+       retval = sctp_make_reassembled_event(ulpq, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;
 
@@ -527,7 +527,7 @@ static inline struct sctp_ulpevent *sctp
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(first_frag, last_frag);
+       retval = sctp_make_reassembled_event(ulpq, first_frag, last_frag);
        return retval;
 }
 
@@ -567,7 +567,7 @@ static inline void sctp_ulpq_retrieve_or
                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);
 
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, &ulpq->lobby);
 
                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(sctp_event2skb(event)->list, pos);
@@ -686,7 +686,7 @@ static inline void sctp_ulpq_reap_ordere
                /* Found it, so mark in the ssnmap. */         
                sctp_ssn_next(in, csid);
 
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, &ulpq->lobby);
                if (!event) {                                           
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
@@ -694,7 +694,7 @@ static inline void sctp_ulpq_reap_ordere
                        __skb_queue_tail(&temp, sctp_event2skb(event));
                } else {
                        /* Attach all gathered skbs to the event.  */
-                       __skb_queue_tail(sctp_event2skb(event)->list, pos);
+                       __skb_queue_tail(&temp, pos);
                }
        }
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -286,16 +286,16 @@ void unix_gc(void)
                        skb = skb_peek(&s->sk_receive_queue);
                        while (skb &&
                               skb != (struct sk_buff *)&s->sk_receive_queue) {
-                               nextsk=skb->next;
+                               nextsk = skb->next;
                                /*
                                 *      Do we have file descriptors ?
                                 */
-                               if(UNIXCB(skb).fp)
-                               {
-                                       __skb_unlink(skb, skb->list);
-                                       __skb_queue_tail(&hitlist,skb);
+                               if (UNIXCB(skb).fp) {
+                                       __skb_unlink(skb,
+                                                    &s->sk_receive_queue);
+                                       __skb_queue_tail(&hitlist, skb);
                                }
-                               skb=nextsk;
+                               skb = nextsk;
                        }
                        spin_unlock(&s->sk_receive_queue.lock);
                }
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
                if (!skb_prev)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }