From: Ilpo Järvinen <[EMAIL PROTECTED]>

Index the TCP write queue with a red-black tree, keyed by start sequence
number, so that the skb covering a given sequence number can be found in
O(log n) time via tcp_write_queue_find() instead of a linear walk of
sk_write_queue.  Each skb gains an rb_node, struct tcp_sock gains the
write_queue_rb root, and the existing write queue helpers keep the list
and the tree in sync; all socket creation paths initialize the new root.

This work was mostly done by David S. Miller.

Signed-off-by: Ilpo Järvinen <[EMAIL PROTECTED]>
---
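A minimal caller sketch (illustration only; tcp_find_covering_skb() is a
made-up name, not part of this patch) of how the new lookup is meant to
be used: tcp_write_queue_find() returns the first queued skb whose
end_seq is after the given seq, so a caller that wants the segment
actually containing seq still has to check the segment's start:

/* Hypothetical helper: return the queued skb containing `seq`, or NULL
 * when no queued segment covers it.  E.g. with [1000,1100), [1100,1200)
 * and [1200,1300) queued, seq 1150 yields [1100,1200) while seq 1300
 * yields NULL.
 */
static inline struct sk_buff *tcp_find_covering_skb(struct sock *sk, u32 seq)
{
	struct sk_buff *skb = tcp_write_queue_find(sk, seq);

	/* Found skb ends after seq; it contains seq only if it also
	 * starts at or before it.
	 */
	if (skb && after(TCP_SKB_CB(skb)->seq, seq))
		return NULL;
	return skb;
}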
 include/linux/skbuff.h   |    3 ++
 include/linux/tcp.h      |    2 +
 include/net/tcp.h        |   62 ++++++++++++++++++++++++++++++++++++++++++++-
 net/ipv4/tcp_ipv4.c      |    1 +
 net/ipv4/tcp_minisocks.c |    1 +
 net/ipv6/tcp_ipv6.c      |    1 +
 6 files changed, 68 insertions(+), 2 deletions(-)
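The tree is only consistent if every queue mutation goes through the
tcp_*_write_queue helpers changed below, and if every socket creation
path resets the root before anything is queued.  A sketch of the latter,
mirroring the tcp_v4_init_sock()/tcp_v6_init_sock()/
tcp_create_openreq_child() hunks (example_init_queues() is a made-up
name used only for illustration):

/* Hypothetical init sketch: reset the rb root alongside the existing
 * queue init; walking a stale rb_node from a recycled skb would crash.
 */
static void example_init_queues(struct sock *sk)
{
	tcp_write_queue_init(sk);	/* write_queue_rb = RB_ROOT */
	skb_queue_head_init(&tcp_sk(sk)->out_of_order_queue);
}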

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d39f53e..b00e7e8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/cache.h>
+#include <linux/rbtree.h>
 
 #include <asm/atomic.h>
 #include <asm/types.h>
@@ -253,6 +254,8 @@ struct sk_buff {
        struct sk_buff          *next;
        struct sk_buff          *prev;
 
+       struct rb_node          rb;
+
        struct sock             *sk;
        ktime_t                 tstamp;
        struct net_device       *dev;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 08027f1..c24fc06 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -174,6 +174,7 @@ struct tcp_md5sig {
 
 #include <linux/skbuff.h>
 #include <linux/dmaengine.h>
+#include <linux/rbtree.h>
 #include <net/sock.h>
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -320,6 +321,7 @@ struct tcp_sock {
        u32     snd_cwnd_used;
        u32     snd_cwnd_stamp;
 
+       struct rb_root          write_queue_rb;
        struct sk_buff_head     out_of_order_queue; /* Out of order segments go here */
 
        u32     rcv_wnd;        /* Current receiver window              */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 967f256..433c6a6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1190,6 +1190,11 @@ static inline void tcp_put_md5sig_pool(void)
        put_cpu();
 }
 
+static inline void tcp_write_queue_init(struct sock *sk)
+{
+       tcp_sk(sk)->write_queue_rb = RB_ROOT;
+}
+
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
@@ -1197,6 +1202,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
 
        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                sk_stream_free_skb(sk, skb);
+       tcp_sk(sk)->write_queue_rb = RB_ROOT;
        sk_stream_mem_reclaim(sk);
 }
 
@@ -1252,7 +1258,7 @@ static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
        TCP_SKB_CB(skb)->fack_count = TCP_SKB_CB(prev)->fack_count +
                                      tcp_skb_pcount(prev);
 
-       sk->sk_send_head = skb->next;
+       sk->sk_send_head = tcp_write_queue_next(sk, skb);
        if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
                sk->sk_send_head = NULL;
 }
@@ -1268,10 +1274,58 @@ static inline void tcp_init_send_head(struct sock *sk)
        sk->sk_send_head = NULL;
 }
 
+static inline struct sk_buff *tcp_write_queue_find(struct sock *sk, __u32 seq)
+{
+       struct rb_node *rb_node = tcp_sk(sk)->write_queue_rb.rb_node;
+       struct sk_buff *skb = NULL;
+
+       while (rb_node) {
+               struct sk_buff *tmp = rb_entry(rb_node, struct sk_buff, rb);
+               if (after(TCP_SKB_CB(tmp)->end_seq, seq)) {
+                       skb = tmp;
+                       if (!after(TCP_SKB_CB(tmp)->seq, seq))
+                               break;
+                       rb_node = rb_node->rb_left;
+               } else {
+                       rb_node = rb_node->rb_right;
+               }
+       }
+       return skb;
+}
+
+static inline void tcp_rb_insert(struct sk_buff *skb, struct rb_root *root)
+{
+       struct rb_node **rb_link, *rb_parent;
+       __u32 seq = TCP_SKB_CB(skb)->seq;
+
+       rb_link = &root->rb_node;
+       rb_parent = NULL;
+       while (*rb_link) {
+               struct sk_buff *tmp;
+
+               rb_parent = *rb_link;
+               tmp = rb_entry(rb_parent, struct sk_buff, rb);
+               if (after(TCP_SKB_CB(tmp)->end_seq, seq)) {
+                       BUG_ON(!after(TCP_SKB_CB(tmp)->seq, seq));
+                       rb_link = &rb_parent->rb_left;
+               } else {
+                       rb_link = &rb_parent->rb_right;
+               }
+       }
+       rb_link_node(&skb->rb, rb_parent, rb_link);
+       rb_insert_color(&skb->rb, root);
+}
+
+static inline void tcp_rb_unlink(struct sk_buff *skb, struct rb_root *root)
+{
+       rb_erase(&skb->rb, root);
+}
+
 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
 {
        TCP_SKB_CB(skb)->fack_count = 0;
        __skb_queue_tail(&sk->sk_write_queue, skb);
+       tcp_rb_insert(skb, &tcp_sk(sk)->write_queue_rb);
 }
 
 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
@@ -1296,6 +1350,7 @@ static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *s
        BUG_ON(!skb_queue_empty(&sk->sk_write_queue));
        __skb_queue_head(&sk->sk_write_queue, skb);
        TCP_SKB_CB(skb)->fack_count = 0;
+       tcp_rb_insert(skb, &tcp_sk(sk)->write_queue_rb);
 }
 
 /* An insert into the middle of the write queue causes the fack
@@ -1326,15 +1381,17 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
 {
        __skb_append(skb, buff, &sk->sk_write_queue);
        tcp_reset_fack_counts(sk, buff);
+       tcp_rb_insert(buff, &tcp_sk(sk)->write_queue_rb);
 }
 
-/* Insert skb between prev and next on the write queue of sk.  */
+/* Insert new before skb on the write queue of sk.  */
 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
                                                  struct sk_buff *skb,
                                                  struct sock *sk)
 {
        __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
        tcp_reset_fack_counts(sk, new);
+       tcp_rb_insert(new, &tcp_sk(sk)->write_queue_rb);
 
        if (sk->sk_send_head == skb)
                sk->sk_send_head = new;
@@ -1343,6 +1400,7 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new,
 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
 {
        __skb_unlink(skb, &sk->sk_write_queue);
+       tcp_rb_unlink(skb, &tcp_sk(sk)->write_queue_rb);
 }
 
 static inline int tcp_skb_is_last(const struct sock *sk,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 652c323..5a27e42 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1849,6 +1849,7 @@ static int tcp_v4_init_sock(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
+       tcp_write_queue_init(sk);
        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b61b768..e1a0e4a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -426,6 +426,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
+               tcp_write_queue_init(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93980c3..f1294dc 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1886,6 +1886,7 @@ static int tcp_v6_init_sock(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
+       tcp_write_queue_init(sk);
        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
-- 
1.5.0.6
