From: =?ISO-8859-1?q?Ilpo_J=E4rvinen?= <[EMAIL PROTECTED]> The fack count of any skb in the retransmit queue at any given point in time is:
(skb->fack_count - head_skb->fack_count) And we'll use this in the SACK processing loops and possibly elsewhere too. Original idea came from David S. Miller, included a couple of bug fixes from Tom Quetchenbach <[EMAIL PROTECTED]>. Signed-off-by: Ilpo Järvinen <[EMAIL PROTECTED]> --- include/net/tcp.h | 41 +++++++++++++++++++++++++++++++++++++++++ 1 files changed, 41 insertions(+), 0 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 5ec1cac..967f256 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -555,6 +555,7 @@ struct tcp_skb_cb { __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 when; /* used to compute rtt's */ + unsigned int fack_count; /* speed up SACK processing */ __u8 flags; /* TCP header flags. */ /* NOTE: These must match up to the flags byte in a @@ -1220,6 +1221,11 @@ static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_bu return skb->next; } +static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb) +{ + return skb->prev; +} + #define tcp_for_write_queue(skb, sk) \ for (skb = (sk)->sk_write_queue.next; \ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ @@ -1241,6 +1247,11 @@ static inline struct sk_buff *tcp_send_head(struct sock *sk) static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) { + struct sk_buff *prev = tcp_write_queue_prev(sk, skb); + + TCP_SKB_CB(skb)->fack_count = TCP_SKB_CB(prev)->fack_count + + tcp_skb_pcount(prev); + sk->sk_send_head = skb->next; if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) sk->sk_send_head = NULL; @@ -1259,6 +1270,7 @@ static inline void tcp_init_send_head(struct sock *sk) static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) { + TCP_SKB_CB(skb)->fack_count = 0; __skb_queue_tail(&sk->sk_write_queue, skb); } @@ -1275,9 +1287,36 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb } } 
+/* This is only used for tcp_send_synack(), so the write queue should + * be empty. If that stops being true, the fack_count assignment + * will need to be more elaborate. + */ static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) { + BUG_ON(!skb_queue_empty(&sk->sk_write_queue)); __skb_queue_head(&sk->sk_write_queue, skb); + TCP_SKB_CB(skb)->fack_count = 0; +} + +/* An insert into the middle of the write queue causes the fack + * counts in subsequent packets to become invalid, fix them up. + */ +static inline void tcp_reset_fack_counts(struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff *prev = skb->prev; + unsigned int fc = 0; + + if (prev != (struct sk_buff *) &sk->sk_write_queue) + fc = TCP_SKB_CB(prev)->fack_count + tcp_skb_pcount(prev); + + tcp_for_write_queue_from(skb, sk) { + if (!before(TCP_SKB_CB(skb)->seq, tcp_sk(sk)->snd_nxt) || + TCP_SKB_CB(skb)->fack_count == fc) + break; + + TCP_SKB_CB(skb)->fack_count = fc; + fc += tcp_skb_pcount(skb); + } } /* Insert buff after skb on the write queue of sk. */ @@ -1286,6 +1325,7 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb, struct sock *sk) { __skb_append(skb, buff, &sk->sk_write_queue); + tcp_reset_fack_counts(sk, buff); } /* Insert skb between prev and next on the write queue of sk. */ @@ -1294,6 +1334,7 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new, struct sock *sk) { __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); + tcp_reset_fack_counts(sk, new); if (sk->sk_send_head == skb) sk->sk_send_head = new; -- 1.5.0.6 -- To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html