Move all the code that does linear TCP slow start into one
inline function, to ease a later patch adding ABC support.

Signed-off-by: Stephen Hemminger <[EMAIL PROTECTED]>
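
For context, a rough sketch of where this is headed: once every congestion
module calls tcp_slow_start(), a follow-up ABC (Appropriate Byte Counting,
RFC 3465) patch only has to touch the helper itself. A minimal sketch of
what that might look like, assuming hypothetical names sysctl_tcp_abc and
tp->bytes_acked that are not part of this patch:

	/* Sketch only -- not part of this patch.  With ABC enabled, open
	 * the window per MSS of data acked rather than once per ACK.
	 * sysctl_tcp_abc and tp->bytes_acked are assumed names here.
	 */
	static inline void tcp_slow_start(struct tcp_sock *tp)
	{
		if (sysctl_tcp_abc) {
			/* RFC 3465: wait for a full MSS of acked data */
			if (tp->bytes_acked < tp->mss_cache)
				return;
			tp->bytes_acked -= tp->mss_cache;
		}

		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}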

--- net-2.6.orig/include/net/tcp.h
+++ net-2.6/include/net/tcp.h
@@ -762,6 +762,16 @@ static inline __u32 tcp_current_ssthresh
                            (tp->snd_cwnd >> 2)));
 }
 
+/*
+ * Linear increase during slow start
+ */
+static inline void tcp_slow_start(struct tcp_sock *tp)
+{
+       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+               tp->snd_cwnd++;
+}
+
+
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
        if (tp->rx_opt.sack_ok &&
--- net-2.6.orig/net/ipv4/tcp_bic.c
+++ net-2.6/net/ipv4/tcp_bic.c
@@ -216,24 +216,20 @@ static void bictcp_cong_avoid(struct soc
 
        bictcp_low_utilization(sk, data_acked);
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
-               /* In "safe" area, increase. */
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                       tp->snd_cwnd++;
-       } else {
-               bictcp_update(ca, tp->snd_cwnd);
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tcp_slow_start(tp);
 
-                /* In dangerous area, increase slowly.
-                * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-                */
-               if (tp->snd_cwnd_cnt >= ca->cnt) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               } else
-                       tp->snd_cwnd_cnt++;
-       }
+       bictcp_update(ca, tp->snd_cwnd);
 
+       /* In dangerous area, increase slowly.
+        * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
+        */
+       if (tp->snd_cwnd_cnt >= ca->cnt) {
+               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+                       tp->snd_cwnd++;
+               tp->snd_cwnd_cnt = 0;
+       } else
+               tp->snd_cwnd_cnt++;
 }
 
 /*
--- net-2.6.orig/net/ipv4/tcp_cong.c
+++ net-2.6/net/ipv4/tcp_cong.c
@@ -185,21 +185,19 @@ void tcp_reno_cong_avoid(struct sock *sk
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
-                /* In "safe" area, increase. */
+       /* In "safe" area, increase. */
+        if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tcp_slow_start(tp);
+
+       /* In dangerous area, increase slowly.
+        * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
+        */
+       if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
-       } else {
-                /* In dangerous area, increase slowly.
-                * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-                */
-               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               } else
-                       tp->snd_cwnd_cnt++;
-       }
+               tp->snd_cwnd_cnt = 0;
+       } else
+               tp->snd_cwnd_cnt++;
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
--- net-2.6.orig/net/ipv4/tcp_highspeed.c
+++ net-2.6/net/ipv4/tcp_highspeed.c
@@ -115,28 +115,26 @@ static void hstcp_cong_avoid(struct sock
        struct tcp_sock *tp = tcp_sk(sk);
        struct hstcp *ca = inet_csk_ca(sk);
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                       tp->snd_cwnd++;
-       } else {
-               /* Update AIMD parameters */
-               if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
-                       while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
-                              ca->ai < HSTCP_AIMD_MAX)
-                               ca->ai++;
-               } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
-                       while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
-                              ca->ai > 0)
-                               ca->ai--;
-               }
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tcp_slow_start(tp);
 
-               /* Do additive increase */
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
-                       tp->snd_cwnd_cnt += ca->ai;
-                       if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                               tp->snd_cwnd++;
-                               tp->snd_cwnd_cnt -= tp->snd_cwnd;
-                       }
+       /* Update AIMD parameters */
+       if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
+               while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
+                      ca->ai < HSTCP_AIMD_MAX)
+                       ca->ai++;
+       } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
+               while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
+                      ca->ai > 0)
+                       ca->ai--;
+       }
+
+       /* Do additive increase */
+       if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
+               tp->snd_cwnd_cnt += ca->ai;
+               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+                       tp->snd_cwnd++;
+                       tp->snd_cwnd_cnt -= tp->snd_cwnd;
                }
        }
 }
--- net-2.6.orig/net/ipv4/tcp_htcp.c
+++ net-2.6/net/ipv4/tcp_htcp.c
@@ -206,29 +206,26 @@ static void htcp_cong_avoid(struct sock 
        struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
-                /* In "safe" area, increase. */
+        if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tcp_slow_start(tp);
+
+       measure_rtt(sk);
+
+       /* keep track of number of round-trip times since last backoff event */
+       if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
+               ca->ccount++;
+               ca->snd_cwnd_cnt2 = 0;
+               htcp_alpha_update(ca);
+       }
+
+       /* In dangerous area, increase slowly.
+        * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
+        */
+       if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
-       } else {
-               measure_rtt(sk);
-
-               /* keep track of number of round-trip times since last backoff event */
-               if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
-                       ca->ccount++;
-                       ca->snd_cwnd_cnt2 = 0;
-                       htcp_alpha_update(ca);
-               }
-
-                /* In dangerous area, increase slowly.
-                * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
-                */
-               if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-                       ca->ccount++;
-               }
+               tp->snd_cwnd_cnt = 0;
+               ca->ccount++;
        }
 }
 
--- net-2.6.orig/net/ipv4/tcp_scalable.c
+++ net-2.6/net/ipv4/tcp_scalable.c
@@ -20,17 +20,14 @@ static void tcp_scalable_cong_avoid(stru
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tcp_slow_start(tp);
+
+       tp->snd_cwnd_cnt++;
+       if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
                tp->snd_cwnd++;
-       } else {
-               tp->snd_cwnd_cnt++;
-               if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
-                       tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               }
+               tp->snd_cwnd_cnt = 0;
        }
-       tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
--- net-2.6.orig/net/ipv4/tcp_vegas.c
+++ net-2.6/net/ipv4/tcp_vegas.c
@@ -195,7 +195,7 @@ static void tcp_vegas_cong_avoid(struct 
        if (after(ack, vegas->beg_snd_nxt)) {
                /* Do the Vegas once-per-RTT cwnd adjustment. */
                u32 old_wnd, old_snd_cwnd;
-
+               u32 rtt, target_cwnd, diff;
 
                /* Here old_wnd is essentially the window of data that was
                 * sent during the previous RTT, and has all
@@ -231,15 +231,9 @@ static void tcp_vegas_cong_avoid(struct 
                 * If  we have 3 samples, we should be OK.
                 */
 
-               if (vegas->cntRTT <= 2) {
-                       /* We don't have enough RTT samples to do the Vegas
-                        * calculation, so we'll behave like Reno.
-                        */
-                       if (tp->snd_cwnd > tp->snd_ssthresh)
-                               tp->snd_cwnd++;
-               } else {
-                       u32 rtt, target_cwnd, diff;
-
+               if (vegas->cntRTT <= 2)
+                       tcp_reno_cong_avoid(sk, ack, seq_rtt, flag);
+               else {
                        /* We have enough RTT samples, so, using the Vegas
                         * algorithm, we determine if we should increase or
                         * decrease cwnd, and by how much.
@@ -293,6 +287,9 @@ static void tcp_vegas_cong_avoid(struct 
                                                           (target_cwnd >>
                                                            V_PARAM_SHIFT)+1);
 
+                               } else {
+                                       /* Normal Reno style slow start increase */
+                                       tcp_slow_start(tp);
                                }
                        } else {
                                /* Congestion avoidance. */
@@ -326,37 +323,17 @@ static void tcp_vegas_cong_avoid(struct 
                                else if (next_snd_cwnd < tp->snd_cwnd)
                                        tp->snd_cwnd--;
                        }
-               }
 
-               /* Wipe the slate clean for the next RTT. */
-               vegas->cntRTT = 0;
-               vegas->minRTT = 0x7fffffff;
+                       if (tp->snd_cwnd < 2)
+                               tp->snd_cwnd = 2;
+                       else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+                               tp->snd_cwnd = tp->snd_cwnd_clamp;
+               }
        }
 
-       /* The following code is executed for every ack we receive,
-        * except for conditions checked in should_advance_cwnd()
-        * before the call to tcp_cong_avoid(). Mainly this means that
-        * we only execute this code if the ack actually acked some
-        * data.
-        */
-
-       /* If we are in slow start, increase our cwnd in response to this ACK.
-        * (If we are not in slow start then we are in congestion avoidance,
-        * and adjust our congestion window only once per RTT. See the code
-        * above.)
-        */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
-               tp->snd_cwnd++;
-
-       /* to keep cwnd from growing without bound */
-       tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
-       /* Make sure that we are never so timid as to reduce our cwnd below
-        * 2 MSS.
-        *
-        * Going below 2 MSS would risk huge delayed ACKs from our receiver.
-        */
-       tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+       /* Wipe the slate clean for the next RTT. */
+       vegas->cntRTT = 0;
+       vegas->minRTT = 0x7fffffff;
 }
 
 /* Extract info for Tcp socket info provided via netlink. */

--
Stephen Hemminger <[EMAIL PROTECTED]>
OSDL http://developer.osdl.org/~shemminger
