On Mon, May 21, 2018 at 6:08 PM, Eric Dumazet <eduma...@google.com> wrote:
> We want to add finer control over the number of ACK packets sent after
> ECN events.
>
> This patch does not change current behavior; it only enables a
> follow-up change.
>
> Signed-off-by: Eric Dumazet <eduma...@google.com>

Acked-by: Soheil Hassas Yeganeh <soh...@google.com>

> ---
>  net/ipv4/tcp_input.c | 24 +++++++++++++-----------
>  1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index aebb29ab2fdf2ceaa182cd11928f145a886149ff..2e970e9f4e09d966b703af2d14d521a4328eba7e 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -203,21 +203,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
>         }
>  }
>
> -static void tcp_incr_quickack(struct sock *sk)
> +static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
>  {
>         struct inet_connection_sock *icsk = inet_csk(sk);
>         unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
>
>         if (quickacks == 0)
>                 quickacks = 2;
> +       quickacks = min(quickacks, max_quickacks);
>         if (quickacks > icsk->icsk_ack.quick)
> -               icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
> +               icsk->icsk_ack.quick = quickacks;
>  }
>
> -static void tcp_enter_quickack_mode(struct sock *sk)
> +static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
>  {
>         struct inet_connection_sock *icsk = inet_csk(sk);
> -       tcp_incr_quickack(sk);
> +
> +       tcp_incr_quickack(sk, max_quickacks);
>         icsk->icsk_ack.pingpong = 0;
>         icsk->icsk_ack.ato = TCP_ATO_MIN;
>  }
> @@ -261,7 +263,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
>                  * it is probably a retransmit.
>                  */
>                 if (tp->ecn_flags & TCP_ECN_SEEN)
> -                       tcp_enter_quickack_mode((struct sock *)tp);
> +                       tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
>                 break;
>         case INET_ECN_CE:
>                 if (tcp_ca_needs_ecn((struct sock *)tp))
> @@ -269,7 +271,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
>
>                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
>                         /* Better not delay acks, sender can have a very low cwnd */
> -                       tcp_enter_quickack_mode((struct sock *)tp);
> +                       tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
>                         tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
>                 }
>                 tp->ecn_flags |= TCP_ECN_SEEN;
> @@ -686,7 +688,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
>                 /* The _first_ data packet received, initialize
>                  * delayed ACK engine.
>                  */
> -               tcp_incr_quickack(sk);
> +               tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
>                 icsk->icsk_ack.ato = TCP_ATO_MIN;
>         } else {
>                 int m = now - icsk->icsk_ack.lrcvtime;
> @@ -702,7 +704,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
>                         /* Too long gap. Apparently sender failed to
>                          * restart window, so that we send ACKs quickly.
>                          */
> -                       tcp_incr_quickack(sk);
> +                       tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
>                         sk_mem_reclaim(sk);
>                 }
>         }
> @@ -4179,7 +4181,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
>         if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
>             before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
>                 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
> -               tcp_enter_quickack_mode(sk);
> +               tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
>
>                 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
>                         u32 end_seq = TCP_SKB_CB(skb)->end_seq;
> @@ -4706,7 +4708,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
>                 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
>
>  out_of_window:
> -               tcp_enter_quickack_mode(sk);
> +               tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
>                 inet_csk_schedule_ack(sk);
>  drop:
>                 tcp_drop(sk, skb);
> @@ -5790,7 +5792,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
>                          * to stand against the temptation 8)     --ANK
>                          */
>                         inet_csk_schedule_ack(sk);
> -                       tcp_enter_quickack_mode(sk);
> +                       tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
>                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
>                                                   TCP_DELACK_MAX, TCP_RTO_MAX);
>
> --
> 2.17.0.441.gb46fe60e1d-goog
>
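
For anyone skimming the diff: the knob this plumbing enables is just a
min() clamp on the quick-ACK budget. Below is a small standalone model
of the arithmetic tcp_incr_quickack() now performs (plain user-space C,
not kernel code; incr_quickack() is a hypothetical helper, the
window/MSS values are made up, and TCP_MAX_QUICKACKS is 16U in this
tree):

#include <stdio.h>

/* Model of tcp_incr_quickack(): derive the quick-ACK budget from
 * rcv_wnd / (2 * rcv_mss), floor it at 2, then clamp it to the
 * caller-supplied max_quickacks.
 */
static unsigned int incr_quickack(unsigned int rcv_wnd, unsigned int rcv_mss,
				  unsigned int max_quickacks)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > max_quickacks)
		quickacks = max_quickacks;
	return quickacks;
}

int main(void)
{
	/* 64KB window, 1460-byte MSS: the raw value is 22, clamped to 16
	 * by today's TCP_MAX_QUICKACKS. A follow-up change can now pass a
	 * much smaller cap at specific call sites (e.g. after ECN events)
	 * without touching the others.
	 */
	printf("%u\n", incr_quickack(65535, 1460, 16));	/* prints 16 */
	printf("%u\n", incr_quickack(65535, 1460, 1));	/* prints 1 */
	return 0;
}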
