This patch adds TCP_NLA_SND_SSTHRESH stat into SCM_TIMESTAMPING_OPT_STATS
that reports tcp_sock.snd_ssthresh.

Signed-off-by: Yousuk Seung <ysse...@google.com>
Signed-off-by: Neal Cardwell <ncardw...@google.com>
Signed-off-by: Priyaranjan Jha <priyar...@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soh...@google.com>
Signed-off-by: Yuchung Cheng <ych...@google.com>
---
 include/uapi/linux/tcp.h | 1 +
 net/ipv4/tcp.c           | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 4c0ae0faf7ca..560374c978f9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -243,6 +243,7 @@ enum {
        TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
        TCP_NLA_SNDQ_SIZE,      /* Data (bytes) pending in send queue */
        TCP_NLA_CA_STATE,       /* ca_state of socket */
+       TCP_NLA_SND_SSTHRESH,   /* Slow start size threshold */
 
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fb350f740f69..e553f84bde83 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3031,7 +3031,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
        u32 rate;
 
        stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
-                         4 * nla_total_size(sizeof(u32)) +
+                         5 * nla_total_size(sizeof(u32)) +
                          3 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
        if (!stats)
                return NULL;
@@ -3061,6 +3061,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
 
        nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
        nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
+       nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
 
        nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
        nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
-- 
2.16.2.804.g6dcf76e118-goog

Reply via email to