On 1/23/18, 5:05 PM, "Daniel Borkmann" <dan...@iogearbox.net> wrote:
On 01/20/2018 02:45 AM, Lawrence Brakmo wrote:
> Add support for reading many more tcp_sock fields:
>
>   state              same as sk->sk_state
>   rtt_min            same as sk->rtt_min.s[0].v (current rtt_min)
>   snd_ssthresh
>   rcv_nxt
>   snd_nxt
>   snd_una
>   mss_cache
>   ecn_flags
>   rate_delivered
>   rate_interval_us
>   packets_out
>   retrans_out
>   total_retrans
>   segs_in
>   data_segs_in
>   segs_out
>   data_segs_out
>   bytes_received     (__u64)
>   bytes_acked        (__u64)
>
> Signed-off-by: Lawrence Brakmo <bra...@fb.com>
> ---
>  include/uapi/linux/bpf.h |  19 +++++++
>  net/core/filter.c        | 134 ++++++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 140 insertions(+), 13 deletions(-)
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 2a8c40a..ff34f3c 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -979,6 +979,25 @@ struct bpf_sock_ops {
>  	__u32 snd_cwnd;
>  	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
>  	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
> +	__u32 state;
> +	__u32 rtt_min;
> +	__u32 snd_ssthresh;
> +	__u32 rcv_nxt;
> +	__u32 snd_nxt;
> +	__u32 snd_una;
> +	__u32 mss_cache;
> +	__u32 ecn_flags;
> +	__u32 rate_delivered;
> +	__u32 rate_interval_us;
> +	__u32 packets_out;
> +	__u32 retrans_out;
> +	__u32 total_retrans;
> +	__u32 segs_in;
> +	__u32 data_segs_in;
> +	__u32 segs_out;
> +	__u32 data_segs_out;

Btw, this will leave a 4-byte hole here, which the user could otherwise
address from the prog. Could you add the sk_txhash from the next patch
in between here instead?

Good point. Will fix in new patch. Thanks Daniel.

> +	__u64 bytes_received;
> +	__u64 bytes_acked;
>  };
>
>  /* List of known BPF sock_ops operators.
> diff --git a/net/core/filter.c b/net/core/filter.c
> index c9411dc..98665ba 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -3849,34 +3849,43 @@ void bpf_warn_invalid_xdp_action(u32 act)
>  }
>  EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
>
> -static bool __is_valid_sock_ops_access(int off, int size)
> +static bool sock_ops_is_valid_access(int off, int size,
> +				     enum bpf_access_type type,
> +				     struct bpf_insn_access_aux *info)
>  {
> +	const int size_default = sizeof(__u32);
> +
>  	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
>  		return false;
> +
>  	/* The verifier guarantees that size > 0. */
>  	if (off % size != 0)
>  		return false;
> -	if (size != sizeof(__u32))
> -		return false;
> -
> -	return true;
> -}
>
> -static bool sock_ops_is_valid_access(int off, int size,
> -				     enum bpf_access_type type,
> -				     struct bpf_insn_access_aux *info)
> -{
>  	if (type == BPF_WRITE) {
>  		switch (off) {
> -		case offsetof(struct bpf_sock_ops, op) ...
> -		     offsetof(struct bpf_sock_ops, replylong[3]):
> +		case bpf_ctx_range_till(struct bpf_sock_ops, op, replylong[3]):
> +			if (size != size_default)
> +				return false;
>  			break;
>  		default:
>  			return false;
>  		}
> +	} else {
> +		switch (off) {
> +		case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
> +					bytes_acked):
> +			if (size != sizeof(__u64))
> +				return false;
> +			break;
> +		default:
> +			if (size != size_default)
> +				return false;
> +			break;
> +		}
>  	}
>
> -	return __is_valid_sock_ops_access(off, size);
> +	return true;
>  }
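
To make the hole Daniel points at concrete: the 17 new __u32 fields leave
struct bpf_sock_ops at an offset that is 4-byte but not 8-byte aligned, so
the compiler inserts 4 bytes of padding before the first __u64. A reduced
sketch (hypothetical struct, not the real bpf_sock_ops layout):

/* An odd number of __u32 members before a __u64 forces 4 bytes of
 * padding, and a BPF program could issue a load at that padding
 * offset since it is inside sizeof(struct bpf_sock_ops).
 */
struct hole_demo {
	__u32 a;	/* offset  0 */
	__u32 b;	/* offset  4 */
	__u32 c;	/* offset  8 */
	/* 4-byte hole: offsets 12..15 are padding */
	__u64 d;	/* offset 16, 8-byte aligned */
};
/* Adding one more __u32 in the hole (e.g. sk_txhash, as suggested
 * above) makes the __u64 members start naturally aligned.
 */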
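
And for completeness, a minimal hypothetical sockops program reading the new
fields; the field names match the patch, but this snippet is not part of it
and assumes a kernel/headers with the patch applied. The 8-byte load on
bytes_acked is exactly what the sock_ops_is_valid_access() change permits:

#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("sockops")
int sockops_read_demo(struct bpf_sock_ops *skops)
{
	__u32 ssthresh = skops->snd_ssthresh; /* 4-byte read: size_default  */
	__u64 acked    = skops->bytes_acked;  /* 8-byte read: required size */

	/* The new fields are read-only; writes are still limited to the
	 * op..replylong[3] range by the BPF_WRITE branch, so storing the
	 * result in reply is allowed.
	 */
	skops->reply = (acked > 0) + (ssthresh > 0);
	return 1;
}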