This patch removes the iph field from the state structure, which is not
properly initialized. Instead, add a new field to make the "do we want
to set DF" be the state bit and move the code to set the DF flag from
ip_frag_next().

Joint work with Pablo and Linus.

Fixes: 19c3401a917b ("net: ipv4: place control buffer handling away from fragmentation iterators")
Reported-by: Patrick Schönthaler <patr...@notvads.ovh>
Signed-off-by: Eric Dumazet <eduma...@google.com>
Signed-off-by: Pablo Neira Ayuso <pa...@netfilter.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
---
 include/net/ip.h                           |  4 ++--
 net/bridge/netfilter/nf_conntrack_bridge.c |  2 +-
 net/ipv4/ip_output.c                       | 11 ++++++-----
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/net/ip.h b/include/net/ip.h
index 95bb77f95bcc..a2c61c36dc4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
 }
 
 struct ip_frag_state {
-       struct iphdr    *iph;
+       bool            DF;
        unsigned int    hlen;
        unsigned int    ll_rs;
        unsigned int    mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
 };
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
-                 unsigned int mtu, struct ip_frag_state *state);
+                 unsigned int mtu, bool DF, struct ip_frag_state *state);
 struct sk_buff *ip_frag_next(struct sk_buff *skb,
                             struct ip_frag_state *state);
 
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 8842798c29e6..3f51ff4db184 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -93,7 +93,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
         * This may also be a clone skbuff, we could preserve the geometry for
         * the copies but probably not worth the effort.
         */
-       ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+       ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
        while (state.left > 0) {
                struct sk_buff *skb2;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 28fca408812c..06a8b56ba33d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
-                 unsigned int ll_rs, unsigned int mtu,
+                 unsigned int ll_rs, unsigned int mtu, bool DF,
                  struct ip_frag_state *state)
 {
        struct iphdr *iph = ip_hdr(skb);
 
+       state->DF = DF;
        state->hlen = hlen;
        state->ll_rs = ll_rs;
        state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
-       if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
-               state->iph->frag_off |= htons(IP_DF);
-
        /* ANK: dirty, but effective trick. Upgrade options only if
         * the segment to be fragmented was THE FIRST (otherwise,
         * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
         */
        iph = ip_hdr(skb2);
        iph->frag_off = htons((state->offset >> 3));
+       if (state->DF)
+               iph->frag_off |= htons(IP_DF);
 
        /*
         *      Added AC : If we are fragmenting a fragment that's not the
@@ -881,7 +881,8 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
         *      Fragment the datagram.
         */
 
-       ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+       ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+                    &state);
 
        /*
         *      Keep copying data until we run out.
-- 
2.11.0

Reply via email to