commit:     84b9ee968b0af2855792c462f51eefa3bcb66980
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 18 16:27:30 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Feb 18 16:27:30 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84b9ee96

Linux patch 4.4.50

 0000_README             |   4 +
 1049_linux-4.4.50.patch | 763 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 767 insertions(+)

diff --git a/0000_README b/0000_README
index 976dbf2..7f2e718 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,10 @@ Patch:  1048_linux-4.4.49.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.49
 
+Patch:  1049_linux-4.4.50.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.50
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1049_linux-4.4.50.patch b/1049_linux-4.4.50.patch
new file mode 100644
index 0000000..ac8d290
--- /dev/null
+++ b/1049_linux-4.4.50.patch
@@ -0,0 +1,763 @@
+diff --git a/Makefile b/Makefile
+index 5fab6d4068b5..10993715abb8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index bbff8ec6713e..28a4b34310b2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -502,8 +502,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+               return;
+ 
+       for (ring = 0; ring < priv->rx_ring_num; ring++) {
+-              if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++              if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
++                      local_bh_disable();
+                       napi_reschedule(&priv->rx_cq[ring]->napi);
++                      local_bh_enable();
++              }
+       }
+ }
+ 
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index dc7d970bd1c0..effcdbfb06e9 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
+ {
+       dev->mtu                = 64 * 1024;
+       dev->hard_header_len    = ETH_HLEN;     /* 14   */
++      dev->min_header_len     = ETH_HLEN;     /* 14   */
+       dev->addr_len           = ETH_ALEN;     /* 6    */
+       dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
+       dev->flags              = IFF_LOOPBACK;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 159a68782bec..79de9608ac48 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -725,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+       ssize_t n;
+ 
+       if (q->flags & IFF_VNET_HDR) {
+-              vnet_hdr_len = q->vnet_hdr_sz;
++              vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 
+               err = -EINVAL;
+               if (len < vnet_hdr_len)
+@@ -865,7 +865,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ 
+       if (q->flags & IFF_VNET_HDR) {
+               struct virtio_net_hdr vnet_hdr;
+-              vnet_hdr_len = q->vnet_hdr_sz;
++              vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+               if (iov_iter_count(iter) < vnet_hdr_len)
+                       return -EINVAL;
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 111b972e3053..c31d8e74f131 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1108,9 +1108,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+       }
+ 
+       if (tun->flags & IFF_VNET_HDR) {
+-              if (len < tun->vnet_hdr_sz)
++              int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
++              if (len < vnet_hdr_sz)
+                       return -EINVAL;
+-              len -= tun->vnet_hdr_sz;
++              len -= vnet_hdr_sz;
+ 
+               n = copy_from_iter(&gso, sizeof(gso), from);
+               if (n != sizeof(gso))
+@@ -1122,7 +1124,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 
+               if (tun16_to_cpu(tun, gso.hdr_len) > len)
+                       return -EINVAL;
+-              iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
++              iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+       }
+ 
+       if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
+@@ -1301,7 +1303,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+               vlan_hlen = VLAN_HLEN;
+ 
+       if (tun->flags & IFF_VNET_HDR)
+-              vnet_hdr_sz = tun->vnet_hdr_sz;
++              vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ 
+       total = skb->len + vlan_hlen + vnet_hdr_sz;
+ 
+diff --git a/include/linux/can/core.h b/include/linux/can/core.h
+index a0875001b13c..df08a41d5be5 100644
+--- a/include/linux/can/core.h
++++ b/include/linux/can/core.h
+@@ -45,10 +45,9 @@ struct can_proto {
+ extern int  can_proto_register(const struct can_proto *cp);
+ extern void can_proto_unregister(const struct can_proto *cp);
+ 
+-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
+-                          canid_t mask,
+-                          void (*func)(struct sk_buff *, void *),
+-                          void *data, char *ident);
++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
++                  void (*func)(struct sk_buff *, void *),
++                  void *data, char *ident, struct sock *sk);
+ 
+ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+                             canid_t mask,
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 93a6a2c66d15..4035bbe40971 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1399,6 +1399,7 @@ enum netdev_priv_flags {
+  *    @mtu:           Interface MTU value
+  *    @type:          Interface hardware type
+  *    @hard_header_len: Maximum hardware header length.
++ *    @min_header_len:  Minimum hardware header length
+  *
+  *    @needed_headroom: Extra headroom the hardware may need, but not in all
+  *                      cases can this be guaranteed
+@@ -1619,6 +1620,7 @@ struct net_device {
+       unsigned int            mtu;
+       unsigned short          type;
+       unsigned short          hard_header_len;
++      unsigned short          min_header_len;
+ 
+       unsigned short          needed_headroom;
+       unsigned short          needed_tailroom;
+@@ -2541,6 +2543,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
+ {
+       if (likely(len >= dev->hard_header_len))
+               return true;
++      if (len < dev->min_header_len)
++              return false;
+ 
+       if (capable(CAP_SYS_RAWIO)) {
+               memset(ll_header + len, 0, dev->hard_header_len - len);
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index 3ebb168b9afc..a34b141f125f 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+       }
+ 
+       for (opt_iter = 6; opt_iter < opt_len;) {
++              if (opt_iter + 1 == opt_len) {
++                      err_offset = opt_iter;
++                      goto out;
++              }
+               tag_len = opt[opt_iter + 1];
+               if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
+                       err_offset = opt_iter + 1;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 166d436196c1..928f58064098 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  * @func: callback function on filter match
+  * @data: returned parameter for callback function
+  * @ident: string for calling module identification
++ * @sk: socket pointer (might be NULL)
+  *
+  * Description:
+  *  Invokes the callback function with the received sk_buff and the given
+@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  */
+ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+                   void (*func)(struct sk_buff *, void *), void *data,
+-                  char *ident)
++                  char *ident, struct sock *sk)
+ {
+       struct receiver *r;
+       struct hlist_head *rl;
+@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+               r->func    = func;
+               r->data    = data;
+               r->ident   = ident;
++              r->sk      = sk;
+ 
+               hlist_add_head_rcu(&r->list, rl);
+               d->entries++;
+@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
+ static void can_rx_delete_receiver(struct rcu_head *rp)
+ {
+       struct receiver *r = container_of(rp, struct receiver, rcu);
++      struct sock *sk = r->sk;
+ 
+       kmem_cache_free(rcv_cache, r);
++      if (sk)
++              sock_put(sk);
+ }
+ 
+ /**
+@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
+       spin_unlock(&can_rcvlists_lock);
+ 
+       /* schedule the receiver item for deletion */
+-      if (r)
++      if (r) {
++              if (r->sk)
++                      sock_hold(r->sk);
+               call_rcu(&r->rcu, can_rx_delete_receiver);
++      }
+ }
+ EXPORT_SYMBOL(can_rx_unregister);
+ 
+diff --git a/net/can/af_can.h b/net/can/af_can.h
+index fca0fe9fc45a..b86f5129e838 100644
+--- a/net/can/af_can.h
++++ b/net/can/af_can.h
+@@ -50,13 +50,14 @@
+ 
+ struct receiver {
+       struct hlist_node list;
+-      struct rcu_head rcu;
+       canid_t can_id;
+       canid_t mask;
+       unsigned long matches;
+       void (*func)(struct sk_buff *, void *);
+       void *data;
+       char *ident;
++      struct sock *sk;
++      struct rcu_head rcu;
+ };
+ 
+ #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 24d66c1cc0cd..4ccfd356baed 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1179,7 +1179,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+                               err = can_rx_register(dev, op->can_id,
+                                                     REGMASK(op->can_id),
+                                                     bcm_rx_handler, op,
+-                                                    "bcm");
++                                                    "bcm", sk);
+ 
+                               op->rx_reg_dev = dev;
+                               dev_put(dev);
+@@ -1188,7 +1188,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+               } else
+                       err = can_rx_register(NULL, op->can_id,
+                                             REGMASK(op->can_id),
+-                                            bcm_rx_handler, op, "bcm");
++                                            bcm_rx_handler, op, "bcm", sk);
+               if (err) {
+                       /* this bcm rx op is broken -> remove it */
+                       list_del(&op->list);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 455168718c2e..77c8af4047ef 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
+ {
+       return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+                              gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+-                             gwj, "gw");
++                             gwj, "gw", NULL);
+ }
+ 
+ static inline void cgw_unregister_filter(struct cgw_job *gwj)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 56af689ca999..e9403a26a1d5 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
+       for (i = 0; i < count; i++) {
+               err = can_rx_register(dev, filter[i].can_id,
+                                     filter[i].can_mask,
+-                                    raw_rcv, sk, "raw");
++                                    raw_rcv, sk, "raw", sk);
+               if (err) {
+                       /* clean up successfully registered filters */
+                       while (--i >= 0)
+@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
+ 
+       if (err_mask)
+               err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+-                                    raw_rcv, sk, "raw");
++                                    raw_rcv, sk, "raw", sk);
+ 
+       return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0798a0f1b395..08215a85c742 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1676,24 +1676,19 @@ EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
+ 
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+-/* We are not allowed to call static_key_slow_dec() from irq context
+- * If net_disable_timestamp() is called from irq context, defer the
+- * static_key_slow_dec() calls.
+- */
+ static atomic_t netstamp_needed_deferred;
+-#endif
+-
+-void net_enable_timestamp(void)
++static void netstamp_clear(struct work_struct *work)
+ {
+-#ifdef HAVE_JUMP_LABEL
+       int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+ 
+-      if (deferred) {
+-              while (--deferred)
+-                      static_key_slow_dec(&netstamp_needed);
+-              return;
+-      }
++      while (deferred--)
++              static_key_slow_dec(&netstamp_needed);
++}
++static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
++
++void net_enable_timestamp(void)
++{
+       static_key_slow_inc(&netstamp_needed);
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+@@ -1701,12 +1696,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+-      if (in_interrupt()) {
+-              atomic_inc(&netstamp_needed_deferred);
+-              return;
+-      }
+-#endif
++      /* net_disable_timestamp() can be called from non process context */
++      atomic_inc(&netstamp_needed_deferred);
++      schedule_work(&netstamp_work);
++#else
+       static_key_slow_dec(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_disable_timestamp);
+ 
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index de85d4e1cf43..52dcd414c2af 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -353,6 +353,7 @@ void ether_setup(struct net_device *dev)
+       dev->header_ops         = &eth_header_ops;
+       dev->type               = ARPHRD_ETHER;
+       dev->hard_header_len    = ETH_HLEN;
++      dev->min_header_len     = ETH_HLEN;
+       dev->mtu                = ETH_DATA_LEN;
+       dev->addr_len           = ETH_ALEN;
+       dev->tx_queue_len       = 1000; /* Ethernet wants good queues */
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index bdb2a07ec363..6cc3e1d602fb 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1657,6 +1657,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+                               goto validate_return_locked;
+                       }
+ 
++              if (opt_iter + 1 == opt_len) {
++                      err_offset = opt_iter;
++                      goto validate_return_locked;
++              }
+               tag_len = tag[1];
+               if (tag_len > (opt_len - opt_iter)) {
+                       err_offset = opt_iter + 1;
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 9ce202549e7a..bc14c5bb124b 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1192,7 +1192,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+               pktinfo->ipi_ifindex = 0;
+               pktinfo->ipi_spec_dst.s_addr = 0;
+       }
+-      skb_dst_drop(skb);
++      /* We need to keep the dst for __ip_options_echo()
++       * We could restrict the test to opt.ts_needtime || opt.srr,
++       * but the following is good enough as IP options are not often used.
++       */
++      if (unlikely(IPCB(skb)->opt.optlen))
++              skb_dst_force(skb);
++      else
++              skb_dst_drop(skb);
+ }
+ 
+ int ip_setsockopt(struct sock *sk, int level,
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 23160d2b3f71..3a00512addbc 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -645,6 +645,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+ {
+       struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+ 
++      if (!skb)
++              return 0;
+       pfh->wcheck = csum_partial((char *)&pfh->icmph,
+               sizeof(struct icmphdr), pfh->wcheck);
+       pfh->icmph.checksum = csum_fold(pfh->wcheck);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 69daa81736f6..600dcda840d1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -783,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+                               ret = -EAGAIN;
+                               break;
+                       }
++                      /* if __tcp_splice_read() got nothing while we have
++                       * an skb in receive queue, we do not want to loop.
++                       * This might happen with URG data.
++                       */
++                      if (!skb_queue_empty(&sk->sk_receive_queue))
++                              break;
+                       sk_wait_data(sk, &timeo, NULL);
+                       if (signal_pending(current)) {
+                               ret = sock_intr_errno(timeo);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0795647e94c6..de95714d021c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2383,9 +2383,11 @@ u32 __tcp_select_window(struct sock *sk)
+       int full_space = min_t(int, tp->window_clamp, allowed_space);
+       int window;
+ 
+-      if (mss > full_space)
++      if (unlikely(mss > full_space)) {
+               mss = full_space;
+-
++              if (mss <= 0)
++                      return 0;
++      }
+       if (free_space < (full_space >> 1)) {
+               icsk->icsk_ack.quick = 0;
+ 
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 17430f341073..e89135828c3d 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -55,6 +55,7 @@
+ #include <net/ip6_fib.h>
+ #include <net/ip6_route.h>
+ #include <net/ip6_tunnel.h>
++#include <net/gre.h>
+ 
+ 
+ static bool log_ecn_error = true;
+@@ -367,35 +368,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ 
+ 
+ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+-              u8 type, u8 code, int offset, __be32 info)
++                     u8 type, u8 code, int offset, __be32 info)
+ {
+-      const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+-      __be16 *p = (__be16 *)(skb->data + offset);
+-      int grehlen = offset + 4;
++      const struct gre_base_hdr *greh;
++      const struct ipv6hdr *ipv6h;
++      int grehlen = sizeof(*greh);
+       struct ip6_tnl *t;
++      int key_off = 0;
+       __be16 flags;
++      __be32 key;
+ 
+-      flags = p[0];
+-      if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+-              if (flags&(GRE_VERSION|GRE_ROUTING))
+-                      return;
+-              if (flags&GRE_KEY) {
+-                      grehlen += 4;
+-                      if (flags&GRE_CSUM)
+-                              grehlen += 4;
+-              }
++      if (!pskb_may_pull(skb, offset + grehlen))
++              return;
++      greh = (const struct gre_base_hdr *)(skb->data + offset);
++      flags = greh->flags;
++      if (flags & (GRE_VERSION | GRE_ROUTING))
++              return;
++      if (flags & GRE_CSUM)
++              grehlen += 4;
++      if (flags & GRE_KEY) {
++              key_off = grehlen + offset;
++              grehlen += 4;
+       }
+ 
+-      /* If only 8 bytes returned, keyed message will be dropped here */
+-      if (!pskb_may_pull(skb, grehlen))
++      if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       ipv6h = (const struct ipv6hdr *)skb->data;
+-      p = (__be16 *)(skb->data + offset);
++      greh = (const struct gre_base_hdr *)(skb->data + offset);
++      key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
+ 
+       t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+-                              flags & GRE_KEY ?
+-                              *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+-                              p[1]);
++                               key, greh->protocol);
+       if (!t)
+               return;
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 2994d1f1a661..6c6161763c2f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -479,18 +479,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
+ 
+ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ {
+-      const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
+-      __u8 nexthdr = ipv6h->nexthdr;
+-      __u16 off = sizeof(*ipv6h);
++      const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
++      unsigned int nhoff = raw - skb->data;
++      unsigned int off = nhoff + sizeof(*ipv6h);
++      u8 next, nexthdr = ipv6h->nexthdr;
+ 
+       while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+-              __u16 optlen = 0;
+               struct ipv6_opt_hdr *hdr;
+-              if (raw + off + sizeof(*hdr) > skb->data &&
+-                  !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
++              u16 optlen;
++
++              if (!pskb_may_pull(skb, off + sizeof(*hdr)))
+                       break;
+ 
+-              hdr = (struct ipv6_opt_hdr *) (raw + off);
++              hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+               if (nexthdr == NEXTHDR_FRAGMENT) {
+                       struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+                       if (frag_hdr->frag_off)
+@@ -501,20 +502,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+               } else {
+                       optlen = ipv6_optlen(hdr);
+               }
++              /* cache hdr->nexthdr, since pskb_may_pull() might
++               * invalidate hdr
++               */
++              next = hdr->nexthdr;
+               if (nexthdr == NEXTHDR_DEST) {
+-                      __u16 i = off + 2;
++                      u16 i = 2;
++
++                      /* Remember : hdr is no longer valid at this point. */
++                      if (!pskb_may_pull(skb, off + optlen))
++                              break;
++
+                       while (1) {
+                               struct ipv6_tlv_tnl_enc_lim *tel;
+ 
+                               /* No more room for encapsulation limit */
+-                              if (i + sizeof (*tel) > off + optlen)
++                              if (i + sizeof(*tel) > optlen)
+                                       break;
+ 
+-                              tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+                              tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
+                               /* return index of option if found and valid */
+                               if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
+                                   tel->length == 1)
+-                                      return i;
++                                      return i + off - nhoff;
+                               /* else jump to next option */
+                               if (tel->type)
+                                       i += tel->length + 2;
+@@ -522,7 +532,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+                                       i++;
+                       }
+               }
+-              nexthdr = hdr->nexthdr;
++              nexthdr = next;
+               off += optlen;
+       }
+       return 0;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 3da2b16356eb..184f0fe35dc6 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1389,6 +1389,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+       tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+       if (!tunnel->dst_cache) {
+               free_percpu(dev->tstats);
++              dev->tstats = NULL;
+               return -ENOMEM;
+       }
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 5f581616bf6a..76a8c8057a23 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -974,6 +974,16 @@ drop:
+       return 0; /* don't send reset */
+ }
+ 
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++      /* We need to move header back to the beginning if xfrm6_policy_check()
++       * and tcp_v6_fill_cb() are going to be called again.
++       * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
++       */
++      memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++              sizeof(struct inet6_skb_parm));
++}
++
+static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+                                        struct request_sock *req,
+                                        struct dst_entry *dst,
+@@ -1163,8 +1173,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+                                                     sk_gfp_atomic(sk, GFP_ATOMIC));
+                       consume_skb(ireq->pktopts);
+                       ireq->pktopts = NULL;
+-                      if (newnp->pktoptions)
++                      if (newnp->pktoptions) {
++                              tcp_v6_restore_cb(newnp->pktoptions);
+                               skb_set_owner_r(newnp->pktoptions, newsk);
++                      }
+               }
+       }
+ 
+@@ -1179,16 +1191,6 @@ out:
+       return NULL;
+ }
+ 
+-static void tcp_v6_restore_cb(struct sk_buff *skb)
+-{
+-      /* We need to move header back to the beginning if xfrm6_policy_check()
+-       * and tcp_v6_fill_cb() are going to be called again.
+-       * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+-       */
+-      memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+-              sizeof(struct inet6_skb_parm));
+-}
+-
+ /* The socket must have it's spinlock held when we get
+  * here, unless it is a TCP_LISTEN socket.
+  *
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 5871537af387..763e8e241ce3 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+                        const struct l2tp_nl_cmd_ops *ops);
+ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+ 
+ /* Session reference counts. Incremented when code obtains a reference
+  * to a session.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index d0e906d39642..445b7cd0826a 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -11,6 +11,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <asm/ioctls.h>
+ #include <linux/icmp.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+@@ -555,6 +556,30 @@ out:
+       return err ? err : copied;
+ }
+ 
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
++{
++      struct sk_buff *skb;
++      int amount;
++
++      switch (cmd) {
++      case SIOCOUTQ:
++              amount = sk_wmem_alloc_get(sk);
++              break;
++      case SIOCINQ:
++              spin_lock_bh(&sk->sk_receive_queue.lock);
++              skb = skb_peek(&sk->sk_receive_queue);
++              amount = skb ? skb->len : 0;
++              spin_unlock_bh(&sk->sk_receive_queue.lock);
++              break;
++
++      default:
++              return -ENOIOCTLCMD;
++      }
++
++      return put_user(amount, (int __user *)arg);
++}
++EXPORT_SYMBOL(l2tp_ioctl);
++
+ static struct proto l2tp_ip_prot = {
+       .name              = "L2TP/IP",
+       .owner             = THIS_MODULE,
+@@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = {
+       .bind              = l2tp_ip_bind,
+       .connect           = l2tp_ip_connect,
+       .disconnect        = l2tp_ip_disconnect,
+-      .ioctl             = udp_ioctl,
++      .ioctl             = l2tp_ioctl,
+       .destroy           = l2tp_ip_destroy_sock,
+       .setsockopt        = ip_setsockopt,
+       .getsockopt        = ip_getsockopt,
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 3c4f867d3633..bcdab1cba773 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -714,7 +714,7 @@ static struct proto l2tp_ip6_prot = {
+       .bind              = l2tp_ip6_bind,
+       .connect           = l2tp_ip6_connect,
+       .disconnect        = l2tp_ip6_disconnect,
+-      .ioctl             = udp_ioctl,
++      .ioctl             = l2tp_ioctl,
+       .destroy           = l2tp_ip6_destroy_sock,
+       .setsockopt        = ipv6_setsockopt,
+       .getsockopt        = ipv6_getsockopt,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index f223d1c80ccf..f2d28ed74a0a 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2637,7 +2637,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+       int vnet_hdr_len;
+       struct packet_sock *po = pkt_sk(sk);
+       unsigned short gso_type = 0;
+-      int hlen, tlen;
++      int hlen, tlen, linear;
+       int extra_len = 0;
+       ssize_t n;
+ 
+@@ -2741,8 +2741,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+       err = -ENOBUFS;
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+-      skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
+-                             __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
++      linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
++      linear = max(linear, min_t(int, len, dev->hard_header_len));
++      skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
+                              msg->msg_flags & MSG_DONTWAIT, &err);
+       if (skb == NULL)
+               goto out_unlock;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index b5fd4ab56156..138f2d667212 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6960,7 +6960,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+                */
+               release_sock(sk);
+               current_timeo = schedule_timeout(current_timeo);
+-              BUG_ON(sk != asoc->base.sk);
++              if (sk != asoc->base.sk)
++                      goto do_error;
+               lock_sock(sk);
+ 
+               *timeo_p = current_timeo;

Reply via email to