commit:     3cb8990c8709a9dbf325aa48a490cb8acbcfc3d5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 31 17:56:11 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 31 17:56:11 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3cb8990c

Linux patch 4.14.191

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |   4 +
 1190_linux-4.14.191.patch | 385 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 389 insertions(+)

diff --git a/0000_README b/0000_README
index d75f6b6..a0f0dc8 100644
--- a/0000_README
+++ b/0000_README
@@ -803,6 +803,10 @@ Patch:  1189_linux-4.14.190.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.190
 
+Patch:  1190_linux-4.14.191.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.191
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1190_linux-4.14.191.patch b/1190_linux-4.14.191.patch
new file mode 100644
index 0000000..a324207
--- /dev/null
+++ b/1190_linux-4.14.191.patch
@@ -0,0 +1,385 @@
+diff --git a/Makefile b/Makefile
+index 4e5f6615fd98..e31c1ce12895 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 190
++SUBLEVEL = 191
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 36ce3511c733..7d0c83b47259 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -204,6 +204,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+       if (*ppos < 0 || !count)
+               return -EINVAL;
+ 
++      if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++              count = PAGE_SIZE << (MAX_ORDER - 1);
++
+       buf = kmalloc(count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+@@ -352,6 +355,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
+       if (*ppos < 0 || !count)
+               return -EINVAL;
+ 
++      if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
++              count = PAGE_SIZE << (MAX_ORDER - 1);
++
+       buf = kmalloc(count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 3eaefecd4448..229cab00c4b0 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
+       netif_wake_queue(sl->dev);
+ }
+ 
+-/* Send one completely decapsulated IP datagram to the IP layer. */
++/* Send an LAPB frame to the LAPB module to process. */
+ 
+ static void x25_asy_bump(struct x25_asy *sl)
+ {
+@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
+       count = sl->rcount;
+       dev->stats.rx_bytes += count;
+ 
+-      skb = dev_alloc_skb(count+1);
++      skb = dev_alloc_skb(count);
+       if (skb == NULL) {
+               netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
+               dev->stats.rx_dropped++;
+               return;
+       }
+-      skb_push(skb, 1);       /* LAPB internal control */
+       skb_put_data(skb, sl->rbuff, count);
+       skb->protocol = x25_type_trans(skb, sl->dev);
+       err = lapb_data_received(skb->dev, skb);
+@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
+               kfree_skb(skb);
+               printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+       } else {
+-              netif_rx(skb);
+               dev->stats.rx_packets++;
+       }
+ }
+@@ -355,12 +353,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+  */
+ 
+ /*
+- *    Called when I frame data arrives. We did the work above - throw it
+- *    at the net layer.
++ *    Called when I frame data arrive. We add a pseudo header for upper
++ *    layers and pass it to upper layers.
+  */
+ 
+ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
++      if (skb_cow(skb, 1)) {
++              kfree_skb(skb);
++              return NET_RX_DROP;
++      }
++      skb_push(skb, 1);
++      skb->data[0] = X25_IFACE_DATA;
++
++      skb->protocol = x25_type_trans(skb, dev);
++
+       return netif_rx(skb);
+ }
+ 
+@@ -656,7 +663,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+       switch (s) {
+       case X25_END:
+               if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+-                  sl->rcount > 2)
++                  sl->rcount >= 2)
+                       x25_asy_bump(sl);
+               clear_bit(SLF_ESCAPE, &sl->flags);
+               sl->rcount = 0;
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 84245d210182..2b07dadc5916 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -761,12 +761,16 @@ xfs_bmap_extents_to_btree(
+       *logflagsp = 0;
+       if ((error = xfs_alloc_vextent(&args))) {
+               xfs_iroot_realloc(ip, -1, whichfork);
++              ASSERT(ifp->if_broot == NULL);
++              XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+               xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+               return error;
+       }
+ 
+       if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+               xfs_iroot_realloc(ip, -1, whichfork);
++              ASSERT(ifp->if_broot == NULL);
++              XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+               xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+               return -ENOSPC;
+       }
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 60aea230dc6a..61eb40fef759 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -209,6 +209,8 @@ struct tcp_sock {
+               u8 reord;    /* reordering detected */
+       } rack;
+       u16     advmss;         /* Advertised MSS                       */
++      u8      tlp_retrans:1,  /* TLP is a retransmission */
++              unused_1:7;
+       u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
+       u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+       u8      chrono_type:2,  /* current chronograph type */
+@@ -229,7 +231,7 @@ struct tcp_sock {
+               syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+               save_syn:1,     /* Save headers of SYN packet */
+               is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+-      u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
++      u32     tlp_high_seq;   /* snd_nxt at the time of TLP */
+ 
+ /* RTT measurement */
+       u64     tcp_mstamp;     /* most recent packet received/sent */
+diff --git a/mm/page_owner.c b/mm/page_owner.c
+index 6ac05a6ff2d1..4753b317ef7b 100644
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -617,7 +617,6 @@ static void init_early_allocated_pages(void)
+ {
+       pg_data_t *pgdat;
+ 
+-      drain_all_pages(NULL);
+       for_each_online_pgdat(pgdat)
+               init_zones_in_node(pgdat);
+ }
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 89d074ce10fc..6915eebc7a4a 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1191,7 +1191,10 @@ static int __must_check ax25_connect(struct socket *sock,
+       if (addr_len > sizeof(struct sockaddr_ax25) &&
+           fsa->fsa_ax25.sax25_ndigis != 0) {
+               /* Valid number of digipeaters ? */
+-              if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
++              if (fsa->fsa_ax25.sax25_ndigis < 1 ||
++                  fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
++                  addr_len < sizeof(struct sockaddr_ax25) +
++                  sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
+                       err = -EINVAL;
+                       goto out_release;
+               }
+@@ -1511,7 +1514,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+                       struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
+ 
+                       /* Valid number of digipeaters ? */
+-                      if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
++                      if (usax->sax25_ndigis < 1 ||
++                          usax->sax25_ndigis > AX25_MAX_DIGIS ||
++                          addr_len < sizeof(struct sockaddr_ax25) +
++                          sizeof(ax25_address) * usax->sax25_ndigis) {
+                               err = -EINVAL;
+                               goto out;
+                       }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1ee177485fd0..7ec549e481e3 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4626,7 +4626,7 @@ static void flush_backlog(struct work_struct *work)
+       skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+               if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+                       __skb_unlink(skb, &sd->input_pkt_queue);
+-                      kfree_skb(skb);
++                      dev_kfree_skb_irq(skb);
+                       input_queue_head_incr(sd);
+               }
+       }
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index baf771d2d088..9d012255cedc 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1028,7 +1028,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
+       trans_timeout = queue->trans_timeout;
+       spin_unlock_irq(&queue->_xmit_lock);
+ 
+-      return sprintf(buf, "%lu", trans_timeout);
++      return sprintf(buf, fmt_ulong, trans_timeout);
+ }
+ 
+ static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index cb15338cfda4..0168c700a201 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2733,7 +2733,8 @@ replay:
+                        */
+                       if (err < 0) {
+                              /* If device is not registered at all, free it now */
+-                              if (dev->reg_state == NETREG_UNINITIALIZED)
++                              if (dev->reg_state == NETREG_UNINITIALIZED ||
++                                  dev->reg_state == NETREG_UNREGISTERED)
+                                       free_netdev(dev);
+                               goto out;
+                       }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a2c26c2b3a94..83d03340417a 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3516,10 +3516,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+       }
+ }
+ 
+-/* This routine deals with acks during a TLP episode.
+- * We mark the end of a TLP episode on receiving TLP dupack or when
+- * ack is after tlp_high_seq.
+- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
++/* This routine deals with acks during a TLP episode and ends an episode by
++ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
+  */
+ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ {
+@@ -3528,7 +3526,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+       if (before(ack, tp->tlp_high_seq))
+               return;
+ 
+-      if (flag & FLAG_DSACKING_ACK) {
++      if (!tp->tlp_retrans) {
++              /* TLP of new data has been acknowledged */
++              tp->tlp_high_seq = 0;
++      } else if (flag & FLAG_DSACKING_ACK) {
+               /* This DSACK means original and TLP probe arrived; no loss */
+               tp->tlp_high_seq = 0;
+       } else if (after(ack, tp->tlp_high_seq)) {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8fc14ad0726a..355ebae883c1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2500,6 +2500,11 @@ void tcp_send_loss_probe(struct sock *sk)
+       int pcount;
+       int mss = tcp_current_mss(sk);
+ 
++      /* At most one outstanding TLP */
++      if (tp->tlp_high_seq)
++              goto rearm_timer;
++
++      tp->tlp_retrans = 0;
+       skb = tcp_send_head(sk);
+       if (skb) {
+               if (tcp_snd_wnd_test(tp, skb, mss)) {
+@@ -2522,10 +2527,6 @@ void tcp_send_loss_probe(struct sock *sk)
+               return;
+       }
+ 
+-      /* At most one outstanding TLP retransmission. */
+-      if (tp->tlp_high_seq)
+-              goto rearm_timer;
+-
+       if (skb_still_in_host_queue(sk, skb))
+               goto rearm_timer;
+ 
+@@ -2546,10 +2547,12 @@ void tcp_send_loss_probe(struct sock *sk)
+       if (__tcp_retransmit_skb(sk, skb, 1))
+               goto rearm_timer;
+ 
++      tp->tlp_retrans = 1;
++
++probe_sent:
+       /* Record snd_nxt for loss detection. */
+       tp->tlp_high_seq = tp->snd_nxt;
+ 
+-probe_sent:
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+       /* Reset s.t. tcp_rearm_rto will restart timer from now */
+       inet_csk(sk)->icsk_pending = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e33258d69246..f335dd4c84e2 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1894,7 +1894,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       /*
+        *      UDP-Lite specific tests, ignored on UDP sockets
+        */
+-      if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
++      if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
+ 
+               /*
+                * MIB statistics other than incrementing the error count are
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index e07cc2cfc1a6..802457c0a121 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1169,15 +1169,16 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
+ static int __net_init ip6gre_init_net(struct net *net)
+ {
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
++      struct net_device *ndev;
+       int err;
+ 
+-      ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+-                                        NET_NAME_UNKNOWN,
+-                                        ip6gre_tunnel_setup);
+-      if (!ign->fb_tunnel_dev) {
++      ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
++                          NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
++      if (!ndev) {
+               err = -ENOMEM;
+               goto err_alloc_dev;
+       }
++      ign->fb_tunnel_dev = ndev;
+       dev_net_set(ign->fb_tunnel_dev, net);
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+@@ -1197,7 +1198,7 @@ static int __net_init ip6gre_init_net(struct net *net)
+       return 0;
+ 
+ err_reg_dev:
+-      free_netdev(ign->fb_tunnel_dev);
++      free_netdev(ndev);
+ err_alloc_dev:
+       return err;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index a2ba7356fa65..38ad3fac8c37 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -629,7 +629,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       /*
+        * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+        */
+-      if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
++      if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
+ 
+               if (up->pcrlen == 0) {          /* full coverage was set  */
+                      net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index b74cde2fd214..e82e91fe6178 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -445,7 +445,7 @@ try_again:
+           list_empty(&rx->recvmsg_q) &&
+           rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+               release_sock(&rx->sk);
+-              return -ENODATA;
++              return -EAGAIN;
+       }
+ 
+       if (list_empty(&rx->recvmsg_q)) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index a980b49d7a4f..f4386ad975cf 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -222,7 +222,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+       /* this should be in poll */
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ 
+-      if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
++      if (sk->sk_shutdown & SEND_SHUTDOWN)
+               return -EPIPE;
+ 
+       more = msg->msg_flags & MSG_MORE;

Reply via email to