commit:     592fd2b1122f91a49dfa702aeb350477ee3c7c5f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan  2 20:13:36 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan  2 20:13:36 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=592fd2b1

Linux patch 4.9.74

 0000_README             |    4 +
 1073_linux-4.9.74.patch | 2923 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2927 insertions(+)

diff --git a/0000_README b/0000_README
index a3e2751..350d2c5 100644
--- a/0000_README
+++ b/0000_README
@@ -335,6 +335,10 @@ Patch:  1072_linux-4.9.73.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.73
 
+Patch:  1073_linux-4.9.74.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.74
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1073_linux-4.9.74.patch b/1073_linux-4.9.74.patch
new file mode 100644
index 0000000..7efaa13
--- /dev/null
+++ b/1073_linux-4.9.74.patch
@@ -0,0 +1,2923 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 86a6746f6833..152ec4e87b57 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2795,6 +2795,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+       nopat           [X86] Disable PAT (page attribute table extension of
+                       pagetables) support.
+ 
++      nopcid          [X86-64] Disable the PCID cpu feature.
++
+       norandmaps      Don't use address space randomization.  Equivalent to
+                       echo 0 > /proc/sys/kernel/randomize_va_space
+ 
+diff --git a/Makefile b/Makefile
+index 64eb0bf614ee..075e429732e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 73
++SUBLEVEL = 74
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+@@ -788,6 +788,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+ # disable invalid "can't wrap" optimizations for signed / pointers
+ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
+ 
++# Make sure -fstack-check isn't enabled (like gentoo apparently did)
++KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
++
+ # conserve stack if available
+ KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index b9c546a305a4..da8156fd3d58 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -45,7 +45,7 @@ config X86
+       select ARCH_USE_CMPXCHG_LOCKREF         if X86_64
+       select ARCH_USE_QUEUED_RWLOCKS
+       select ARCH_USE_QUEUED_SPINLOCKS
+-      select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
++      select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+       select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+       select ARCH_WANT_FRAME_POINTERS
+       select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 85599ad4d024..21c5ac15657b 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -21,11 +21,13 @@
+ # define DISABLE_K6_MTRR      (1<<(X86_FEATURE_K6_MTRR & 31))
+ # define DISABLE_CYRIX_ARR    (1<<(X86_FEATURE_CYRIX_ARR & 31))
+ # define DISABLE_CENTAUR_MCR  (1<<(X86_FEATURE_CENTAUR_MCR & 31))
++# define DISABLE_PCID         0
+ #else
+ # define DISABLE_VME          0
+ # define DISABLE_K6_MTRR      0
+ # define DISABLE_CYRIX_ARR    0
+ # define DISABLE_CENTAUR_MCR  0
++# define DISABLE_PCID         (1<<(X86_FEATURE_PCID & 31))
+ #endif /* CONFIG_X86_64 */
+ 
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+@@ -43,7 +45,7 @@
+ #define DISABLED_MASK1        0
+ #define DISABLED_MASK2        0
+#define DISABLED_MASK3        (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+-#define DISABLED_MASK4        0
++#define DISABLED_MASK4        (DISABLE_PCID)
+ #define DISABLED_MASK5        0
+ #define DISABLED_MASK6        0
+ #define DISABLED_MASK7        0
+diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
+index 59405a248fc2..9b76cd331990 100644
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -22,8 +22,8 @@ typedef struct {
+ #ifdef CONFIG_SMP
+       unsigned int irq_resched_count;
+       unsigned int irq_call_count;
+-      unsigned int irq_tlb_count;
+ #endif
++      unsigned int irq_tlb_count;
+ #ifdef CONFIG_X86_THERMAL_VECTOR
+       unsigned int irq_thermal_count;
+ #endif
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 72198c64e646..8b272a08d1a8 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -33,12 +33,6 @@ typedef struct {
+ #endif
+ } mm_context_t;
+ 
+-#ifdef CONFIG_SMP
+ void leave_mm(int cpu);
+-#else
+-static inline void leave_mm(int cpu)
+-{
+-}
+-#endif
+ 
+ #endif /* _ASM_X86_MMU_H */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index f9dd22469388..d23e35584f15 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -99,10 +99,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
+ 
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
+-#ifdef CONFIG_SMP
+       if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+-#endif
+ }
+ 
+ static inline int init_new_context(struct task_struct *tsk,
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index fc5abff9b7fd..7d2ea6b1f7d9 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -7,6 +7,7 @@
+ #include <asm/processor.h>
+ #include <asm/cpufeature.h>
+ #include <asm/special_insns.h>
++#include <asm/smp.h>
+ 
+ static inline void __invpcid(unsigned long pcid, unsigned long addr,
+                            unsigned long type)
+@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
+ #endif
+ 
+ struct tlb_state {
+-#ifdef CONFIG_SMP
+       struct mm_struct *active_mm;
+       int state;
+-#endif
+ 
+       /*
+        * Access to this CR4 shadow and to H/W CR4 is protected by
+@@ -192,6 +191,14 @@ static inline void __flush_tlb_all(void)
+               __flush_tlb_global();
+       else
+               __flush_tlb();
++
++      /*
++       * Note: if we somehow had PCID but not PGE, then this wouldn't work --
++       * we'd end up flushing kernel translations for the current ASID but
++       * we might fail to flush kernel translations for other cached ASIDs.
++       *
++       * To avoid this issue, we force PCID off if PGE is off.
++       */
+ }
+ 
+ static inline void __flush_tlb_one(unsigned long addr)
+@@ -205,7 +212,6 @@ static inline void __flush_tlb_one(unsigned long addr)
+ /*
+  * TLB flushing:
+  *
+- *  - flush_tlb() flushes the current mm struct TLBs
+  *  - flush_tlb_all() flushes all processes TLBs
+  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+  *  - flush_tlb_page(vma, vmaddr) flushes one page
+@@ -217,84 +223,6 @@ static inline void __flush_tlb_one(unsigned long addr)
+  * and page-granular flushes are available only on i486 and up.
+  */
+ 
+-#ifndef CONFIG_SMP
+-
+-/* "_up" is for UniProcessor.
+- *
+- * This is a helper for other header functions.  *Not* intended to be called
+- * directly.  All global TLB flushes need to either call this, or to bump the
+- * vm statistics themselves.
+- */
+-static inline void __flush_tlb_up(void)
+-{
+-      count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+-      __flush_tlb();
+-}
+-
+-static inline void flush_tlb_all(void)
+-{
+-      count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+-      __flush_tlb_all();
+-}
+-
+-static inline void flush_tlb(void)
+-{
+-      __flush_tlb_up();
+-}
+-
+-static inline void local_flush_tlb(void)
+-{
+-      __flush_tlb_up();
+-}
+-
+-static inline void flush_tlb_mm(struct mm_struct *mm)
+-{
+-      if (mm == current->active_mm)
+-              __flush_tlb_up();
+-}
+-
+-static inline void flush_tlb_page(struct vm_area_struct *vma,
+-                                unsigned long addr)
+-{
+-      if (vma->vm_mm == current->active_mm)
+-              __flush_tlb_one(addr);
+-}
+-
+-static inline void flush_tlb_range(struct vm_area_struct *vma,
+-                                 unsigned long start, unsigned long end)
+-{
+-      if (vma->vm_mm == current->active_mm)
+-              __flush_tlb_up();
+-}
+-
+-static inline void flush_tlb_mm_range(struct mm_struct *mm,
+-         unsigned long start, unsigned long end, unsigned long vmflag)
+-{
+-      if (mm == current->active_mm)
+-              __flush_tlb_up();
+-}
+-
+-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
+-                                         struct mm_struct *mm,
+-                                         unsigned long start,
+-                                         unsigned long end)
+-{
+-}
+-
+-static inline void reset_lazy_tlbstate(void)
+-{
+-}
+-
+-static inline void flush_tlb_kernel_range(unsigned long start,
+-                                        unsigned long end)
+-{
+-      flush_tlb_all();
+-}
+-
+-#else  /* SMP */
+-
+-#include <asm/smp.h>
+-
+ #define local_flush_tlb() __flush_tlb()
+ 
+ #define flush_tlb_mm(mm)      flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+@@ -303,13 +231,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
+               flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+ 
+ extern void flush_tlb_all(void);
+-extern void flush_tlb_current_task(void);
+-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                               unsigned long end, unsigned long vmflag);
+ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ 
+-#define flush_tlb()   flush_tlb_current_task()
++static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
++{
++      flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
++}
+ 
+ void native_flush_tlb_others(const struct cpumask *cpumask,
+                               struct mm_struct *mm,
+@@ -324,8 +253,6 @@ static inline void reset_lazy_tlbstate(void)
+       this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+ }
+ 
+-#endif        /* SMP */
+-
+ #ifndef CONFIG_PARAVIRT
+ #define flush_tlb_others(mask, mm, start, end)        \
+       native_flush_tlb_others(mask, mm, start, end)
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index bd17db15a2c1..0b6124315441 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -19,6 +19,14 @@
+ 
+ void __init check_bugs(void)
+ {
++#ifdef CONFIG_X86_32
++      /*
++       * Regardless of whether PCID is enumerated, the SDM says
++       * that it can't be enabled in 32-bit mode.
++       */
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++#endif
++
+       identify_boot_cpu();
+ #ifndef CONFIG_SMP
+       pr_info("CPU: ");
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 4eece91ada37..91588be529b9 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -163,6 +163,24 @@ static int __init x86_mpx_setup(char *s)
+ }
+ __setup("nompx", x86_mpx_setup);
+ 
++#ifdef CONFIG_X86_64
++static int __init x86_pcid_setup(char *s)
++{
++      /* require an exact match without trailing characters */
++      if (strlen(s))
++              return 0;
++
++      /* do not emit a message if the feature is not present */
++      if (!boot_cpu_has(X86_FEATURE_PCID))
++              return 1;
++
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++      pr_info("nopcid: PCID feature disabled\n");
++      return 1;
++}
++__setup("nopcid", x86_pcid_setup);
++#endif
++
+ static int __init x86_noinvpcid_setup(char *s)
+ {
+       /* noinvpcid doesn't accept parameters */
+@@ -306,6 +324,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+       }
+ }
+ 
++static void setup_pcid(struct cpuinfo_x86 *c)
++{
++      if (cpu_has(c, X86_FEATURE_PCID)) {
++              if (cpu_has(c, X86_FEATURE_PGE)) {
++                      cr4_set_bits(X86_CR4_PCIDE);
++              } else {
++                      /*
++                       * flush_tlb_all(), as currently implemented, won't
++                       * work if PCID is on but PGE is not.  Since that
++                       * combination doesn't exist on real hardware, there's
++                       * no reason to try to fully support it, but it's
++                       * polite to avoid corrupting data if we're on
++                       * an improperly configured VM.
++                       */
++                      clear_cpu_cap(c, X86_FEATURE_PCID);
++              }
++      }
++}
++
+ /*
+  * Protection Keys are not available in 32-bit mode.
+  */
+@@ -1064,6 +1101,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+       setup_smep(c);
+       setup_smap(c);
+ 
++      /* Set up PCID */
++      setup_pcid(c);
++
+       /*
+        * The vendor-specific functions might have changed features.
+        * Now we do "generic changes."
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 067f9813fd2c..ce020a69bba9 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -106,6 +106,10 @@ void __noreturn machine_real_restart(unsigned int type)
+       load_cr3(initial_page_table);
+ #else
+       write_cr3(real_mode_header->trampoline_pgd);
++
++      /* Exiting long mode will fail if CR4.PCIDE is set. */
++      if (static_cpu_has(X86_FEATURE_PCID))
++              cr4_clear_bits(X86_CR4_PCIDE);
+ #endif
+ 
+       /* Jump to the identity-mapped low memory code */
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9fe7b9e1ae30..e803d72ef525 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -115,25 +115,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+       spin_lock_irqsave(&rtc_lock, flags);
+       CMOS_WRITE(0xa, 0xf);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+-      local_flush_tlb();
+-      pr_debug("1.\n");
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+                                                       start_eip >> 4;
+-      pr_debug("2.\n");
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+                                                       start_eip & 0xf;
+-      pr_debug("3.\n");
+ }
+ 
+ static inline void smpboot_restore_warm_reset_vector(void)
+ {
+       unsigned long flags;
+ 
+-      /*
+-       * Install writable page 0 entry to set BIOS data area.
+-       */
+-      local_flush_tlb();
+-
+       /*
+        * Paranoid:  Set warm reset code and vector here back
+        * to default values.
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 01f30e56f99e..4b3012888ada 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -191,7 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
+       pte_unmap_unlock(pte, ptl);
+ out:
+       up_write(&mm->mmap_sem);
+-      flush_tlb();
++      flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+ }
+ 
+ 
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 889e7619a091..0381638168d1 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -764,10 +764,8 @@ void __init zone_sizes_init(void)
+ }
+ 
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+-#ifdef CONFIG_SMP
+       .active_mm = &init_mm,
+       .state = 0,
+-#endif
+       .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
+ };
+ EXPORT_SYMBOL_GPL(cpu_tlbstate);
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 75fb01109f94..53b72fb4e781 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -15,7 +15,7 @@
+ #include <linux/debugfs.h>
+ 
+ /*
+- *    Smarter SMP flushing macros.
++ *    TLB flushing, formerly SMP-only
+  *            c/o Linus Torvalds.
+  *
+  *    These mean you can really definitely utterly forget about
+@@ -28,8 +28,6 @@
+  *    Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
+  */
+ 
+-#ifdef CONFIG_SMP
+-
+ struct flush_tlb_info {
+       struct mm_struct *flush_mm;
+       unsigned long flush_start;
+@@ -59,8 +57,6 @@ void leave_mm(int cpu)
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+ 
+-#endif /* CONFIG_SMP */
+-
+ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+              struct task_struct *tsk)
+ {
+@@ -91,10 +87,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                               set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+               }
+ 
+-#ifdef CONFIG_SMP
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               this_cpu_write(cpu_tlbstate.active_mm, next);
+-#endif
+ 
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+ 
+@@ -152,9 +146,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_mm_ldt(next);
+ #endif
+-      }
+-#ifdef CONFIG_SMP
+-        else {
++      } else {
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+ 
+@@ -181,11 +173,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                       load_mm_ldt(next);
+               }
+       }
+-#endif
+ }
+ 
+-#ifdef CONFIG_SMP
+-
+ /*
+  * The flush IPI assumes that a thread switch happens in this order:
+  * [cpu0: the cpu that switches]
+@@ -287,23 +276,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+       smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+ }
+ 
+-void flush_tlb_current_task(void)
+-{
+-      struct mm_struct *mm = current->mm;
+-
+-      preempt_disable();
+-
+-      count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+-
+-      /* This is an implicit full barrier that synchronizes with switch_mm. */
+-      local_flush_tlb();
+-
+-      trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+-      if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+-              flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+-      preempt_enable();
+-}
+-
+ /*
+  * See Documentation/x86/tlb.txt for details.  We choose 33
+  * because it is large enough to cover the vast majority (at
+@@ -324,6 +296,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+       unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+ 
+       preempt_disable();
++
++      if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
++              base_pages_to_flush = (end - start) >> PAGE_SHIFT;
++      if (base_pages_to_flush > tlb_single_page_flush_ceiling)
++              base_pages_to_flush = TLB_FLUSH_ALL;
++
+       if (current->active_mm != mm) {
+               /* Synchronize with switch_mm. */
+               smp_mb();
+@@ -340,15 +318,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+               goto out;
+       }
+ 
+-      if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+-              base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+-
+       /*
+        * Both branches below are implicit full barriers (MOV to CR or
+        * INVLPG) that synchronize with switch_mm.
+        */
+-      if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+-              base_pages_to_flush = TLB_FLUSH_ALL;
++      if (base_pages_to_flush == TLB_FLUSH_ALL) {
+               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+               local_flush_tlb();
+       } else {
+@@ -369,33 +343,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+       preempt_enable();
+ }
+ 
+-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+-{
+-      struct mm_struct *mm = vma->vm_mm;
+-
+-      preempt_disable();
+-
+-      if (current->active_mm == mm) {
+-              if (current->mm) {
+-                      /*
+-                       * Implicit full barrier (INVLPG) that synchronizes
+-                       * with switch_mm.
+-                       */
+-                      __flush_tlb_one(start);
+-              } else {
+-                      leave_mm(smp_processor_id());
+-
+-                      /* Synchronize with switch_mm. */
+-                      smp_mb();
+-              }
+-      }
+-
+-      if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+-              flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
+-
+-      preempt_enable();
+-}
+-
+ static void do_flush_tlb_all(void *info)
+ {
+       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+@@ -480,5 +427,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
+       return 0;
+ }
+ late_initcall(create_tlb_single_page_flush_ceiling);
+-
+-#endif /* CONFIG_SMP */
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 8f1f7efa848c..2bea87cc0ff2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -444,6 +444,12 @@ static void __init xen_init_cpuid_mask(void)
+               ~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
+                 (1 << X86_FEATURE_ACC));   /* thermal monitoring */
+ 
++      /*
++       * Xen PV would need some work to support PCID: CR3 handling as well
++       * as xen_flush_tlb_others() would need updating.
++       */
++      cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32));  /* disable PCID */
++
+       if (!xen_initial_domain())
+               cpuid_leaf1_edx_mask &=
+                       ~((1 << X86_FEATURE_ACPI));  /* disable ACPI */
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 19c6477af19f..a856371bbe58 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -575,10 +575,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+                       ret = -EAGAIN;
+                       goto skip_cqe;
+               }
+-              if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
++              if (unlikely(!CQE_STATUS(hw_cqe) &&
++                           CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
+                       t4_set_wq_in_error(wq);
+-                      hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
+-                      goto proc_cqe;
++                      hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
+               }
+               goto proc_cqe;
+       }
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index edae2dcc4927..bb22d325e965 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14226,7 +14226,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+       /* Reset PHY, otherwise the read DMA engine will be in a mode that
+        * breaks all requests to 256 bytes.
+        */
+-      if (tg3_asic_rev(tp) == ASIC_REV_57766)
++      if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
++          tg3_asic_rev(tp) == ASIC_REV_5717 ||
++          tg3_asic_rev(tp) == ASIC_REV_5719)
+               reset_phy = true;
+ 
+       err = tg3_restart_hw(tp, reset_phy);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 849b8712ec81..917091871259 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -172,10 +172,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #endif /* CONFIG_M5272 */
+ 
+ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
++ *
++ * 2048 byte skbufs are allocated. However, alignment requirements
++ * varies between FEC variants. Worst case is 64, so round down by 64.
+  */
+-#define PKT_MAXBUF_SIZE               1522
++#define PKT_MAXBUF_SIZE               (round_down(2048 - 64, 64))
+ #define PKT_MINBUF_SIZE               64
+-#define PKT_MAXBLR_SIZE               1536
+ 
+ /* FEC receive acceleration */
+ #define FEC_RACC_IPDIS                (1 << 1)
+@@ -813,6 +815,12 @@ static void fec_enet_bd_init(struct net_device *dev)
+               for (i = 0; i < txq->bd.ring_size; i++) {
+                       /* Initialize the BD for every fragment in the page. */
+                       bdp->cbd_sc = cpu_to_fec16(0);
++                      if (bdp->cbd_bufaddr &&
++                          !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
++                              dma_unmap_single(&fep->pdev->dev,
++                                               fec32_to_cpu(bdp->cbd_bufaddr),
++                                               fec16_to_cpu(bdp->cbd_datlen),
++                                               DMA_TO_DEVICE);
+                       if (txq->tx_skbuff[i]) {
+                               dev_kfree_skb_any(txq->tx_skbuff[i]);
+                               txq->tx_skbuff[i] = NULL;
+@@ -847,7 +855,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
+       for (i = 0; i < fep->num_rx_queues; i++) {
+               rxq = fep->rx_queue[i];
+               writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
+-              writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
++              writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ 
+               /* enable DMA1/2 */
+               if (i)
+diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
+index a0d1b084ecec..7aeb7fedb364 100644
+--- a/drivers/net/ethernet/marvell/mvmdio.c
++++ b/drivers/net/ethernet/marvell/mvmdio.c
+@@ -232,7 +232,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
+                       dev->regs + MVMDIO_ERR_INT_MASK);
+ 
+       } else if (dev->err_interrupt == -EPROBE_DEFER) {
+-              return -EPROBE_DEFER;
++              ret = -EPROBE_DEFER;
++              goto out_mdio;
+       }
+ 
+       mutex_init(&dev->lock);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index f7fabecc104f..4c3f1cb7e2c9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -367,7 +367,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+       case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+       case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_QUERY_Q_COUNTER:
+-      case MLX5_CMD_OP_SET_RATE_LIMIT:
++      case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
+       case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+       case MLX5_CMD_OP_ALLOC_PD:
+       case MLX5_CMD_OP_ALLOC_UAR:
+@@ -502,7 +502,7 @@ const char *mlx5_command_str(int command)
+       MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+-      MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
++      MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(ALLOC_PD);
+       MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 9d3722930c95..38981db43bc3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3038,6 +3038,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
+                                                   struct sk_buff *skb,
+                                                   netdev_features_t features)
+ {
++      unsigned int offset = 0;
+       struct udphdr *udph;
+       u16 proto;
+       u16 port = 0;
+@@ -3047,7 +3048,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
+               proto = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+-              proto = ipv6_hdr(skb)->nexthdr;
++              proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+               break;
+       default:
+               goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+index d0a4005fe63a..9346f3985edf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+@@ -303,8 +303,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+ err_cmd:
+       memset(din, 0, sizeof(din));
+       memset(dout, 0, sizeof(dout));
+-      MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+-      MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
++      MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
++      MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+       mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+       return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+index 104902a93a0b..2be9ec5fd651 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+@@ -60,16 +60,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
+       return ret_entry;
+ }
+ 
+-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
++static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
+                                  u32 rate, u16 index)
+ {
+-      u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
+-      u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
++      u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
++      u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ 
+-      MLX5_SET(set_rate_limit_in, in, opcode,
+-               MLX5_CMD_OP_SET_RATE_LIMIT);
+-      MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
+-      MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
++      MLX5_SET(set_pp_rate_limit_in, in, opcode,
++               MLX5_CMD_OP_SET_PP_RATE_LIMIT);
++      MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
++      MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ }
+ 
+@@ -108,7 +108,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
+               entry->refcount++;
+       } else {
+               /* new rate limit */
+-              err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
++              err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
+               if (err) {
+                       mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
+                                     rate, err);
+@@ -144,7 +144,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
+       entry->refcount--;
+       if (!entry->refcount) {
+               /* need to remove rate */
+-              mlx5_set_rate_limit_cmd(dev, 0, entry->index);
++              mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
+               entry->rate = 0;
+       }
+ 
+@@ -197,8 +197,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
+       /* Clear all configured rates */
+       for (i = 0; i < table->max_size; i++)
+               if (table->rl_entry[i].rate)
+-                      mlx5_set_rate_limit_cmd(dev, 0,
+-                                              table->rl_entry[i].index);
++                      mlx5_set_pp_rate_limit_cmd(dev, 0,
++                                                 table->rl_entry[i].index);
+ 
+       kfree(dev->priv.rl_table.rl_entry);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+index 07a9ba6cfc70..2f74953e4561 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct 
mlx5e_priv *priv, u16 port)
+       struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       struct mlx5e_vxlan *vxlan;
+ 
+-      spin_lock(&vxlan_db->lock);
++      spin_lock_bh(&vxlan_db->lock);
+       vxlan = radix_tree_lookup(&vxlan_db->tree, port);
+-      spin_unlock(&vxlan_db->lock);
++      spin_unlock_bh(&vxlan_db->lock);
+ 
+       return vxlan;
+ }
+@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
+       struct mlx5e_vxlan *vxlan;
+       int err;
+ 
+-      if (mlx5e_vxlan_lookup_port(priv, port))
++      mutex_lock(&priv->state_lock);
++      vxlan = mlx5e_vxlan_lookup_port(priv, port);
++      if (vxlan) {
++              atomic_inc(&vxlan->refcount);
+               goto free_work;
++      }
+ 
+       if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+               goto free_work;
+@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
+               goto err_delete_port;
+ 
+       vxlan->udp_port = port;
++      atomic_set(&vxlan->refcount, 1);
+ 
+-      spin_lock_irq(&vxlan_db->lock);
++      spin_lock_bh(&vxlan_db->lock);
+       err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
+-      spin_unlock_irq(&vxlan_db->lock);
++      spin_unlock_bh(&vxlan_db->lock);
+       if (err)
+               goto err_free;
+ 
+@@ -113,35 +118,39 @@ static void mlx5e_vxlan_add_port(struct work_struct 
*work)
+ err_delete_port:
+       mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ free_work:
++      mutex_unlock(&priv->state_lock);
+       kfree(vxlan_work);
+ }
+ 
+-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
++static void mlx5e_vxlan_del_port(struct work_struct *work)
+ {
++      struct mlx5e_vxlan_work *vxlan_work =
++              container_of(work, struct mlx5e_vxlan_work, work);
++      struct mlx5e_priv *priv         = vxlan_work->priv;
+       struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
++      u16 port = vxlan_work->port;
+       struct mlx5e_vxlan *vxlan;
++      bool remove = false;
+ 
+-      spin_lock_irq(&vxlan_db->lock);
+-      vxlan = radix_tree_delete(&vxlan_db->tree, port);
+-      spin_unlock_irq(&vxlan_db->lock);
+-
++      mutex_lock(&priv->state_lock);
++      spin_lock_bh(&vxlan_db->lock);
++      vxlan = radix_tree_lookup(&vxlan_db->tree, port);
+       if (!vxlan)
+-              return;
+-
+-      mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
+-
+-      kfree(vxlan);
+-}
++              goto out_unlock;
+ 
+-static void mlx5e_vxlan_del_port(struct work_struct *work)
+-{
+-      struct mlx5e_vxlan_work *vxlan_work =
+-              container_of(work, struct mlx5e_vxlan_work, work);
+-      struct mlx5e_priv *priv = vxlan_work->priv;
+-      u16 port = vxlan_work->port;
++      if (atomic_dec_and_test(&vxlan->refcount)) {
++              radix_tree_delete(&vxlan_db->tree, port);
++              remove = true;
++      }
+ 
+-      __mlx5e_vxlan_core_del_port(priv, port);
++out_unlock:
++      spin_unlock_bh(&vxlan_db->lock);
+ 
++      if (remove) {
++              mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
++              kfree(vxlan);
++      }
++      mutex_unlock(&priv->state_lock);
+       kfree(vxlan_work);
+ }
+ 
+@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
+       struct mlx5e_vxlan *vxlan;
+       unsigned int port = 0;
+ 
+-      spin_lock_irq(&vxlan_db->lock);
++      /* Lockless since we are the only radix-tree consumers, wq is disabled 
*/
+       while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 
1)) {
+               port = vxlan->udp_port;
+-              spin_unlock_irq(&vxlan_db->lock);
+-              __mlx5e_vxlan_core_del_port(priv, (u16)port);
+-              spin_lock_irq(&vxlan_db->lock);
++              radix_tree_delete(&vxlan_db->tree, port);
++              mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
++              kfree(vxlan);
+       }
+-      spin_unlock_irq(&vxlan_db->lock);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h 
b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+index 5def12c048e3..5ef6ae7d568a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+@@ -36,6 +36,7 @@
+ #include "en.h"
+ 
+ struct mlx5e_vxlan {
++      atomic_t refcount;
+       u16 udp_port;
+ };
+ 
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index fbf5945ce00d..2032a6de026b 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
+               phydev->link = 0;
+               if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+                       phydev->drv->config_intr(phydev);
++              return genphy_config_aneg(phydev);
+       }
+ 
+       return 0;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 105fbfb47e3a..db65d9ad4488 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -907,6 +907,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x907b, 8)},    /* Sierra Wireless EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x907b, 10)},   /* Sierra Wireless EM74xx */
++      {QMI_FIXED_INTF(0x1199, 0x9091, 8)},    /* Sierra Wireless EM7565 */
+       {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II 
(Alcatel One Touch L100V LTE) */
+       {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
+       {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index e72234efb648..9b5fc502f6a1 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -576,9 +576,9 @@ enum qeth_cq {
+ };
+ 
+ struct qeth_ipato {
+-      int enabled;
+-      int invert4;
+-      int invert6;
++      bool enabled;
++      bool invert4;
++      bool invert6;
+       struct list_head entries;
+ };
+ 
+diff --git a/drivers/s390/net/qeth_core_main.c 
b/drivers/s390/net/qeth_core_main.c
+index 838ed6213118..df8f74cb1406 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -1475,9 +1475,9 @@ static int qeth_setup_card(struct qeth_card *card)
+       qeth_set_intial_options(card);
+       /* IP address takeover */
+       INIT_LIST_HEAD(&card->ipato.entries);
+-      card->ipato.enabled = 0;
+-      card->ipato.invert4 = 0;
+-      card->ipato.invert6 = 0;
++      card->ipato.enabled = false;
++      card->ipato.invert4 = false;
++      card->ipato.invert6 = false;
+       /* init QDIO stuff */
+       qeth_init_qdio_info(card);
+       INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
+diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
+index 26f79533e62e..eedf9b01a496 100644
+--- a/drivers/s390/net/qeth_l3.h
++++ b/drivers/s390/net/qeth_l3.h
+@@ -80,7 +80,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum 
qeth_prot_versions, const u8 *);
+ int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
+                       const u8 *);
+-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr 
*);
++void qeth_l3_update_ipato(struct qeth_card *card);
+ struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
+ int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
+ int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index f91e70c369ed..1487f8a0c575 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -168,8 +168,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 
*bits, int len)
+       }
+ }
+ 
+-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+-                                              struct qeth_ipaddr *addr)
++static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
++                                           struct qeth_ipaddr *addr)
+ {
+       struct qeth_ipato_entry *ipatoe;
+       u8 addr_bits[128] = {0, };
+@@ -178,6 +178,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card 
*card,
+ 
+       if (!card->ipato.enabled)
+               return 0;
++      if (addr->type != QETH_IP_TYPE_NORMAL)
++              return 0;
+ 
+       qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
+                                 (addr->proto == QETH_PROT_IPV4)? 4:16);
+@@ -293,8 +295,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct 
qeth_ipaddr *tmp_addr)
+               memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
+               addr->ref_counter = 1;
+ 
+-              if (addr->type == QETH_IP_TYPE_NORMAL  &&
+-                              qeth_l3_is_addr_covered_by_ipato(card, addr)) {
++              if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+                       QETH_CARD_TEXT(card, 2, "tkovaddr");
+                       addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+               }
+@@ -607,6 +608,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
+ /*
+  * IP address takeover related functions
+  */
++
++/**
++ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
++ *
++ * Caller must hold ip_lock.
++ */
++void qeth_l3_update_ipato(struct qeth_card *card)
++{
++      struct qeth_ipaddr *addr;
++      unsigned int i;
++
++      hash_for_each(card->ip_htable, i, addr, hnode) {
++              if (addr->type != QETH_IP_TYPE_NORMAL)
++                      continue;
++              if (qeth_l3_is_addr_covered_by_ipato(card, addr))
++                      addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
++              else
++                      addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
++      }
++}
++
+ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
+ {
+       struct qeth_ipato_entry *ipatoe, *tmp;
+@@ -618,6 +640,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card 
*card)
+               kfree(ipatoe);
+       }
+ 
++      qeth_l3_update_ipato(card);
+       spin_unlock_bh(&card->ip_lock);
+ }
+ 
+@@ -642,8 +665,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
+               }
+       }
+ 
+-      if (!rc)
++      if (!rc) {
+               list_add_tail(&new->entry, &card->ipato.entries);
++              qeth_l3_update_ipato(card);
++      }
+ 
+       spin_unlock_bh(&card->ip_lock);
+ 
+@@ -666,6 +691,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
+                           (proto == QETH_PROT_IPV4)? 4:16) &&
+                   (ipatoe->mask_bits == mask_bits)) {
+                       list_del(&ipatoe->entry);
++                      qeth_l3_update_ipato(card);
+                       kfree(ipatoe);
+               }
+       }
+diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
+index cffe42f5775d..d6bdfc6e905a 100644
+--- a/drivers/s390/net/qeth_l3_sys.c
++++ b/drivers/s390/net/qeth_l3_sys.c
+@@ -372,8 +372,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct 
device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+ {
+       struct qeth_card *card = dev_get_drvdata(dev);
+-      struct qeth_ipaddr *addr;
+-      int i, rc = 0;
++      bool enable;
++      int rc = 0;
+ 
+       if (!card)
+               return -EINVAL;
+@@ -386,25 +386,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct 
device *dev,
+       }
+ 
+       if (sysfs_streq(buf, "toggle")) {
+-              card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
+-      } else if (sysfs_streq(buf, "1")) {
+-              card->ipato.enabled = 1;
+-              hash_for_each(card->ip_htable, i, addr, hnode) {
+-                              if ((addr->type == QETH_IP_TYPE_NORMAL) &&
+-                              qeth_l3_is_addr_covered_by_ipato(card, addr))
+-                                      addr->set_flags |=
+-                                      QETH_IPA_SETIP_TAKEOVER_FLAG;
+-                      }
+-      } else if (sysfs_streq(buf, "0")) {
+-              card->ipato.enabled = 0;
+-              hash_for_each(card->ip_htable, i, addr, hnode) {
+-                      if (addr->set_flags &
+-                      QETH_IPA_SETIP_TAKEOVER_FLAG)
+-                              addr->set_flags &=
+-                              ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+-                      }
+-      } else
++              enable = !card->ipato.enabled;
++      } else if (kstrtobool(buf, &enable)) {
+               rc = -EINVAL;
++              goto out;
++      }
++
++      if (card->ipato.enabled != enable) {
++              card->ipato.enabled = enable;
++              spin_lock_bh(&card->ip_lock);
++              qeth_l3_update_ipato(card);
++              spin_unlock_bh(&card->ip_lock);
++      }
+ out:
+       mutex_unlock(&card->conf_mutex);
+       return rc ? rc : count;
+@@ -430,20 +423,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct 
device *dev,
+                               const char *buf, size_t count)
+ {
+       struct qeth_card *card = dev_get_drvdata(dev);
++      bool invert;
+       int rc = 0;
+ 
+       if (!card)
+               return -EINVAL;
+ 
+       mutex_lock(&card->conf_mutex);
+-      if (sysfs_streq(buf, "toggle"))
+-              card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
+-      else if (sysfs_streq(buf, "1"))
+-              card->ipato.invert4 = 1;
+-      else if (sysfs_streq(buf, "0"))
+-              card->ipato.invert4 = 0;
+-      else
++      if (sysfs_streq(buf, "toggle")) {
++              invert = !card->ipato.invert4;
++      } else if (kstrtobool(buf, &invert)) {
+               rc = -EINVAL;
++              goto out;
++      }
++
++      if (card->ipato.invert4 != invert) {
++              card->ipato.invert4 = invert;
++              spin_lock_bh(&card->ip_lock);
++              qeth_l3_update_ipato(card);
++              spin_unlock_bh(&card->ip_lock);
++      }
++out:
+       mutex_unlock(&card->conf_mutex);
+       return rc ? rc : count;
+ }
+@@ -609,20 +609,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct 
device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+ {
+       struct qeth_card *card = dev_get_drvdata(dev);
++      bool invert;
+       int rc = 0;
+ 
+       if (!card)
+               return -EINVAL;
+ 
+       mutex_lock(&card->conf_mutex);
+-      if (sysfs_streq(buf, "toggle"))
+-              card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
+-      else if (sysfs_streq(buf, "1"))
+-              card->ipato.invert6 = 1;
+-      else if (sysfs_streq(buf, "0"))
+-              card->ipato.invert6 = 0;
+-      else
++      if (sysfs_streq(buf, "toggle")) {
++              invert = !card->ipato.invert6;
++      } else if (kstrtobool(buf, &invert)) {
+               rc = -EINVAL;
++              goto out;
++      }
++
++      if (card->ipato.invert6 != invert) {
++              card->ipato.invert6 = invert;
++              spin_lock_bh(&card->ip_lock);
++              qeth_l3_update_ipato(card);
++              spin_unlock_bh(&card->ip_lock);
++      }
++out:
+       mutex_unlock(&card->conf_mutex);
+       return rc ? rc : count;
+ }
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index bdf0e6e89991..faf50df81622 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1764,7 +1764,7 @@ static void n_tty_set_termios(struct tty_struct *tty, 
struct ktermios *old)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+ 
+-      if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
++      if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) 
{
+               bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
+               ldata->line_start = ldata->read_tail;
+               if (!L_ICANON(tty) || !read_cnt(ldata)) {
+@@ -2427,7 +2427,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct 
file *file,
+               return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
+       case TIOCINQ:
+               down_write(&tty->termios_rwsem);
+-              if (L_ICANON(tty))
++              if (L_ICANON(tty) && !L_EXTPROC(tty))
+                       retval = inq_canon(ldata);
+               else
+                       retval = read_cnt(ldata);
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index c220c2c0893f..e99f1c5b1df6 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
+  *    Callers other than flush_to_ldisc() need to exclude the kworker
+  *    from concurrent use of the line discipline, see paste_selection().
+  *
+- *    Returns the number of bytes not processed
++ *    Returns the number of bytes processed
+  */
+ int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+                         char *f, int count)
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index ba9b29bc441f..7c54a19b20e0 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -1002,7 +1002,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+               case USB_SSP_CAP_TYPE:
+                       ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+                       ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+-                              USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
++                              USB_SSP_SUBLINK_SPEED_ATTRIBS);
+                       if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+                               dev->bos->ssp_cap = ssp_cap;
+                       break;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 50010282c010..c05c4f877750 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
+       /* Microsoft LifeCam-VX700 v2.0 */
+       { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-      /* Logitech HD Pro Webcams C920, C920-C and C930e */
++      /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+       { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
++      { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+       /* Logitech ConferenceCam CC3000e */
+       { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+       /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+       { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+ 
++      /* ELSA MicroLink 56K */
++      { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
++
+       /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter 
*/
+       { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c87ef38e7416..f6782a347cde 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -189,6 +189,9 @@ static void xhci_pci_quirks(struct device *dev, struct 
xhci_hcd *xhci)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
+       }
++      if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
++                      pdev->device == 0x0014)
++              xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+                       pdev->device == 0x0015)
+               xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 3249f42b4b93..0c743e4cca1e 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = {
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
++      { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+       { }                                     /* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h 
b/drivers/usb/serial/ftdi_sio_ids.h
+index f9d15bd62785..543d2801632b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -913,6 +913,12 @@
+ #define ICPDAS_I7561U_PID             0x0104
+ #define ICPDAS_I7563U_PID             0x0105
+ 
++/*
++ * Airbus Defence and Space
++ */
++#define AIRBUS_DS_VID                 0x1e8e  /* Vendor ID */
++#define AIRBUS_DS_P8GR                        0x6001  /* Tetra P8GR */
++
+ /*
+  * RT Systems programming cables for various ham radios
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ffa8ec917ff5..a818c43a02ec 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
+ /* These Quectel products use Qualcomm's vendor ID */
+ #define QUECTEL_PRODUCT_UC20                  0x9003
+ #define QUECTEL_PRODUCT_UC15                  0x9090
++/* These Yuga products use Qualcomm's vendor ID */
++#define YUGA_PRODUCT_CLM920_NC5                       0x9625
+ 
+ #define QUECTEL_VENDOR_ID                     0x2c7c
+ /* These Quectel products use Quectel's vendor ID */
+@@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE922_USBCFG3           0x1043
+ #define TELIT_PRODUCT_LE922_USBCFG5           0x1045
+ #define TELIT_PRODUCT_ME910                   0x1100
++#define TELIT_PRODUCT_ME910_DUAL_MODEM                0x1101
+ #define TELIT_PRODUCT_LE920                   0x1200
+ #define TELIT_PRODUCT_LE910                   0x1201
+ #define TELIT_PRODUCT_LE910_USBCFG4           0x1206
+@@ -648,6 +651,11 @@ static const struct option_blacklist_info 
telit_me910_blacklist = {
+       .reserved = BIT(1) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
++      .sendsetup = BIT(0),
++      .reserved = BIT(3),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(2),
+@@ -677,6 +685,10 @@ static const struct option_blacklist_info 
cinterion_rmnet2_blacklist = {
+       .reserved = BIT(4) | BIT(5),
+ };
+ 
++static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
++      .reserved = BIT(1) | BIT(4),
++};
++
+ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1181,6 +1193,9 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++      /* Yuga products use Qualcomm vendor ID */
++      { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
++        .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+       /* Quectel products using Quectel vendor ID */
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1247,6 +1262,8 @@ static const struct usb_device_id option_ids[] = {
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+               .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
++      { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
++              .driver_info = 
(kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+               .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 4516291df1b8..fb6dc16c754a 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
+       {DEVICE_SWI(0x1199, 0x9079)},   /* Sierra Wireless EM74xx */
+       {DEVICE_SWI(0x1199, 0x907a)},   /* Sierra Wireless EM74xx QDL */
+       {DEVICE_SWI(0x1199, 0x907b)},   /* Sierra Wireless EM74xx */
++      {DEVICE_SWI(0x1199, 0x9090)},   /* Sierra Wireless EM7565 QDL */
++      {DEVICE_SWI(0x1199, 0x9091)},   /* Sierra Wireless EM7565 */
+       {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE 
Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) 
Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) 
Mobile Broadband Card */
+@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct 
usb_device_id *id)
+                       break;
+               case 2:
+                       dev_dbg(dev, "NMEA GPS interface found\n");
++                      sendsetup = true;
+                       break;
+               case 3:
+                       dev_dbg(dev, "Modem port found\n");
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index c653ce533430..1886d8e4f14e 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device 
*ud)
+        * step 1?
+        */
+       if (ud->tcp_socket) {
+-              dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
+-                      ud->tcp_socket);
++              dev_dbg(&sdev->udev->dev, "shutdown sockfd\n");
+               kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+       }
+ 
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index af10f7b131a4..325b4c05acdd 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
+       struct stub_priv *priv;
+       struct urb *urb;
+ 
+-      dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
++      dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
+ 
+       while ((priv = stub_priv_pop(sdev))) {
+               urb = priv->urb;
+-              dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
++              dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
++                      priv->seqnum);
+               usb_kill_urb(urb);
+ 
+               kmem_cache_free(stub_priv_cache, priv);
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 283a9be77a22..5b807185f79e 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
+               if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
+                       continue;
+ 
+-              dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
+-                       priv->urb);
+-
+               /*
+                * This matched urb is not completed yet (i.e., be in
+                * flight in usb hcd hardware/driver). Now we are
+@@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
+               ret = usb_unlink_urb(priv->urb);
+               if (ret != -EINPROGRESS)
+                       dev_err(&priv->urb->dev->dev,
+-                              "failed to unlink a urb %p, ret %d\n",
+-                              priv->urb, ret);
++                              "failed to unlink a urb # %lu, ret %d\n",
++                              priv->seqnum, ret);
+ 
+               return 0;
+       }
+diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
+index 87ff94be4235..96aa375b80d9 100644
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -102,7 +102,7 @@ void stub_complete(struct urb *urb)
+       /* link a urb to the queue of tx. */
+       spin_lock_irqsave(&sdev->priv_lock, flags);
+       if (sdev->ud.tcp_socket == NULL) {
+-              usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
++              usbip_dbg_stub_tx("ignore urb for closed connection\n");
+               /* It will be freed in stub_device_cleanup_urbs(). */
+       } else if (priv->unlinking) {
+               stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
+@@ -204,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
+ 
+               /* 1. setup usbip_header */
+               setup_ret_submit_pdu(&pdu_header, urb);
+-              usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
+-                                pdu_header.base.seqnum, urb);
++              usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
++                                pdu_header.base.seqnum);
+               usbip_header_correct_endian(&pdu_header, 1);
+ 
+               iov[iovnum].iov_base = &pdu_header;
+diff --git a/drivers/usb/usbip/usbip_common.c 
b/drivers/usb/usbip/usbip_common.c
+index 8b232290be6b..e24b24fa0f16 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -335,13 +335,10 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+       char *bp = buf;
+       int osize = size;
+ 
+-      usbip_dbg_xmit("enter\n");
+-
+-      if (!sock || !buf || !size) {
+-              pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
+-                     size);
++      if (!sock || !buf || !size)
+               return -EINVAL;
+-      }
++
++      usbip_dbg_xmit("enter\n");
+ 
+       do {
+               sock->sk->sk_allocation = GFP_NOIO;
+@@ -354,11 +351,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+               msg.msg_flags      = MSG_NOSIGNAL;
+ 
+               result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
+-              if (result <= 0) {
+-                      pr_debug("receive sock %p buf %p size %u ret %d total 
%d\n",
+-                               sock, buf, size, result, total);
++              if (result <= 0)
+                       goto err;
+-              }
+ 
+               size -= result;
+               buf += result;
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index d6dc165e924b..7f161b095176 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -506,9 +506,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct 
urb *urb,
+       struct vhci_device *vdev;
+       unsigned long flags;
+ 
+-      usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
+-                        hcd, urb, mem_flags);
+-
+       if (portnum > VHCI_HC_PORTS) {
+               pr_err("invalid port number %d\n", portnum);
+               return -ENODEV;
+@@ -671,8 +668,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct 
urb *urb, int status)
+       struct vhci_device *vdev;
+       unsigned long flags;
+ 
+-      pr_info("dequeue a urb %p\n", urb);
+-
+       spin_lock_irqsave(&vhci->lock, flags);
+ 
+       priv = urb->hcpriv;
+@@ -700,7 +695,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct 
urb *urb, int status)
+               /* tcp connection is closed */
+               spin_lock(&vdev->priv_lock);
+ 
+-              pr_info("device %p seems to be disconnected\n", vdev);
+               list_del(&priv->list);
+               kfree(priv);
+               urb->hcpriv = NULL;
+@@ -712,8 +706,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct 
urb *urb, int status)
+                * vhci_rx will receive RET_UNLINK and give back the URB.
+                * Otherwise, we give back it here.
+                */
+-              pr_info("gives back urb %p\n", urb);
+-
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
+ 
+               spin_unlock_irqrestore(&vhci->lock, flags);
+@@ -741,8 +733,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct 
urb *urb, int status)
+ 
+               unlink->unlink_seqnum = priv->seqnum;
+ 
+-              pr_info("device %p seems to be still connected\n", vdev);
+-
+               /* send cmd_unlink and try to cancel the pending URB in the
+                * peer */
+               list_add_tail(&unlink->list, &vdev->unlink_tx);
+@@ -823,7 +813,7 @@ static void vhci_shutdown_connection(struct usbip_device 
*ud)
+ 
+       /* need this? see stub_dev.c */
+       if (ud->tcp_socket) {
+-              pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
++              pr_debug("shutdown tcp_socket\n");
+               kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+       }
+ 
+diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
+index fc2d319e2360..5943deeec115 100644
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device 
*vdev, __u32 seqnum)
+               urb = priv->urb;
+               status = urb->status;
+ 
+-              usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
+-                              urb, priv, seqnum);
++              usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
+ 
+               switch (status) {
+               case -ENOENT:
+                       /* fall through */
+               case -ECONNRESET:
+-                      dev_info(&urb->dev->dev,
+-                               "urb %p was unlinked %ssynchronuously.\n", urb,
+-                               status == -ENOENT ? "" : "a");
++                      dev_dbg(&urb->dev->dev,
++                               "urb seq# %u was unlinked %ssynchronuously\n",
++                               seqnum, status == -ENOENT ? "" : "a");
+                       break;
+               case -EINPROGRESS:
+                       /* no info output */
+                       break;
+               default:
+-                      dev_info(&urb->dev->dev,
+-                               "urb %p may be in a error, status %d\n", urb,
+-                               status);
++                      dev_dbg(&urb->dev->dev,
++                               "urb seq# %u may be in a error, status %d\n",
++                               seqnum, status);
+               }
+ 
+               list_del(&priv->list);
+@@ -80,8 +79,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
+ 
+       if (!urb) {
+-              pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+-              pr_info("max seqnum %d\n",
++              pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
++                      pdu->base.seqnum,
+                       atomic_read(&vhci->seqnum));
+               usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+               return;
+@@ -104,7 +103,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+       if (usbip_dbg_flag_vhci_rx)
+               usbip_dump_urb(urb);
+ 
+-      usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
++      usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
+ 
+       spin_lock_irqsave(&vhci->lock, flags);
+       usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
+@@ -170,7 +169,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+               pr_info("the urb (seqnum %d) was already given back\n",
+                       pdu->base.seqnum);
+       } else {
+-              usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
++              usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
+ 
+               /* If unlink is successful, status is -ECONNRESET */
+               urb->status = pdu->u.ret_unlink.status;
+diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
+index 3e7878fe2fd4..a9a663a578b6 100644
+--- a/drivers/usb/usbip/vhci_tx.c
++++ b/drivers/usb/usbip/vhci_tx.c
+@@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
+               memset(&msg, 0, sizeof(msg));
+               memset(&iov, 0, sizeof(iov));
+ 
+-              usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
++              usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
++                                priv->seqnum);
+ 
+               /* 1. setup usbip_header */
+               setup_cmd_submit_pdu(&pdu_header, urb);
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 965cc5693a46..c9447a689522 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -48,7 +48,7 @@ enum cpuhp_state {
+       CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+       CPUHP_SH_SH3X_PREPARE,
+       CPUHP_BLK_MQ_PREPARE,
+-      CPUHP_TIMERS_DEAD,
++      CPUHP_TIMERS_PREPARE,
+       CPUHP_NOTF_ERR_INJ_PREPARE,
+       CPUHP_MIPS_SOC_PREPARE,
+       CPUHP_BRINGUP_CPU,
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index a0649973ee5b..b9dfca557a6c 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -246,7 +246,8 @@ struct ipv6_pinfo {
+                                                * 100: prefer care-of address
+                                                */
+                               dontfrag:1,
+-                              autoflowlabel:1;
++                              autoflowlabel:1,
++                              autoflowlabel_set:1;
+       __u8                    min_hopcount;
+       __u8                    tclass;
+       __be32                  rcv_flowinfo;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 6045d4d58065..25ed105bbcfb 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -143,7 +143,7 @@ enum {
+       MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
+       MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
+       MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
+-      MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
++      MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
+       MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
+       MLX5_CMD_OP_ALLOC_PD                      = 0x800,
+       MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
+@@ -6689,7 +6689,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+       u8         vxlan_udp_port[0x10];
+ };
+ 
+-struct mlx5_ifc_set_rate_limit_out_bits {
++struct mlx5_ifc_set_pp_rate_limit_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+ 
+@@ -6698,7 +6698,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
+       u8         reserved_at_40[0x40];
+ };
+ 
+-struct mlx5_ifc_set_rate_limit_in_bits {
++struct mlx5_ifc_set_pp_rate_limit_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+ 
+@@ -6711,6 +6711,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
+       u8         reserved_at_60[0x20];
+ 
+       u8         rate_limit[0x20];
++
++      u8         reserved_at_a0[0x160];
+ };
+ 
+ struct mlx5_ifc_access_register_out_bits {
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index b83507c0640c..e38f471a5402 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -99,12 +99,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
+ 
+ /* Note: callers invoking this in a loop must use a compiler barrier,
+  * for example cpu_relax(). Callers must hold producer_lock.
++ * Callers are responsible for making sure pointer that is being queued
++ * points to a valid data.
+  */
+ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
+ {
+       if (unlikely(!r->size) || r->queue[r->producer])
+               return -ENOSPC;
+ 
++      /* Make sure the pointer we are storing points to a valid data. */
++      /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
++      smp_wmb();
++
+       r->queue[r->producer++] = ptr;
+       if (unlikely(r->producer >= r->size))
+               r->producer = 0;
+@@ -244,6 +250,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
+       if (ptr)
+               __ptr_ring_discard_one(r);
+ 
++      /* Make sure anyone accessing data through the pointer is up to date. */
++      /* Pairs with smp_wmb in __ptr_ring_produce. */
++      smp_read_barrier_depends();
+       return ptr;
+ }
+ 
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 647532b0eb03..f50b717ce644 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -219,7 +219,8 @@ struct tcp_sock {
+       } rack;
+       u16     advmss;         /* Advertised MSS                       */
+       u8      rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
+-              unused:7;
++              is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
++              unused:6;
+       u8      nonagle     : 4,/* Disable Nagle algorithm?             */
+               thin_lto    : 1,/* Use linear timeouts for thin streams */
+               thin_dupack : 1,/* Fast retransmit on first dupack      */
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 51d601f192d4..ec86e4e55ea3 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -274,9 +274,11 @@ unsigned long round_jiffies_up(unsigned long j);
+ unsigned long round_jiffies_up_relative(unsigned long j);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
++int timers_prepare_cpu(unsigned int cpu);
+ int timers_dead_cpu(unsigned int cpu);
+ #else
+-#define timers_dead_cpu NULL
++#define timers_prepare_cpu    NULL
++#define timers_dead_cpu               NULL
+ #endif
+ 
+ #endif
+diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
+index 4d6ec58a8d45..2edb150f1a4d 100644
+--- a/include/linux/vm_event_item.h
++++ b/include/linux/vm_event_item.h
+@@ -89,10 +89,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ #endif
+ #endif
+ #ifdef CONFIG_DEBUG_TLBFLUSH
+-#ifdef CONFIG_SMP
+               NR_TLB_REMOTE_FLUSH,    /* cpu tried to flush others' tlbs */
+               NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+-#endif /* CONFIG_SMP */
+               NR_TLB_LOCAL_FLUSH_ALL,
+               NR_TLB_LOCAL_FLUSH_ONE,
+ #endif /* CONFIG_DEBUG_TLBFLUSH */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 51c6b9786c46..0e3dcd5a134d 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -33,6 +33,8 @@
+ #include <net/flow.h>
+ #include <net/flow_dissector.h>
+ 
++#define IPV4_MIN_MTU          68                      /* RFC 791 */
++
+ struct sock;
+ 
+ struct inet_skb_parm {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index fba4fc46871d..caf35e062639 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1001,7 +1001,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff 
*skb);
+ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+                           struct rate_sample *rs);
+ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+-                struct skb_mstamp *now, struct rate_sample *rs);
++                bool is_sack_reneg, struct skb_mstamp *now, struct 
rate_sample *rs);
+ void tcp_rate_check_app_limited(struct sock *sk);
+ 
+ /* These functions determine how the current flow behaves in respect of SACK
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index e1436ca4aed0..802eb3361a0a 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1309,9 +1309,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
+        * before blk_mq_queue_reinit_notify() from notify_dead(),
+        * otherwise a RCU stall occurs.
+        */
+-      [CPUHP_TIMERS_DEAD] = {
++      [CPUHP_TIMERS_PREPARE] = {
+               .name                   = "timers:dead",
+-              .startup.single         = NULL,
++              .startup.single         = timers_prepare_cpu,
+               .teardown.single        = timers_dead_cpu,
+       },
+       /* Kicks the plugged cpu into life */
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3bcb61b52f6c..dae1a45be504 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -663,6 +663,11 @@ static void tick_nohz_restart(struct tick_sched *ts, 
ktime_t now)
+               tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+ }
+ 
++static inline bool local_timer_softirq_pending(void)
++{
++      return local_softirq_pending() & TIMER_SOFTIRQ;
++}
++
+ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+                                        ktime_t now, int cpu)
+ {
+@@ -679,8 +684,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct 
tick_sched *ts,
+       } while (read_seqretry(&jiffies_lock, seq));
+       ts->last_jiffies = basejiff;
+ 
+-      if (rcu_needs_cpu(basemono, &next_rcu) ||
+-          arch_needs_cpu() || irq_work_needs_cpu()) {
++      /*
++       * Keep the periodic tick, when RCU, architecture or irq_work
++       * requests it.
++       * Aside of that check whether the local timer softirq is
++       * pending. If so its a bad idea to call get_next_timer_interrupt()
++       * because there is an already expired timer, so it will request
++       * immeditate expiry, which rearms the hardware timer with a
++       * minimal delta which brings us back to this place
++       * immediately. Lather, rinse and repeat...
++       */
++      if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
++          irq_work_needs_cpu() || local_timer_softirq_pending()) {
+               next_tick = basemono + TICK_NSEC;
+       } else {
+               /*
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 7d670362891a..e872f7f05e8a 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -849,11 +849,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 
tflags, u32 cpu)
+       struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+ 
+       /*
+-       * If the timer is deferrable and nohz is active then we need to use
+-       * the deferrable base.
++       * If the timer is deferrable and NO_HZ_COMMON is set then we need
++       * to use the deferrable base.
+        */
+-      if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+-          (tflags & TIMER_DEFERRABLE))
++      if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
+               base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+       return base;
+ }
+@@ -863,11 +862,10 @@ static inline struct timer_base 
*get_timer_this_cpu_base(u32 tflags)
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ 
+       /*
+-       * If the timer is deferrable and nohz is active then we need to use
+-       * the deferrable base.
++       * If the timer is deferrable and NO_HZ_COMMON is set then we need
++       * to use the deferrable base.
+        */
+-      if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+-          (tflags & TIMER_DEFERRABLE))
++      if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
+               base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+       return base;
+ }
+@@ -1021,8 +1019,6 @@ __mod_timer(struct timer_list *timer, unsigned long 
expires, bool pending_only)
+       if (!ret && pending_only)
+               goto out_unlock;
+ 
+-      debug_activate(timer, expires);
+-
+       new_base = get_target_base(base, timer->flags);
+ 
+       if (base != new_base) {
+@@ -1046,6 +1042,8 @@ __mod_timer(struct timer_list *timer, unsigned long 
expires, bool pending_only)
+               }
+       }
+ 
++      debug_activate(timer, expires);
++
+       timer->expires = expires;
+       /*
+        * If 'idx' was calculated above and the base time did not advance
+@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct 
softirq_action *h)
+       base->must_forward_clk = false;
+ 
+       __run_timers(base);
+-      if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
++      if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+               __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+ }
+ 
+@@ -1853,6 +1851,21 @@ static void migrate_timer_list(struct timer_base 
*new_base, struct hlist_head *h
+       }
+ }
+ 
++int timers_prepare_cpu(unsigned int cpu)
++{
++      struct timer_base *base;
++      int b;
++
++      for (b = 0; b < NR_BASES; b++) {
++              base = per_cpu_ptr(&timer_bases[b], cpu);
++              base->clk = jiffies;
++              base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
++              base->is_idle = false;
++              base->must_forward_clk = true;
++      }
++      return 0;
++}
++
+ int timers_dead_cpu(unsigned int cpu)
+ {
+       struct timer_base *old_base;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index f5c016e8fc88..3e1d11f4fe44 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
+ /* Missed count stored at end */
+ #define RB_MISSED_STORED      (1 << 30)
+ 
++#define RB_MISSED_FLAGS               (RB_MISSED_EVENTS|RB_MISSED_STORED)
++
+ struct buffer_data_page {
+       u64              time_stamp;    /* page time stamp */
+       local_t          commit;        /* write committed index */
+@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
+  */
+ size_t ring_buffer_page_len(void *page)
+ {
+-      return local_read(&((struct buffer_data_page *)page)->commit)
++      struct buffer_data_page *bpage = page;
++
++      return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
+               + BUF_PAGE_HDR_SIZE;
+ }
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4214cd960b8e..15b02645ce8b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6181,7 +6181,7 @@ tracing_buffers_splice_read(struct file *file, loff_t 
*ppos,
+               .spd_release    = buffer_spd_release,
+       };
+       struct buffer_ref *ref;
+-      int entries, size, i;
++      int entries, i;
+       ssize_t ret = 0;
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+@@ -6232,14 +6232,6 @@ tracing_buffers_splice_read(struct file *file, loff_t 
*ppos,
+                       break;
+               }
+ 
+-              /*
+-               * zero out any left over data, this is going to
+-               * user land.
+-               */
+-              size = ring_buffer_page_len(ref->page);
+-              if (size < PAGE_SIZE)
+-                      memset(ref->page + size, 0, PAGE_SIZE - size);
+-
+               page = virt_to_page(ref->page);
+ 
+               spd.pages[i] = page;
+@@ -6963,6 +6955,7 @@ allocate_trace_buffer(struct trace_array *tr, struct 
trace_buffer *buf, int size
+       buf->data = alloc_percpu(struct trace_array_cpu);
+       if (!buf->data) {
+               ring_buffer_free(buf->buffer);
++              buf->buffer = NULL;
+               return -ENOMEM;
+       }
+ 
+@@ -6986,7 +6979,9 @@ static int allocate_trace_buffers(struct trace_array 
*tr, int size)
+                                   allocate_snapshot ? size : 1);
+       if (WARN_ON(ret)) {
+               ring_buffer_free(tr->trace_buffer.buffer);
++              tr->trace_buffer.buffer = NULL;
+               free_percpu(tr->trace_buffer.data);
++              tr->trace_buffer.data = NULL;
+               return -ENOMEM;
+       }
+       tr->allocated_snapshot = allocate_snapshot;
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 5d4006e589cb..4f831225d34f 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -1092,19 +1092,20 @@ static int br_dev_newlink(struct net *src_net, struct 
net_device *dev,
+       struct net_bridge *br = netdev_priv(dev);
+       int err;
+ 
++      err = register_netdevice(dev);
++      if (err)
++              return err;
++
+       if (tb[IFLA_ADDRESS]) {
+               spin_lock_bh(&br->lock);
+               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+               spin_unlock_bh(&br->lock);
+       }
+ 
+-      err = register_netdevice(dev);
+-      if (err)
+-              return err;
+-
+       err = br_changelink(dev, tb, data);
+       if (err)
+-              unregister_netdevice(dev);
++              br_dev_delete(dev, NULL);
++
+       return err;
+ }
+ 
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 7001da910c6b..b7efe2f19f83 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -263,7 +263,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
+       spin_lock_irqsave(&net->nsid_lock, flags);
+       peer = idr_find(&net->netns_ids, id);
+       if (peer)
+-              get_net(peer);
++              peer = maybe_get_net(peer);
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
+       rcu_read_unlock();
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index aec5605944d3..a64515583bc1 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3823,7 +3823,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+       struct sock *sk = skb->sk;
+ 
+       if (!skb_may_tx_timestamp(sk, false))
+-              return;
++              goto err;
+ 
+       /* Take a reference to prevent skb_orphan() from freeing the socket,
+        * but only if the socket refcount is not zero.
+@@ -3832,7 +3832,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+               *skb_hwtstamps(skb) = *hwtstamps;
+               __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+               sock_put(sk);
++              return;
+       }
++
++err:
++      kfree_skb(skb);
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 062a67ca9a21..f08f984ebc56 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1380,7 +1380,7 @@ static void inetdev_changename(struct net_device *dev, 
struct in_device *in_dev)
+ 
+ static bool inetdev_valid_mtu(unsigned int mtu)
+ {
+-      return mtu >= 68;
++      return mtu >= IPV4_MIN_MTU;
+ }
+ 
+ static void inetdev_send_gratuitous_arp(struct net_device *dev,
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 968d8e165e3d..ffae472e250a 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1253,14 +1253,19 @@ static int __net_init ip_fib_net_init(struct net *net)
+ 
+ static void ip_fib_net_exit(struct net *net)
+ {
+-      unsigned int i;
++      int i;
+ 
+       rtnl_lock();
+ #ifdef CONFIG_IP_MULTIPLE_TABLES
+       RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+       RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
+ #endif
+-      for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
++      /* Destroy the tables in reverse order to guarantee that the
++       * local table, ID 255, is destroyed before the main table, ID
++       * 254. This is necessary as the local table may contain
++       * references to data contained in the main table.
++       */
++      for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
+               struct hlist_head *head = &net->ipv4.fib_table_hash[i];
+               struct hlist_node *tmp;
+               struct fib_table *tb;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 08575e3bd135..7bff0c65046f 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -89,6 +89,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/times.h>
+ #include <linux/pkt_sched.h>
++#include <linux/byteorder/generic.h>
+ 
+ #include <net/net_namespace.h>
+ #include <net/arp.h>
+@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int 
gdeleted, int sdeleted)
+       return scount;
+ }
+ 
++/* source address selection per RFC 3376 section 4.2.13 */
++static __be32 igmpv3_get_srcaddr(struct net_device *dev,
++                               const struct flowi4 *fl4)
++{
++      struct in_device *in_dev = __in_dev_get_rcu(dev);
++
++      if (!in_dev)
++              return htonl(INADDR_ANY);
++
++      for_ifa(in_dev) {
++              if (inet_ifa_match(fl4->saddr, ifa))
++                      return fl4->saddr;
++      } endfor_ifa(in_dev);
++
++      return htonl(INADDR_ANY);
++}
++
+ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int 
mtu)
+ {
+       struct sk_buff *skb;
+@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device 
*dev, unsigned int mtu)
+       pip->frag_off = htons(IP_DF);
+       pip->ttl      = 1;
+       pip->daddr    = fl4.daddr;
+-      pip->saddr    = fl4.saddr;
++      pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
+       pip->protocol = IPPROTO_IGMP;
+       pip->tot_len  = 0;      /* filled in later */
+       ip_select_ident(net, skb, NULL);
+@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, 
int gdel, int sdel)
+ }
+ 
+ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
+-      int type, struct igmpv3_grec **ppgr)
++      int type, struct igmpv3_grec **ppgr, unsigned int mtu)
+ {
+       struct net_device *dev = pmc->interface->dev;
+       struct igmpv3_report *pih;
+       struct igmpv3_grec *pgr;
+ 
+-      if (!skb)
+-              skb = igmpv3_newpack(dev, dev->mtu);
+-      if (!skb)
+-              return NULL;
++      if (!skb) {
++              skb = igmpv3_newpack(dev, mtu);
++              if (!skb)
++                      return NULL;
++      }
+       pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
+       pgr->grec_type = type;
+       pgr->grec_auxwords = 0;
+@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, 
struct ip_mc_list *pmc,
+       struct igmpv3_grec *pgr = NULL;
+       struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+       int scount, stotal, first, isquery, truncate;
++      unsigned int mtu;
+ 
+       if (pmc->multiaddr == IGMP_ALL_HOSTS)
+               return skb;
+       if (ipv4_is_local_multicast(pmc->multiaddr) && 
!net->ipv4.sysctl_igmp_llm_reports)
+               return skb;
+ 
++      mtu = READ_ONCE(dev->mtu);
++      if (mtu < IPV4_MIN_MTU)
++              return skb;
++
+       isquery = type == IGMPV3_MODE_IS_INCLUDE ||
+                 type == IGMPV3_MODE_IS_EXCLUDE;
+       truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
+@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, 
struct ip_mc_list *pmc,
+                   AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+                       if (skb)
+                               igmpv3_sendpack(skb);
+-                      skb = igmpv3_newpack(dev, dev->mtu);
++                      skb = igmpv3_newpack(dev, mtu);
+               }
+       }
+       first = 1;
+@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, 
struct ip_mc_list *pmc,
+                               pgr->grec_nsrcs = htons(scount);
+                       if (skb)
+                               igmpv3_sendpack(skb);
+-                      skb = igmpv3_newpack(dev, dev->mtu);
++                      skb = igmpv3_newpack(dev, mtu);
+                       first = 1;
+                       scount = 0;
+               }
+               if (first) {
+-                      skb = add_grhead(skb, pmc, type, &pgr);
++                      skb = add_grhead(skb, pmc, type, &pgr, mtu);
+                       first = 0;
+               }
+               if (!skb)
+@@ -538,7 +562,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, 
struct ip_mc_list *pmc,
+                               igmpv3_sendpack(skb);
+                               skb = NULL; /* add_grhead will get a new one */
+                       }
+-                      skb = add_grhead(skb, pmc, type, &pgr);
++                      skb = add_grhead(skb, pmc, type, &pgr, mtu);
+               }
+       }
+       if (pgr)
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index bd7f1836bb70..96536a0d6e2d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -346,8 +346,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+       dev->needed_headroom = t_hlen + hlen;
+       mtu -= (dev->hard_header_len + t_hlen);
+ 
+-      if (mtu < 68)
+-              mtu = 68;
++      if (mtu < IPV4_MIN_MTU)
++              mtu = IPV4_MIN_MTU;
+ 
+       return mtu;
+ }
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 9879b73d5565..59d8770055ed 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -502,11 +502,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr 
*msg, size_t len)
+       int err;
+       struct ip_options_data opt_copy;
+       struct raw_frag_vec rfv;
++      int hdrincl;
+ 
+       err = -EMSGSIZE;
+       if (len > 0xFFFF)
+               goto out;
+ 
++      /* hdrincl should be READ_ONCE(inet->hdrincl)
++       * but READ_ONCE() doesn't work with bit fields
++       */
++      hdrincl = inet->hdrincl;
+       /*
+        *      Check the flags.
+        */
+@@ -582,7 +587,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr 
*msg, size_t len)
+               /* Linux does not mangle headers on raw sockets,
+                * so that IP options + IP_HDRINCL is non-sense.
+                */
+-              if (inet->hdrincl)
++              if (hdrincl)
+                       goto done;
+               if (ipc.opt->opt.srr) {
+                       if (!daddr)
+@@ -604,12 +609,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr 
*msg, size_t len)
+ 
+       flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
+                          RT_SCOPE_UNIVERSE,
+-                         inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
++                         hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+                          inet_sk_flowi_flags(sk) |
+-                          (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
++                          (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+                          daddr, saddr, 0, 0);
+ 
+-      if (!inet->hdrincl) {
++      if (!hdrincl) {
+               rfv.msg = msg;
+               rfv.hlen = 0;
+ 
+@@ -634,7 +639,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr 
*msg, size_t len)
+               goto do_confirm;
+ back_from_confirm:
+ 
+-      if (inet->hdrincl)
++      if (hdrincl)
+               err = raw_send_hdrinc(sk, &fl4, msg, len,
+                                     &rt, msg->msg_flags, &ipc.sockc);
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index dd33c785ce16..05d2bde00864 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2297,6 +2297,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+       tp->snd_cwnd_cnt = 0;
+       tp->window_clamp = 0;
+       tcp_set_ca_state(sk, TCP_CA_Open);
++      tp->is_sack_reneg = 0;
+       tcp_clear_retrans(tp);
+       inet_csk_delack_init(sk);
+       /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index cb8db347c680..97f9cac98348 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -81,7 +81,8 @@ struct bbr {
+       u32     lt_last_lost;        /* LT intvl start: tp->lost */
+       u32     pacing_gain:10, /* current gain for setting pacing rate */
+               cwnd_gain:10,   /* current gain for setting cwnd */
+-              full_bw_cnt:3,  /* number of rounds without large bw gains */
++              full_bw_reached:1,   /* reached full bw in Startup? */
++              full_bw_cnt:2,  /* number of rounds without large bw gains */
+               cycle_idx:3,    /* current index in pacing_gain cycle array */
+               has_seen_rtt:1, /* have we seen an RTT sample yet? */
+               unused_b:5;
+@@ -151,7 +152,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
+ {
+       const struct bbr *bbr = inet_csk_ca(sk);
+ 
+-      return bbr->full_bw_cnt >= bbr_full_bw_cnt;
++      return bbr->full_bw_reached;
+ }
+ 
+ /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
+@@ -688,6 +689,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
+               return;
+       }
+       ++bbr->full_bw_cnt;
++      bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
+ }
+ 
+ /* If pipe is probably full, drain the queue and then enter steady-state. */
+@@ -821,6 +823,7 @@ static void bbr_init(struct sock *sk)
+       bbr->restore_cwnd = 0;
+       bbr->round_start = 0;
+       bbr->idle_restart = 0;
++      bbr->full_bw_reached = 0;
+       bbr->full_bw = 0;
+       bbr->full_bw_cnt = 0;
+       bbr->cycle_mstamp.v64 = 0;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 05255a286888..2f107e46355c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1966,6 +1966,8 @@ void tcp_enter_loss(struct sock *sk)
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+               tp->sacked_out = 0;
+               tp->fackets_out = 0;
++              /* Mark SACK reneging until we recover from this loss event. */
++              tp->is_sack_reneg = 1;
+       }
+       tcp_clear_all_retrans_hints(tp);
+ 
+@@ -2463,6 +2465,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
+               return true;
+       }
+       tcp_set_ca_state(sk, TCP_CA_Open);
++      tp->is_sack_reneg = 0;
+       return false;
+ }
+ 
+@@ -2494,8 +2497,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool 
frto_undo)
+                       NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPSPURIOUSRTOS);
+               inet_csk(sk)->icsk_retransmits = 0;
+-              if (frto_undo || tcp_is_sack(tp))
++              if (frto_undo || tcp_is_sack(tp)) {
+                       tcp_set_ca_state(sk, TCP_CA_Open);
++                      tp->is_sack_reneg = 0;
++              }
+               return true;
+       }
+       return false;
+@@ -3589,6 +3594,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff 
*skb, int flag)
+       struct tcp_sacktag_state sack_state;
+       struct rate_sample rs = { .prior_delivered = 0 };
+       u32 prior_snd_una = tp->snd_una;
++      bool is_sack_reneg = tp->is_sack_reneg;
+       u32 ack_seq = TCP_SKB_CB(skb)->seq;
+       u32 ack = TCP_SKB_CB(skb)->ack_seq;
+       bool is_dupack = false;
+@@ -3711,7 +3717,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff 
*skb, int flag)
+               tcp_schedule_loss_probe(sk);
+       delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
+       lost = tp->lost - lost;                 /* freshly marked lost */
+-      tcp_rate_gen(sk, delivered, lost, &now, &rs);
++      tcp_rate_gen(sk, delivered, lost, is_sack_reneg, &now, &rs);
+       tcp_cong_control(sk, ack, delivered, flag, &rs);
+       tcp_xmit_recovery(sk, rexmit);
+       return 1;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index d577ec07a0d8..b3960738464e 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -828,7 +828,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+                       tcp_time_stamp,
+                       req->ts_recent,
+                       0,
+-                      tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
++                      tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
+                                         AF_INET),
+                       inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+                       ip_hdr(skb)->tos);
+diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
+index 9be1581a5a08..18309f58ab8d 100644
+--- a/net/ipv4/tcp_rate.c
++++ b/net/ipv4/tcp_rate.c
+@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ 
+ /* Update the connection delivery information and generate a rate sample. */
+ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+-                struct skb_mstamp *now, struct rate_sample *rs)
++                bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 snd_us, ack_us;
+@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ 
+       rs->acked_sacked = delivered;   /* freshly ACKed or SACKed */
+       rs->losses = lost;              /* freshly marked lost */
+-      /* Return an invalid sample if no timing information is available. */
+-      if (!rs->prior_mstamp.v64) {
++      /* Return an invalid sample if no timing information is available or
++       * in recovery from loss with SACK reneging. Rate samples taken during
++       * a SACK reneging event may overestimate bw by including packets that
++       * were SACKed before the reneg.
++       */
++      if (!rs->prior_mstamp.v64 || is_sack_reneg) {
+               rs->delivered = -1;
+               rs->interval_us = -1;
+               return;
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 8285a1c108c9..5cad76f87536 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -209,7 +209,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+       np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
+       np->mc_loop     = 1;
+       np->pmtudisc    = IPV6_PMTUDISC_WANT;
+-      np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
+       sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
+ 
+       /* Init the ipv4 part of the socket since we can have sockets
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6e01c9a8dfd3..506efba33a89 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -156,6 +156,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ }
+ 
++static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
++{
++      if (!np->autoflowlabel_set)
++              return ip6_default_np_autolabel(net);
++      else
++              return np->autoflowlabel;
++}
++
+ /*
+  * xmit an sk_buff (used by TCP, SCTP and DCCP)
+  * Note : socket lock is not held for SYNACK packets, but might be modified
+@@ -219,7 +227,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+               hlimit = ip6_dst_hoplimit(dst);
+ 
+       ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
+-                                                   np->autoflowlabel, fl6));
++                              ip6_autoflowlabel(net, np), fl6));
+ 
+       hdr->payload_len = htons(seg_len);
+       hdr->nexthdr = proto;
+@@ -1691,7 +1699,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ 
+       ip6_flow_hdr(hdr, v6_cork->tclass,
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel,
+-                                      np->autoflowlabel, fl6));
++                                      ip6_autoflowlabel(net, np), fl6));
+       hdr->hop_limit = v6_cork->hop_limit;
+       hdr->nexthdr = proto;
+       hdr->saddr = fl6->saddr;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 12b2fd512f32..11d22d642488 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -911,7 +911,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
+               if (t->parms.collect_md) {
+                       tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
+                       if (!tun_dst)
+-                              return 0;
++                              goto drop;
+               }
+               ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
+                                   log_ecn_error);
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 38bee173dc2b..6e3871c7f8f7 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -874,6 +874,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+               break;
+       case IPV6_AUTOFLOWLABEL:
+               np->autoflowlabel = valbool;
++              np->autoflowlabel_set = 1;
+               retv = 0;
+               break;
+       }
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 1bdc703cb966..ca8fac6e5a09 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
+ }
+ 
+ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+-      int type, struct mld2_grec **ppgr)
++      int type, struct mld2_grec **ppgr, unsigned int mtu)
+ {
+-      struct net_device *dev = pmc->idev->dev;
+       struct mld2_report *pmr;
+       struct mld2_grec *pgr;
+ 
+-      if (!skb)
+-              skb = mld_newpack(pmc->idev, dev->mtu);
+-      if (!skb)
+-              return NULL;
++      if (!skb) {
++              skb = mld_newpack(pmc->idev, mtu);
++              if (!skb)
++                      return NULL;
++      }
+       pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+       pgr->grec_type = type;
+       pgr->grec_auxwords = 0;
+@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+       struct mld2_grec *pgr = NULL;
+       struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+       int scount, stotal, first, isquery, truncate;
++      unsigned int mtu;
+ 
+       if (pmc->mca_flags & MAF_NOREPORT)
+               return skb;
+ 
++      mtu = READ_ONCE(dev->mtu);
++      if (mtu < IPV6_MIN_MTU)
++              return skb;
++
+       isquery = type == MLD2_MODE_IS_INCLUDE ||
+                 type == MLD2_MODE_IS_EXCLUDE;
+       truncate = type == MLD2_MODE_IS_EXCLUDE ||
+@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+                   AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+                       if (skb)
+                               mld_sendpack(skb);
+-                      skb = mld_newpack(idev, dev->mtu);
++                      skb = mld_newpack(idev, mtu);
+               }
+       }
+       first = 1;
+@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+                               pgr->grec_nsrcs = htons(scount);
+                       if (skb)
+                               mld_sendpack(skb);
+-                      skb = mld_newpack(idev, dev->mtu);
++                      skb = mld_newpack(idev, mtu);
+                       first = 1;
+                       scount = 0;
+               }
+               if (first) {
+-                      skb = add_grhead(skb, pmc, type, &pgr);
++                      skb = add_grhead(skb, pmc, type, &pgr, mtu);
+                       first = 0;
+               }
+               if (!skb)
+@@ -1814,7 +1819,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+                               mld_sendpack(skb);
+                               skb = NULL; /* add_grhead will get a new one */
+                       }
+-                      skb = add_grhead(skb, pmc, type, &pgr);
++                      skb = add_grhead(skb, pmc, type, &pgr, mtu);
+               }
+       }
+       if (pgr)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 7ac2365aa6fb..eb624547382f 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -962,7 +962,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+                       tcp_rsk(req)->rcv_nxt,
+                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+                       tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+-                      tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
++                      tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
+                       0, 0);
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1ff497bd9c20..e1c123d4cdda 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -261,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+       struct sock *sk = skb->sk;
+       int ret = -ENOMEM;
+ 
++      if (!net_eq(dev_net(dev), sock_net(sk)))
++              return 0;
++
+       dev_hold(dev);
+ 
+       if (is_vmalloc_addr(skb->head))
+diff --git a/net/rds/send.c b/net/rds/send.c
+index ad247dc71ebb..ef53d164e146 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1006,6 +1006,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
+                       continue;
+ 
+               if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
++                      if (cmsg->cmsg_len <
++                          CMSG_LEN(sizeof(struct rds_rdma_args)))
++                              return -EINVAL;
+                       args = CMSG_DATA(cmsg);
+                       *rdma_bytes += args->remote_vec.bytes;
+               }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c2ab864da50d..7181ce6c62bf 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4246,7 +4246,7 @@ static int sctp_init_sock(struct sock *sk)
+       SCTP_DBG_OBJCNT_INC(sock);
+ 
+       local_bh_disable();
+-      percpu_counter_inc(&sctp_sockets_allocated);
++      sk_sockets_allocated_inc(sk);
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+       /* Nothing can fail after this block, otherwise
+@@ -4290,7 +4290,7 @@ static void sctp_destroy_sock(struct sock *sk)
+       }
+       sctp_endpoint_free(sp->ep);
+       local_bh_disable();
+-      percpu_counter_dec(&sctp_sockets_allocated);
++      sk_sockets_allocated_dec(sk);
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+       local_bh_enable();
+ }
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index c9af022676c2..47c3e97c3136 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -319,7 +319,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
+  */
+ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
+ {
+-      if (WARN_ON(!hdac_acomp))
++      if (!hdac_acomp)
+               return -ENODEV;
+ 
+       hdac_acomp->audio_ops = aops;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ba40596b9d92..4ef3b0067876 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5971,6 +5971,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x1b, 0x01011020},
+               {0x21, 0x02211010}),
++      SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++              {0x12, 0x90a60130},
++              {0x14, 0x90170110},
++              {0x1b, 0x01011020},
++              {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
+index c69e97654fc6..f88632426c0a 100644
+--- a/sound/soc/codecs/da7218.c
++++ b/sound/soc/codecs/da7218.c
+@@ -2519,7 +2519,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
+       }
+ 
+       if (da7218->dev_id == DA7218_DEV_ID) {
+-              hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
++              hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
+               if (!hpldet_np)
+                       return pdata;
+ 
+diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
+index 5acd5b69fb83..f9b6c5a81b47 100644
+--- a/sound/soc/codecs/tlv320aic31xx.h
++++ b/sound/soc/codecs/tlv320aic31xx.h
+@@ -115,7 +115,7 @@ struct aic31xx_pdata {
+ /* INT2 interrupt control */
+ #define AIC31XX_INT2CTRL      AIC31XX_REG(0, 49)
+ /* GPIO1 control */
+-#define AIC31XX_GPIO1         AIC31XX_REG(0, 50)
++#define AIC31XX_GPIO1         AIC31XX_REG(0, 51)
+ 
+ #define AIC31XX_DACPRB                AIC31XX_REG(0, 60)
+ /* ADC Instruction Set Register */
+diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
+index a2104d68169d..26fd6a664b9b 100644
+--- a/sound/soc/codecs/twl4030.c
++++ b/sound/soc/codecs/twl4030.c
+@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
+       struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
+       struct device_node *twl4030_codec_node = NULL;
+ 
+-      twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
++      twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
+                                                 "codec");
+ 
+       if (!pdata && twl4030_codec_node) {
+@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
+                                    GFP_KERNEL);
+               if (!pdata) {
+                       dev_err(codec->dev, "Can not allocate memory\n");
++                      of_node_put(twl4030_codec_node);
+                       return NULL;
+               }
+               twl4030_setup_pdata_of(pdata, twl4030_codec_node);
++              of_node_put(twl4030_codec_node);
+       }
+ 
+       return pdata;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 757af795cebd..c03c9da076c2 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1465,7 +1465,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
+                le64_to_cpu(footer->timestamp));
+ 
+       while (pos < firmware->size &&
+-             pos - firmware->size > sizeof(*region)) {
++             sizeof(*region) < firmware->size - pos) {
+               region = (void *)&(firmware->data[pos]);
+               region_name = "Unknown";
+               reg = 0;
+@@ -1526,8 +1526,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
+                        regions, le32_to_cpu(region->len), offset,
+                        region_name);
+ 
+-              if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+-                  firmware->size) {
++              if (le32_to_cpu(region->len) >
++                  firmware->size - pos - sizeof(*region)) {
+                       adsp_err(dsp,
+                                "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                file, regions, region_name,
+@@ -1992,7 +1992,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+ 
+       blocks = 0;
+       while (pos < firmware->size &&
+-             pos - firmware->size > sizeof(*blk)) {
++             sizeof(*blk) < firmware->size - pos) {
+               blk = (void *)(&firmware->data[pos]);
+ 
+               type = le16_to_cpu(blk->type);
+@@ -2066,8 +2066,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+               }
+ 
+               if (reg) {
+-                      if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+-                          firmware->size) {
++                      if (le32_to_cpu(blk->len) >
++                          firmware->size - pos - sizeof(*blk)) {
+                               adsp_err(dsp,
+                                        "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                        file, blocks, region_name,
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index fde08660b63b..1c03490e1182 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -1467,12 +1467,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
+                               sizeof(fsl_ssi_ac97_dai));
+ 
+               fsl_ac97_data = ssi_private;
+-
+-              ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+-              if (ret) {
+-                      dev_err(&pdev->dev, "could not set AC'97 ops\n");
+-                      return ret;
+-              }
+       } else {
+               /* Initialize this copy of the CPU DAI driver structure */
+               memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+@@ -1583,6 +1577,14 @@ static int fsl_ssi_probe(struct platform_device *pdev)
+                       return ret;
+       }
+ 
++      if (fsl_ssi_is_ac97(ssi_private)) {
++              ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
++              if (ret) {
++                      dev_err(&pdev->dev, "could not set AC'97 ops\n");
++                      goto error_ac97_ops;
++              }
++      }
++
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
+                                             &ssi_private->cpu_dai_drv, 1);
+       if (ret) {
+@@ -1666,6 +1668,10 @@ static int fsl_ssi_probe(struct platform_device *pdev)
+       fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
+ 
+ error_asoc_register:
++      if (fsl_ssi_is_ac97(ssi_private))
++              snd_soc_set_ac97_ops(NULL);
++
++error_ac97_ops:
+       if (ssi_private->soc->imx)
+               fsl_ssi_imx_clean(pdev, ssi_private);
+ 
+diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+index 767be7c76034..1754e094bc28 100644
+--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
++++ b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+@@ -896,7 +896,7 @@ EndTable
+ 
+ GrpTable: Grp3_1
+ 0: TEST Eb,Ib
+-1:
++1: TEST Eb,Ib
+ 2: NOT Eb
+ 3: NEG Eb
+ 4: MUL AL,Eb
+diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
+index 2b3d6d235015..3d7b42e77299 100644
+--- a/tools/usb/usbip/src/utils.c
++++ b/tools/usb/usbip/src/utils.c
+@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
+       char command[SYSFS_BUS_ID_SIZE + 4];
+       char match_busid_attr_path[SYSFS_PATH_MAX];
+       int rc;
++      int cmd_size;
+ 
+       snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
+                "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
+@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
+                attr_name);
+ 
+       if (add)
+-              snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
++              cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
++                                  busid);
+       else
+-              snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
++              cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
++                                  busid);
+ 
+       rc = write_sysfs_attribute(match_busid_attr_path, command,
+-                                 sizeof(command));
++                                 cmd_size);
+       if (rc < 0) {
+               dbg("failed to write match_busid: %s", strerror(errno));
+               return -1;
