commit:     43758b12bec40d0ae26d022ef93a79de156478b9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 17 16:18:20 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 17 16:18:20 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=43758b12

Linux patch 5.10.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1007_linux-5.10.8.patch | 3472 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3476 insertions(+)

diff --git a/0000_README b/0000_README
index d4ad009..b0f1ce8 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.10.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.7
 
+Patch:  1007_linux-5.10.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.10.8.patch b/1007_linux-5.10.8.patch
new file mode 100644
index 0000000..28a8032
--- /dev/null
+++ b/1007_linux-5.10.8.patch
@@ -0,0 +1,3472 @@
+diff --git a/Makefile b/Makefile
+index 9b6c90eed5e9c..4ee137b5d2416 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index ddd4641446bdd..69fe7133c765d 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -1053,6 +1053,12 @@ config ARCH_WANT_LD_ORPHAN_WARN
+         by the linker, since the locations of such sections can change between linker
+         versions.
+ 
++config ARCH_SPLIT_ARG64
++      bool
++      help
++         If a 32-bit architecture requires 64-bit arguments to be split into
++         pairs of 32-bit arguments, select this option.
++
+ source "kernel/gcov/Kconfig"
+ 
+ source "scripts/gcc-plugins/Kconfig"
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index fc7bb2ca16727..64b23b0cd23c7 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
+               break;
+       case BUS_NOTIFY_BIND_DRIVER:
+               od = to_omap_device(pdev);
+-              if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+-                  pm_runtime_status_suspended(dev)) {
++              if (od) {
+                       od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+-                      pm_runtime_set_active(dev);
++                      if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
++                          pm_runtime_status_suspended(dev)) {
++                              pm_runtime_set_active(dev);
++                      }
+               }
+               break;
+       case BUS_NOTIFY_ADD_DEVICE:
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index fce8cbecd6bc7..a884d77739895 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -96,7 +96,8 @@
+ #endif /* CONFIG_ARM64_FORCE_52BIT */
+ 
+ extern phys_addr_t arm64_dma_phys_limit;
+-#define ARCH_LOW_ADDRESS_LIMIT        (arm64_dma_phys_limit - 1)
++extern phys_addr_t arm64_dma32_phys_limit;
++#define ARCH_LOW_ADDRESS_LIMIT        ((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
+ 
+ struct debug_info {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 6f36c4f62f694..0a52e076153bb 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2552,7 +2552,7 @@ static void verify_hyp_capabilities(void)
+       int parange, ipa_max;
+       unsigned int safe_vmid_bits, vmid_bits;
+ 
+-      if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
++      if (!IS_ENABLED(CONFIG_KVM))
+               return;
+ 
+       safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 2b28bf1a53266..b246a4acba416 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -663,6 +663,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ {
+       u64 pmcr, val;
+ 
++      /* No PMU available, PMCR_EL0 may UNDEF... */
++      if (!kvm_arm_support_pmu_v3())
++              return;
++
+       pmcr = read_sysreg(pmcr_el0);
+       /*
+        * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 095540667f0fd..00576a960f11f 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -60,7 +60,7 @@ EXPORT_SYMBOL(memstart_addr);
+  * bit addressable memory area.
+  */
+ phys_addr_t arm64_dma_phys_limit __ro_after_init;
+-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
++phys_addr_t arm64_dma32_phys_limit __ro_after_init;
+ 
+ #ifdef CONFIG_KEXEC_CORE
+ /*
+diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
+index a0dda2a1f2df0..d66da35f2e8d3 100644
+--- a/arch/powerpc/kernel/head_book3s_32.S
++++ b/arch/powerpc/kernel/head_book3s_32.S
+@@ -262,10 +262,19 @@ __secondary_hold_acknowledge:
+ MachineCheck:
+       EXCEPTION_PROLOG_0
+ #ifdef CONFIG_PPC_CHRP
++#ifdef CONFIG_VMAP_STACK
++      mr      r11, r1
++      mfspr   r1, SPRN_SPRG_THREAD
++      lwz     r1, RTAS_SP(r1)
++      cmpwi   cr1, r1, 0
++      bne     cr1, 7f
++      mr      r1, r11
++#else
+       mfspr   r11, SPRN_SPRG_THREAD
+       lwz     r11, RTAS_SP(r11)
+       cmpwi   cr1, r11, 0
+       bne     cr1, 7f
++#endif
+ #endif /* CONFIG_PPC_CHRP */
+       EXCEPTION_PROLOG_1 for_rtas=1
+ 7:    EXCEPTION_PROLOG_2
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index fbf26e0f7a6a9..3a5ecb1039bfb 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -18,6 +18,7 @@ config X86_32
+       select MODULES_USE_ELF_REL
+       select OLD_SIGACTION
+       select GENERIC_VDSO_32
++      select ARCH_SPLIT_ARG64
+ 
+ config X86_64
+       def_bool y
+diff --git a/block/genhd.c b/block/genhd.c
+index 9387f050c248a..ec6264e2ed671 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -256,14 +256,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
++              get_device(part_to_dev(part));
++              piter->part = part;
+               if (!part_nr_sects_read(part) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+-                    piter->idx == 0))
++                    piter->idx == 0)) {
++                      put_device(part_to_dev(part));
++                      piter->part = NULL;
+                       continue;
++              }
+ 
+-              get_device(part_to_dev(part));
+-              piter->part = part;
+               piter->idx += inc;
+               break;
+       }
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 8dfac7f3ed7aa..ff2ee87987c7e 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
+               devname = dev_name(map->dev);
+ 
+       if (name) {
+-              map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
++              if (!map->debugfs_name) {
++                      map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+                                             devname, name);
++                      if (!map->debugfs_name)
++                              return;
++              }
+               name = map->debugfs_name;
+       } else {
+               name = devname;
+@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
+ 
+       if (!strcmp(name, "dummy")) {
+               kfree(map->debugfs_name);
+-
+               map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+                                               dummy_index);
++              if (!map->debugfs_name)
++                              return;
+               name = map->debugfs_name;
+               dummy_index++;
+       }
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index ecceaaa1a66ff..f40ebe9f50474 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -451,6 +451,7 @@ config BLK_DEV_RBD
+ config BLK_DEV_RSXX
+       tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
+       depends on PCI
++      select CRC32
+       help
+         Device driver for IBM's high speed PCIe SSD
+         storage device: Flash Adapter 900GB Full Height.
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 7af1b60582fe5..ba334fe7626db 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1671,7 +1671,8 @@ static void rnbd_destroy_sessions(void)
+        */
+ 
+       list_for_each_entry_safe(sess, sn, &sess_list, list) {
+-              WARN_ON(!rnbd_clt_get_sess(sess));
++              if (!rnbd_clt_get_sess(sess))
++                      continue;
+               close_rtrs(sess);
+               list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
+                       /*
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index 0acc9e241cd7d..b9ccb6a3dad98 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
+ 
+ /* Take a frequency, and issue the fid/vid transition command */
+ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+-              unsigned int index)
++              unsigned int index,
++              struct cpufreq_policy *policy)
+ {
+-      struct cpufreq_policy *policy;
+       u32 fid = 0;
+       u32 vid = 0;
+       int res;
+@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+       freqs.old = find_khz_freq_from_fid(data->currfid);
+       freqs.new = find_khz_freq_from_fid(fid);
+ 
+-      policy = cpufreq_cpu_get(smp_processor_id());
+-      cpufreq_cpu_put(policy);
+-
+       cpufreq_freq_transition_begin(policy, &freqs);
+       res = transition_fid_vid(data, fid, vid);
+       cpufreq_freq_transition_end(policy, &freqs, res);
+@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
+ 
+       powernow_k8_acpi_pst_values(data, newstate);
+ 
+-      ret = transition_frequency_fidvid(data, newstate);
++      ret = transition_frequency_fidvid(data, newstate, pol);
+ 
+       if (ret) {
+               pr_err("transition frequency failed\n");
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index b971505b87152..08d71dafa0015 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
+ 
+       if (desc->chunk) {
+               /* Create and add new element into the linked list */
+-              desc->chunks_alloc++;
+-              list_add_tail(&chunk->list, &desc->chunk->list);
+               if (!dw_edma_alloc_burst(chunk)) {
+                       kfree(chunk);
+                       return NULL;
+               }
++              desc->chunks_alloc++;
++              list_add_tail(&chunk->list, &desc->chunk->list);
+       } else {
+               /* List head */
+               chunk->burst = NULL;
+diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
+index f133ae8dece16..6ad8afbb95f2b 100644
+--- a/drivers/dma/mediatek/mtk-hsdma.c
++++ b/drivers/dma/mediatek/mtk-hsdma.c
+@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+       return 0;
+ 
+ err_free:
++      mtk_hsdma_hw_deinit(hsdma);
+       of_dma_controller_free(pdev->dev.of_node);
+ err_unregister:
+       dma_async_device_unregister(dd);
+diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
+index 85a597228fb04..748b260bbc976 100644
+--- a/drivers/dma/milbeaut-xdmac.c
++++ b/drivers/dma/milbeaut-xdmac.c
+@@ -351,7 +351,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
+ 
+       ret = dma_async_device_register(ddev);
+       if (ret)
+-              return ret;
++              goto disable_xdmac;
+ 
+       ret = of_dma_controller_register(dev->of_node,
+                                        of_dma_simple_xlate, mdev);
+@@ -364,6 +364,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
+ 
+ unregister_dmac:
+       dma_async_device_unregister(ddev);
++disable_xdmac:
++      disable_xdmac(mdev);
+       return ret;
+ }
+ 
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 22faea653ea82..79777550a6ffc 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               has_dre = false;
+ 
+       if (!has_dre)
+-              xdev->common.copy_align = fls(width - 1);
++              xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
+ 
+       if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+           of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+                                   struct device_node *node)
+ {
+-      int ret, i, nr_channels = 1;
++      int ret, i;
++      u32 nr_channels = 1;
+ 
+       ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
+@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       }
+ 
+       /* Register the DMA engine with the core */
+-      dma_async_device_register(&xdev->common);
++      err = dma_async_device_register(&xdev->common);
++      if (err) {
++              dev_err(xdev->dev, "failed to register the dma device\n");
++              goto error;
++      }
+ 
+       err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+                                        xdev);
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 3d4bf9b6a0a2c..06d4ce31838a5 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1382,6 +1382,9 @@ struct intel_dp {
+               bool ycbcr_444_to_420;
+       } dfp;
+ 
++      /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
++      struct pm_qos_request pm_qos;
++
+       /* Display stream compression testing */
+       bool force_dsc_en;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 9bc59fd2f95f5..1901c88d418fa 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1411,7 +1411,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
+        * lowest possible wakeup latency and so prevent the cpu from going into
+        * deep sleep states.
+        */
+-      cpu_latency_qos_update_request(&i915->pm_qos, 0);
++      cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+ 
+       intel_dp_check_edp(intel_dp);
+ 
+@@ -1544,7 +1544,7 @@ done:
+ 
+       ret = recv_bytes;
+ out:
+-      cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
++      cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ 
+       if (vdd)
+               edp_panel_vdd_off(intel_dp, false);
+@@ -1776,6 +1776,9 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+ static void
+ intel_dp_aux_fini(struct intel_dp *intel_dp)
+ {
++      if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
++              cpu_latency_qos_remove_request(&intel_dp->pm_qos);
++
+       kfree(intel_dp->aux.name);
+ }
+ 
+@@ -1818,6 +1821,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
+                                      aux_ch_name(dig_port->aux_ch),
+                                      port_name(encoder->port));
+       intel_dp->aux.transfer = intel_dp_aux_transfer;
++      cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ }
+ 
+ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index acc32066cec35..382cf048eefe0 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -577,8 +577,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ 
+       pci_set_master(pdev);
+ 
+-      cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+-
+       intel_gt_init_workarounds(dev_priv);
+ 
+       /* On the 945G/GM, the chipset reports the MSI capability on the
+@@ -623,7 +621,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ err_msi:
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+-      cpu_latency_qos_remove_request(&dev_priv->pm_qos);
+ err_mem_regions:
+       intel_memory_regions_driver_release(dev_priv);
+ err_ggtt:
+@@ -645,8 +642,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
+ 
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+-
+-      cpu_latency_qos_remove_request(&dev_priv->pm_qos);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 8426d59746693..83f4af097b858 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -892,9 +892,6 @@ struct drm_i915_private {
+ 
+       bool display_irqs_enabled;
+ 
+-      /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+-      struct pm_qos_request pm_qos;
+-
+       /* Sideband mailbox protection */
+       struct mutex sb_lock;
+       struct pm_qos_request sb_qos;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
+index 1ce2001106e56..04e6f6f9b742e 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -617,6 +617,8 @@ int panfrost_job_init(struct panfrost_device *pfdev)
+       }
+ 
+       for (j = 0; j < NUM_JOB_SLOTS; j++) {
++              mutex_init(&js->queue[j].lock);
++
+               js->queue[j].fence_context = dma_fence_context_alloc(1);
+ 
+               ret = drm_sched_init(&js->queue[j].sched,
+@@ -647,8 +649,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
+ 
+       job_write(pfdev, JOB_INT_MASK, 0);
+ 
+-      for (j = 0; j < NUM_JOB_SLOTS; j++)
++      for (j = 0; j < NUM_JOB_SLOTS; j++) {
+               drm_sched_fini(&js->queue[j].sched);
++              mutex_destroy(&js->queue[j].lock);
++      }
+ 
+ }
+ 
+@@ -660,7 +664,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+       int ret, i;
+ 
+       for (i = 0; i < NUM_JOB_SLOTS; i++) {
+-              mutex_init(&js->queue[i].lock);
+               sched = &js->queue[i].sched;
+               ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+                                           DRM_SCHED_PRIORITY_NORMAL, &sched,
+@@ -673,14 +676,10 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+ 
+ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+ {
+-      struct panfrost_device *pfdev = panfrost_priv->pfdev;
+-      struct panfrost_job_slot *js = pfdev->js;
+       int i;
+ 
+-      for (i = 0; i < NUM_JOB_SLOTS; i++) {
++      for (i = 0; i < NUM_JOB_SLOTS; i++)
+               drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+-              mutex_destroy(&js->queue[i].lock);
+-      }
+ }
+ 
+ int panfrost_job_is_idle(struct panfrost_device *pfdev)
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index cd71e71339446..9e852b4bbf92b 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
+                                              group);
+ }
+ 
++static void wacom_devm_kfifo_release(struct device *dev, void *res)
++{
++      struct kfifo_rec_ptr_2 *devres = res;
++
++      kfifo_free(devres);
++}
++
++static int wacom_devm_kfifo_alloc(struct wacom *wacom)
++{
++      struct wacom_wac *wacom_wac = &wacom->wacom_wac;
++      struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
++      int error;
++
++      pen_fifo = devres_alloc(wacom_devm_kfifo_release,
++                            sizeof(struct kfifo_rec_ptr_2),
++                            GFP_KERNEL);
++
++      if (!pen_fifo)
++              return -ENOMEM;
++
++      error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
++      if (error) {
++              devres_free(pen_fifo);
++              return error;
++      }
++
++      devres_add(&wacom->hdev->dev, pen_fifo);
++
++      return 0;
++}
++
+ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
+ {
+       struct wacom *wacom = led->wacom;
+@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
+       if (features->check_for_hid_type && features->hid_type != hdev->type)
+               return -ENODEV;
+ 
+-      error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
++      error = wacom_devm_kfifo_alloc(wacom);
+       if (error)
+               return error;
+ 
+@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
+ 
+       if (wacom->wacom_wac.features.type != REMOTE)
+               wacom_release_resources(wacom);
+-
+-      kfifo_free(&wacom_wac->pen_fifo);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index ae90713443fa6..877fe3733a42b 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
+ 
+       /* Register GPIO descriptor lookup table */
+       lookup = devm_kzalloc(dev,
+-                            struct_size(lookup, table, mux_config->n_gpios),
++                            struct_size(lookup, table, mux_config->n_gpios + 1),
+                             GFP_KERNEL);
+       if (!lookup)
+               return -ENOMEM;
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 33de99b7bc20c..0818d3e507347 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -38,6 +38,7 @@
+ #define I2C_IO_CONFIG_OPEN_DRAIN      0x0003
+ #define I2C_IO_CONFIG_PUSH_PULL               0x0000
+ #define I2C_SOFT_RST                  0x0001
++#define I2C_HANDSHAKE_RST             0x0020
+ #define I2C_FIFO_ADDR_CLR             0x0001
+ #define I2C_DELAY_LEN                 0x0002
+ #define I2C_TIME_CLR_VALUE            0x0000
+@@ -45,6 +46,7 @@
+ #define I2C_WRRD_TRANAC_VALUE         0x0002
+ #define I2C_RD_TRANAC_VALUE           0x0001
+ #define I2C_SCL_MIS_COMP_VALUE                0x0000
++#define I2C_CHN_CLR_FLAG              0x0000
+ 
+ #define I2C_DMA_CON_TX                        0x0000
+ #define I2C_DMA_CON_RX                        0x0001
+@@ -54,7 +56,9 @@
+ #define I2C_DMA_START_EN              0x0001
+ #define I2C_DMA_INT_FLAG_NONE         0x0000
+ #define I2C_DMA_CLR_FLAG              0x0000
++#define I2C_DMA_WARM_RST              0x0001
+ #define I2C_DMA_HARD_RST              0x0002
++#define I2C_DMA_HANDSHAKE_RST         0x0004
+ 
+ #define MAX_SAMPLE_CNT_DIV            8
+ #define MAX_STEP_CNT_DIV              64
+@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+       u16 control_reg;
+ 
+-      writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+-      udelay(50);
+-      writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+-
+-      mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
++      if (i2c->dev_comp->dma_sync) {
++              writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
++              udelay(10);
++              writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
++              udelay(10);
++              writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
++                     i2c->pdmabase + OFFSET_RST);
++              mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
++                             OFFSET_SOFTRESET);
++              udelay(10);
++              writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
++              mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
++      } else {
++              writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
++              udelay(50);
++              writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
++              mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
++      }
+ 
+       /* Set ioconfig */
+       if (i2c->use_push_pull)
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 19cda6742423d..2917fecf6c80d 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -72,6 +72,8 @@
+ 
+ /* timeout (ms) for pm runtime autosuspend */
+ #define SPRD_I2C_PM_TIMEOUT   1000
++/* timeout (ms) for transfer message */
++#define I2C_XFER_TIMEOUT      1000
+ 
+ /* SPRD i2c data structure */
+ struct sprd_i2c {
+@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+                              struct i2c_msg *msg, bool is_last_msg)
+ {
+       struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
++      unsigned long time_left;
+ 
+       i2c_dev->msg = msg;
+       i2c_dev->buf = msg->buf;
+@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+ 
+       sprd_i2c_opt_start(i2c_dev);
+ 
+-      wait_for_completion(&i2c_dev->complete);
++      time_left = wait_for_completion_timeout(&i2c_dev->complete,
++                              msecs_to_jiffies(I2C_XFER_TIMEOUT));
++      if (!time_left)
++              return -ETIMEDOUT;
+ 
+       return i2c_dev->err;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index 7dd3b6097226f..174b19e397124 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -36,9 +36,6 @@
+ #include <rdma/ib_cache.h>
+ #include "hns_roce_device.h"
+ 
+-#define VLAN_SL_MASK 7
+-#define VLAN_SL_SHIFT 13
+-
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+       u32 fl = ah_attr->grh.flow_label;
+@@ -81,18 +78,12 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 
+       /* HIP08 needs to record vlan info in Address Vector */
+       if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+-              ah->av.vlan_en = 0;
+-
+               ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
+                                             &ah->av.vlan_id, NULL);
+               if (ret)
+                       return ret;
+ 
+-              if (ah->av.vlan_id < VLAN_N_VID) {
+-                      ah->av.vlan_en = 1;
+-                      ah->av.vlan_id |= (rdma_ah_get_sl(ah_attr) & VLAN_SL_MASK) <<
+-                                        VLAN_SL_SHIFT;
+-              }
++              ah->av.vlan_en = ah->av.vlan_id < VLAN_N_VID;
+       }
+ 
+       return ret;
+diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
+index 41dba7090c2ae..e398ebf1dbbab 100644
+--- a/drivers/interconnect/imx/imx.c
++++ b/drivers/interconnect/imx/imx.c
+@@ -99,6 +99,7 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
+               if (!dn || !of_device_is_available(dn)) {
+                       dev_warn(dev, "Missing property %s, skip scaling %s\n",
+                                adj->phandle_name, node->name);
++                      of_node_put(dn);
+                       return 0;
+               }
+ 
+diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
+index a8f93ba265f81..b3fb5b02bcf1e 100644
+--- a/drivers/interconnect/qcom/Kconfig
++++ b/drivers/interconnect/qcom/Kconfig
+@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
+         This is a driver for the Qualcomm Network-on-Chip on qcs404-based
+         platforms.
+ 
++config INTERCONNECT_QCOM_RPMH_POSSIBLE
++      tristate
++      default INTERCONNECT_QCOM
++      depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
++      depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
++      depends on OF || COMPILE_TEST
++      help
++        Compile-testing RPMH drivers is possible on other platforms,
++        but in order to avoid link failures, drivers must not be built-in
++        when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules
++
+ config INTERCONNECT_QCOM_RPMH
+       tristate
+ 
+ config INTERCONNECT_QCOM_SC7180
+       tristate "Qualcomm SC7180 interconnect driver"
+-      depends on INTERCONNECT_QCOM
+-      depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
++      depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
+ 
+ config INTERCONNECT_QCOM_SDM845
+       tristate "Qualcomm SDM845 interconnect driver"
+-      depends on INTERCONNECT_QCOM
+-      depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
++      depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
+ 
+ config INTERCONNECT_QCOM_SM8150
+       tristate "Qualcomm SM8150 interconnect driver"
+-      depends on INTERCONNECT_QCOM
+-      depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
++      depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
+ 
+ config INTERCONNECT_QCOM_SM8250
+       tristate "Qualcomm SM8250 interconnect driver"
+-      depends on INTERCONNECT_QCOM
+-      depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
++      depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index ef37ccfa82562..0eba5e883e3f1 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -55,6 +55,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ 
+               set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ 
++              arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
++
+               reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
+               arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+       }
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index b46dbfa6d0ed6..004feaed3c72c 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+               int mask = ilog2(__roundup_pow_of_two(npages));
+               unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+ 
+-              if (WARN_ON_ONCE(!ALIGN(addr, align)))
+-                      addr &= ~(align - 1);
++              if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
++                      addr = ALIGN_DOWN(addr, align);
+ 
+               desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                               QI_EIOTLB_DID(did) |
+diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
+index 0cfce1d3b7bbd..aedaae4630bc8 100644
+--- a/drivers/iommu/intel/irq_remapping.c
++++ b/drivers/iommu/intel/irq_remapping.c
+@@ -1390,6 +1390,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               irq_cfg = irqd_cfg(irq_data);
+               if (!irq_data || !irq_cfg) {
++                      if (!i)
++                              kfree(data);
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
+diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
+index 8f39f9ba5c80e..4c2ce210c1237 100644
+--- a/drivers/lightnvm/Kconfig
++++ b/drivers/lightnvm/Kconfig
+@@ -19,6 +19,7 @@ if NVM
+ 
+ config NVM_PBLK
+       tristate "Physical Block Device Open-Channel SSD target"
++      select CRC32
+       help
+         Allows an open-channel SSD to be exposed as a block device to the
+         host. The target assumes the device exposes raw flash and must be
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index aa4531c2ce0df..a148b92ad8563 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1341,6 +1341,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+       bcache_device_link(&dc->disk, c, "bdev");
+       atomic_inc(&c->attached_dev_nr);
+ 
++      if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
++              pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
++              pr_err("Please update to the latest bcache-tools to create the cache device\n");
++              set_disk_ro(dc->disk.disk, 1);
++      }
++
+       /* Allow the writeback thread to proceed */
+       up_write(&dc->writeback_lock);
+ 
+@@ -1564,6 +1570,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+ 
+       bcache_device_link(d, c, "volume");
+ 
++      if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
++              pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
++              pr_err("Please update to the latest bcache-tools to create the cache device\n");
++              set_disk_ro(d->disk, 1);
++      }
++
+       return 0;
+ err:
+       kobject_put(&d->kobj);
+@@ -2123,6 +2135,9 @@ static int run_cache_set(struct cache_set *c)
+       c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
+       bcache_write_super(c);
+ 
++      if (bch_has_feature_obso_large_bucket(&c->cache->sb))
++              pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
++
+       list_for_each_entry_safe(dc, t, &uncached_devices, list)
+               bch_cached_dev_attach(dc, c, NULL);
+ 
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 3b320f3d48b30..59c1724bcd0ed 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -645,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
+       return 0;
+ }
+ 
++static void bareudp_dellink(struct net_device *dev, struct list_head *head)
++{
++      struct bareudp_dev *bareudp = netdev_priv(dev);
++
++      list_del(&bareudp->next);
++      unregister_netdevice_queue(dev, head);
++}
++
+ static int bareudp_newlink(struct net *net, struct net_device *dev,
+                          struct nlattr *tb[], struct nlattr *data[],
+                          struct netlink_ext_ack *extack)
+ {
+       struct bareudp_conf conf;
++      LIST_HEAD(list_kill);
+       int err;
+ 
+       err = bareudp2info(data, &conf, extack);
+@@ -662,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
+ 
+       err = bareudp_link_config(dev, tb);
+       if (err)
+-              return err;
++              goto err_unconfig;
+ 
+       return 0;
+-}
+-
+-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+-{
+-      struct bareudp_dev *bareudp = netdev_priv(dev);
+ 
+-      list_del(&bareudp->next);
+-      unregister_netdevice_queue(dev, head);
++err_unconfig:
++      bareudp_dellink(dev, &list_kill);
++      unregister_netdevice_many(&list_kill);
++      return err;
+ }
+ 
+ static size_t bareudp_get_size(const struct net_device *dev)
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index 424970939fd4c..1c28eade6becc 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
+ config CAN_KVASER_PCIEFD
+       depends on PCI
+       tristate "Kvaser PCIe FD cards"
++      select CRC32
+         help
+         This is a driver for the Kvaser PCI Express CAN FD family.
+ 
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 7fc4ac1582afc..3c1e379751683 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1914,8 +1914,6 @@ EXPORT_SYMBOL_GPL(m_can_class_resume);
+ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
+ {
+       unregister_candev(m_can_dev->net);
+-
+-      m_can_clk_stop(m_can_dev);
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_unregister);
+ 
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 7347ab39c5b65..f726c5112294f 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -129,30 +129,6 @@ struct tcan4x5x_priv {
+       int reg_offset;
+ };
+ 
+-static struct can_bittiming_const tcan4x5x_bittiming_const = {
+-      .name = DEVICE_NAME,
+-      .tseg1_min = 2,
+-      .tseg1_max = 31,
+-      .tseg2_min = 2,
+-      .tseg2_max = 16,
+-      .sjw_max = 16,
+-      .brp_min = 1,
+-      .brp_max = 32,
+-      .brp_inc = 1,
+-};
+-
+-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
+-      .name = DEVICE_NAME,
+-      .tseg1_min = 1,
+-      .tseg1_max = 32,
+-      .tseg2_min = 1,
+-      .tseg2_max = 16,
+-      .sjw_max = 16,
+-      .brp_min = 1,
+-      .brp_max = 32,
+-      .brp_inc = 1,
+-};
+-
+ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
+ {
+       int wake_state = 0;
+@@ -479,8 +455,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+       mcan_class->dev = &spi->dev;
+       mcan_class->ops = &tcan4x5x_ops;
+       mcan_class->is_peripheral = true;
+-      mcan_class->bit_timing = &tcan4x5x_bittiming_const;
+-      mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
+       mcan_class->net->irq = spi->irq;
+ 
+       spi_set_drvdata(spi, priv);
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 4b36d89bec061..662e68a0e7e61 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -1436,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+ 
+-      /* With the exclusion of MII and Reverse MII, we support Gigabit,
+-       * including Half duplex
++      /* With the exclusion of MII, Reverse MII and Reduced MII, we
++       * support Gigabit, including Half duplex
+        */
+       if (state->interface != PHY_INTERFACE_MODE_MII &&
+-          state->interface != PHY_INTERFACE_MODE_REVMII) {
++          state->interface != PHY_INTERFACE_MODE_REVMII &&
++          state->interface != PHY_INTERFACE_MODE_RMII) {
+               phylink_set(mask, 1000baseT_Full);
+               phylink_set(mask, 1000baseT_Half);
+       }
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 50e3a70e5a290..07a956098e11f 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
+ 
+       while (!skb_queue_empty(&listen_ctx->synq)) {
+               struct chtls_sock *csk =
+-                      container_of((struct synq *)__skb_dequeue
++                      container_of((struct synq *)skb_peek
+                               (&listen_ctx->synq), struct chtls_sock, synq);
+               struct sock *child = csk->sk;
+ 
+@@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+                                   const struct cpl_pass_accept_req *req,
+                                   struct chtls_dev *cdev)
+ {
++      struct adapter *adap = pci_get_drvdata(cdev->pdev);
+       struct neighbour *n = NULL;
+       struct inet_sock *newinet;
+       const struct iphdr *iph;
+@@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+       struct dst_entry *dst;
+       struct tcp_sock *tp;
+       struct sock *newsk;
++      bool found = false;
+       u16 port_id;
+       int rxq_idx;
+-      int step;
++      int step, i;
+ 
+       iph = (const struct iphdr *)network_hdr;
+       newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
+@@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+               n = dst_neigh_lookup(dst, &ip6h->saddr);
+ #endif
+       }
+-      if (!n)
++      if (!n || !n->dev)
+               goto free_sk;
+ 
+       ndev = n->dev;
+@@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+       if (is_vlan_dev(ndev))
+               ndev = vlan_dev_real_dev(ndev);
+ 
++      for_each_port(adap, i)
++              if (cdev->ports[i] == ndev)
++                      found = true;
++
++      if (!found)
++              goto free_dst;
++
+       port_id = cxgb4_port_idx(ndev);
+ 
+       csk = chtls_sock_create(cdev);
+@@ -1237,6 +1246,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ free_csk:
+       chtls_sock_release(&csk->kref);
+ free_dst:
++      neigh_release(n);
+       dst_release(dst);
+ free_sk:
+       inet_csk_prepare_forced_close(newsk);
+@@ -1386,7 +1396,7 @@ static void chtls_pass_accept_request(struct sock *sk,
+ 
+       newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
+       if (!newsk)
+-              goto free_oreq;
++              goto reject;
+ 
+       if (chtls_get_module(newsk))
+               goto reject;
+@@ -1402,8 +1412,6 @@ static void chtls_pass_accept_request(struct sock *sk,
+       kfree_skb(skb);
+       return;
+ 
+-free_oreq:
+-      chtls_reqsk_free(oreq);
+ reject:
+       mk_tid_release(reply_skb, 0, tid);
+       cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+@@ -1588,6 +1596,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
+                       sk_wake_async(sk, 0, POLL_OUT);
+ 
+               data = lookup_stid(cdev->tids, stid);
++              if (!data) {
++                      /* listening server close */
++                      kfree_skb(skb);
++                      goto unlock;
++              }
+               lsk = ((struct listen_ctx *)data)->lsk;
+ 
+               bh_lock_sock(lsk);
+@@ -1996,39 +2009,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+       spin_unlock_bh(&cdev->deferq.lock);
+ }
+ 
+-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+-                         struct chtls_dev *cdev, int status, int queue)
+-{
+-      struct cpl_abort_req_rss *req = cplhdr(skb);
+-      struct sk_buff *reply_skb;
+-      struct chtls_sock *csk;
+-
+-      csk = rcu_dereference_sk_user_data(sk);
+-
+-      reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+-                            GFP_KERNEL);
+-
+-      if (!reply_skb) {
+-              req->status = (queue << 1);
+-              t4_defer_reply(skb, cdev, send_defer_abort_rpl);
+-              return;
+-      }
+-
+-      set_abort_rpl_wr(reply_skb, GET_TID(req), status);
+-      kfree_skb(skb);
+-
+-      set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+-      if (csk_conn_inline(csk)) {
+-              struct l2t_entry *e = csk->l2t_entry;
+-
+-              if (e && sk->sk_state != TCP_SYN_RECV) {
+-                      cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+-                      return;
+-              }
+-      }
+-      cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+-}
+-
+ static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+                                struct chtls_dev *cdev,
+                                int status, int queue)
+@@ -2077,9 +2057,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
+       queue = csk->txq_idx;
+ 
+       skb->sk = NULL;
++      chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
++                           CPL_ABORT_NO_RST, queue);
+       do_abort_syn_rcv(child, lsk);
+-      send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+-                     CPL_ABORT_NO_RST, queue);
+ }
+ 
+ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -2109,8 +2089,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+       if (!sock_owned_by_user(psk)) {
+               int queue = csk->txq_idx;
+ 
++              chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+               do_abort_syn_rcv(sk, psk);
+-              send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+       } else {
+               skb->sk = sk;
+               BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
+@@ -2128,9 +2108,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+       int queue = csk->txq_idx;
+ 
+       if (is_neg_adv(req->status)) {
+-              if (sk->sk_state == TCP_SYN_RECV)
+-                      chtls_set_tcb_tflag(sk, 0, 0);
+-
+               kfree_skb(skb);
+               return;
+       }
+@@ -2157,12 +2134,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+               if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
+                       return;
+ 
+-              chtls_release_resources(sk);
+-              chtls_conn_done(sk);
+       }
+ 
+       chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
+                            rst_status, queue);
++      chtls_release_resources(sk);
++      chtls_conn_done(sk);
+ }
+ 
+ static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+index 1ffe8fac702d9..98a9f5e3fe864 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -168,7 +168,7 @@ struct hclgevf_mbx_arq_ring {
+ #define hclge_mbx_ring_ptr_move_crq(crq) \
+       (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+ #define hclge_mbx_tail_ptr_move_arq(arq) \
+-      (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
++              (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+ #define hclge_mbx_head_ptr_move_arq(arq) \
+-              (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
++              (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 1f026408ad38b..4321132a4f630 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+               handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+               handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ 
+-              if (hdev->hw.mac.phydev) {
++              if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
++                  hdev->hw.mac.phydev->drv->set_loopback) {
+                       count += 1;
+                       handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+               }
+@@ -4484,8 +4485,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+               req->ipv4_sctp_en = tuple_sets;
+               break;
+       case SCTP_V6_FLOW:
+-              if ((nfc->data & RXH_L4_B_0_1) ||
+-                  (nfc->data & RXH_L4_B_2_3))
++              if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
++                  (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+                       return -EINVAL;
+ 
+               req->ipv6_sctp_en = tuple_sets;
+@@ -4665,6 +4666,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+               vport[i].rss_tuple_sets.ipv6_udp_en =
+                       HCLGE_RSS_INPUT_TUPLE_OTHER;
+               vport[i].rss_tuple_sets.ipv6_sctp_en =
++                      hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
++                      HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+                       HCLGE_RSS_INPUT_TUPLE_SCTP;
+               vport[i].rss_tuple_sets.ipv6_fragment_en =
+                       HCLGE_RSS_INPUT_TUPLE_OTHER;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 64e6afdb61b8d..213ac73f94cdd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -105,6 +105,8 @@
+ #define HCLGE_D_IP_BIT                        BIT(2)
+ #define HCLGE_S_IP_BIT                        BIT(3)
+ #define HCLGE_V_TAG_BIT                       BIT(4)
++#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT    \
++              (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
+ 
+ #define HCLGE_RSS_TC_SIZE_0           1
+ #define HCLGE_RSS_TC_SIZE_1           2
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index c8e3fdd5999c4..dc5d150a9c546 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -901,8 +901,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+               req->ipv4_sctp_en = tuple_sets;
+               break;
+       case SCTP_V6_FLOW:
+-              if ((nfc->data & RXH_L4_B_0_1) ||
+-                  (nfc->data & RXH_L4_B_2_3))
++              if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
++                  (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+                       return -EINVAL;
+ 
+               req->ipv6_sctp_en = tuple_sets;
+@@ -2481,7 +2481,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
+               tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+               tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+               tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+-              tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
++              tuple_sets->ipv6_sctp_en =
++                      hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
++                                      HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
++                                      HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+               tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+       }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index c5bcc3894fd54..526a62f970466 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -122,6 +122,8 @@
+ #define HCLGEVF_D_IP_BIT              BIT(2)
+ #define HCLGEVF_S_IP_BIT              BIT(3)
+ #define HCLGEVF_V_TAG_BIT             BIT(4)
++#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT  \
++      (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+ 
+ #define HCLGEVF_STATS_TIMER_INTERVAL  36U
+ 
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index c3e429445b83d..ceb4f27898002 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4409,7 +4409,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+       struct bpf_prog *old_prog;
+ 
+       if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+-              NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
++              NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+               return -EOPNOTSUPP;
+       }
+ 
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 2026c923b5855..2dcdec3eacc36 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -5480,7 +5480,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       unsigned int thread;
+-      int queue, err;
++      int queue, err, val;
+ 
+       /* Checks for hardware constraints */
+       if (port->first_rxq + port->nrxqs >
+@@ -5494,6 +5494,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
+       mvpp2_egress_disable(port);
+       mvpp2_port_disable(port);
+ 
++      if (mvpp2_is_xlg(port->phy_interface)) {
++              val = readl(port->base + MVPP22_XLG_CTRL0_REG);
++              val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
++              val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
++              writel(val, port->base + MVPP22_XLG_CTRL0_REG);
++      } else {
++              val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
++              val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
++              val |= MVPP2_GMAC_FORCE_LINK_DOWN;
++              writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
++      }
++
+       port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+ 
+       port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 8f17e26dca538..fc27a40202c6d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -862,8 +862,10 @@ static int cgx_lmac_init(struct cgx *cgx)
+               if (!lmac)
+                       return -ENOMEM;
+               lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+-              if (!lmac->name)
+-                      return -ENOMEM;
++              if (!lmac->name) {
++                      err = -ENOMEM;
++                      goto err_lmac_free;
++              }
+               sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+               lmac->lmac_id = i;
+               lmac->cgx = cgx;
+@@ -874,7 +876,7 @@ static int cgx_lmac_init(struct cgx *cgx)
+                                                CGX_LMAC_FWI + i * 9),
+                                  cgx_fwi_event_handler, 0, lmac->name, lmac);
+               if (err)
+-                      return err;
++                      goto err_irq;
+ 
+               /* Enable interrupt */
+               cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+@@ -886,6 +888,12 @@ static int cgx_lmac_init(struct cgx *cgx)
+       }
+ 
+       return cgx_lmac_verify_fwi_version(cgx);
++
++err_irq:
++      kfree(lmac->name);
++err_lmac_free:
++      kfree(lmac);
++      return err;
+ }
+ 
+ static int cgx_lmac_exit(struct cgx *cgx)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index d29af7b9c695a..76177f7c5ec29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+       if (!reg_c0)
+               return true;
+ 
++      /* If reg_c0 is not equal to the default flow tag then skb->mark
++       * is not supported and must be reset back to 0.
++       */
++      skb->mark = 0;
++
+       priv = netdev_priv(skb->dev);
+       esw = priv->mdev->priv.eswitch;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index d25a56ec6876a..f01395a9fd8df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1007,6 +1007,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
+       return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+ }
+ 
++static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
++                              const unsigned long link_modes, u8 autoneg)
++{
++      /* Extended link-mode has no speed limitations. */
++      if (ext)
++              return 0;
++
++      if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
++          autoneg != AUTONEG_ENABLE) {
++              netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
++                         __func__);
++              return -EINVAL;
++      }
++      return 0;
++}
++
+ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
+ {
+       u32 i, ptys_modes = 0;
+@@ -1100,13 +1116,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+       link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+               mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+ 
+-      if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+-          autoneg != AUTONEG_ENABLE) {
+-              netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+-                         __func__);
+-              err = -EINVAL;
++      err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
++      if (err)
+               goto out;
+-      }
+ 
+       link_modes = link_modes & eproto.cap;
+       if (!link_modes) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 1f48f99c0997d..7ad332d8625b9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -936,6 +936,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               kfree(ft->g);
++              ft->g = NULL;
+               return -ENOMEM;
+       }
+ 
+@@ -1081,6 +1082,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               kfree(ft->g);
++              ft->g = NULL;
+               return -ENOMEM;
+       }
+ 
+@@ -1384,6 +1386,7 @@ err_destroy_groups:
+       ft->g[ft->num_groups] = NULL;
+       mlx5e_destroy_groups(ft);
+       kvfree(in);
++      kfree(ft->g);
+ 
+       return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index 33081b24f10aa..9025e5f38bb65 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -556,7 +556,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+       struct mlx5_core_dev *tmp_dev;
+       int i, err;
+ 
+-      if (!MLX5_CAP_GEN(dev, vport_group_manager))
++      if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
++          !MLX5_CAP_GEN(dev, lag_master) ||
++          MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
+               return;
+ 
+       tmp_dev = mlx5_get_next_phys_dev(dev);
+@@ -574,12 +576,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+       if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+               return;
+ 
+-      for (i = 0; i < MLX5_MAX_PORTS; i++) {
+-              tmp_dev = ldev->pf[i].dev;
+-              if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+-                  MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
++      for (i = 0; i < MLX5_MAX_PORTS; i++)
++              if (!ldev->pf[i].dev)
+                       break;
+-      }
+ 
+       if (i >= MLX5_MAX_PORTS)
+               ldev->flags |= MLX5_LAG_FLAG_READY;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 0fc7de4aa572f..8e0dddc6383f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -116,7 +116,7 @@ free:
+ static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
+ {
+       mlx5_core_roce_gid_set(dev, 0, 0, 0,
+-                             NULL, NULL, false, 0, 0);
++                             NULL, NULL, false, 0, 1);
+ }
+ 
+ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
+diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
+index 776b7d264dc34..2289e1fe37419 100644
+--- a/drivers/net/ethernet/natsemi/macsonic.c
++++ b/drivers/net/ethernet/natsemi/macsonic.c
+@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
+ 
+       err = register_netdev(dev);
+       if (err)
+-              goto out;
++              goto undo_probe;
+ 
+       return 0;
+ 
++undo_probe:
++      dma_free_coherent(lp->device,
++                        SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++                        lp->descriptors, lp->descriptors_laddr);
+ out:
+       free_netdev(dev);
+ 
+@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
+ 
+       err = register_netdev(ndev);
+       if (err)
+-              goto out;
++              goto undo_probe;
+ 
+       nubus_set_drvdata(board, ndev);
+ 
+       return 0;
+ 
++undo_probe:
++      dma_free_coherent(lp->device,
++                        SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++                        lp->descriptors, lp->descriptors_laddr);
+ out:
+       free_netdev(ndev);
+       return err;
+diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
+index afa166ff7aef5..28d9e98db81a8 100644
+--- a/drivers/net/ethernet/natsemi/xtsonic.c
++++ b/drivers/net/ethernet/natsemi/xtsonic.c
+@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
+       sonic_msg_init(dev);
+ 
+       if ((err = register_netdev(dev)))
+-              goto out1;
++              goto undo_probe1;
+ 
+       return 0;
+ 
+-out1:
++undo_probe1:
++      dma_free_coherent(lp->device,
++                        SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++                        lp->descriptors, lp->descriptors_laddr);
+       release_region(dev->base_addr, SONIC_MEM_SIZE);
+ out:
+       free_netdev(dev);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index c968c5c5a60a0..d0ae1cf43592d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -123,6 +123,12 @@ static void ionic_link_status_check(struct ionic_lif *lif)
+       link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
+ 
+       if (link_up) {
++              if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
++                      mutex_lock(&lif->queue_lock);
++                      ionic_start_queues(lif);
++                      mutex_unlock(&lif->queue_lock);
++              }
++
+               if (!netif_carrier_ok(netdev)) {
+                       u32 link_speed;
+ 
+@@ -132,12 +138,6 @@ static void ionic_link_status_check(struct ionic_lif *lif)
+                                   link_speed / 1000);
+                       netif_carrier_on(netdev);
+               }
+-
+-              if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+-                      mutex_lock(&lif->queue_lock);
+-                      ionic_start_queues(lif);
+-                      mutex_unlock(&lif->queue_lock);
+-              }
+       } else {
+               if (netif_carrier_ok(netdev)) {
+                       netdev_info(netdev, "Link down\n");
+diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
+index 4366c7a8de951..6b5ddb07ee833 100644
+--- a/drivers/net/ethernet/qlogic/Kconfig
++++ b/drivers/net/ethernet/qlogic/Kconfig
+@@ -78,6 +78,7 @@ config QED
+       depends on PCI
+       select ZLIB_INFLATE
+       select CRC8
++      select CRC32
+       select NET_DEVLINK
+       help
+         This enables the support for Marvell FastLinQ adapters family.
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index 58e0511badba8..a5e0eff4a3874 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -64,6 +64,7 @@ struct emac_variant {
+  * @variant:  reference to the current board variant
+  * @regmap:   regmap for using the syscon
+  * @internal_phy_powered: Does the internal PHY is enabled
++ * @use_internal_phy: Is the internal PHY selected for use
+  * @mux_handle:       Internal pointer used by mdio-mux lib
+  */
+ struct sunxi_priv_data {
+@@ -74,6 +75,7 @@ struct sunxi_priv_data {
+       const struct emac_variant *variant;
+       struct regmap_field *regmap_field;
+       bool internal_phy_powered;
++      bool use_internal_phy;
+       void *mux_handle;
+ };
+ 
+@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
+       .dma_interrupt = sun8i_dwmac_dma_interrupt,
+ };
+ 
++static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
++
+ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+ {
++      struct net_device *ndev = platform_get_drvdata(pdev);
+       struct sunxi_priv_data *gmac = priv;
+       int ret;
+ 
+@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+ 
+       ret = clk_prepare_enable(gmac->tx_clk);
+       if (ret) {
+-              if (gmac->regulator)
+-                      regulator_disable(gmac->regulator);
+               dev_err(&pdev->dev, "Could not enable AHB clock\n");
+-              return ret;
++              goto err_disable_regulator;
++      }
++
++      if (gmac->use_internal_phy) {
++              ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
++              if (ret)
++                      goto err_disable_clk;
+       }
+ 
+       return 0;
++
++err_disable_clk:
++      clk_disable_unprepare(gmac->tx_clk);
++err_disable_regulator:
++      if (gmac->regulator)
++              regulator_disable(gmac->regulator);
++
++      return ret;
+ }
+ 
+ static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+       struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+       u32 reg, val;
+       int ret = 0;
+-      bool need_power_ephy = false;
+ 
+       if (current_child ^ desired_child) {
+               regmap_field_read(gmac->regmap_field, &reg);
+@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+               case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
+                       dev_info(priv->device, "Switch mux to internal PHY");
+                       val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
+-
+-                      need_power_ephy = true;
++                      gmac->use_internal_phy = true;
+                       break;
+               case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
+                       dev_info(priv->device, "Switch mux to external PHY");
+                       val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
+-                      need_power_ephy = false;
++                      gmac->use_internal_phy = false;
+                       break;
+               default:
+                       dev_err(priv->device, "Invalid child ID %x\n",
+@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+                       return -EINVAL;
+               }
+               regmap_field_write(gmac->regmap_field, val);
+-              if (need_power_ephy) {
++              if (gmac->use_internal_phy) {
+                       ret = sun8i_dwmac_power_internal_phy(priv);
+                       if (ret)
+                               return ret;
+@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
+       return ret;
+ }
+ 
+-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
++static int sun8i_dwmac_set_syscon(struct device *dev,
++                                struct plat_stmmacenet_data *plat)
+ {
+-      struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+-      struct device_node *node = priv->device->of_node;
++      struct sunxi_priv_data *gmac = plat->bsp_priv;
++      struct device_node *node = dev->of_node;
+       int ret;
+       u32 reg, val;
+ 
+       ret = regmap_field_read(gmac->regmap_field, &val);
+       if (ret) {
+-              dev_err(priv->device, "Fail to read from regmap field.\n");
++              dev_err(dev, "Fail to read from regmap field.\n");
+               return ret;
+       }
+ 
+       reg = gmac->variant->default_syscon_value;
+       if (reg != val)
+-              dev_warn(priv->device,
++              dev_warn(dev,
+                        "Current syscon value is not the default %x (expect %x)\n",
+                        val, reg);
+ 
+@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+               /* Force EPHY xtal frequency to 24MHz. */
+               reg |= H3_EPHY_CLK_SEL;
+ 
+-              ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
++              ret = of_mdio_parse_addr(dev, plat->phy_node);
+               if (ret < 0) {
+-                      dev_err(priv->device, "Could not parse MDIO addr\n");
++                      dev_err(dev, "Could not parse MDIO addr\n");
+                       return ret;
+               }
+               /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+ 
+       if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
+               if (val % 100) {
+-                      dev_err(priv->device, "tx-delay must be a multiple of 100\n");
++                      dev_err(dev, "tx-delay must be a multiple of 100\n");
+                       return -EINVAL;
+               }
+               val /= 100;
+-              dev_dbg(priv->device, "set tx-delay to %x\n", val);
++              dev_dbg(dev, "set tx-delay to %x\n", val);
+               if (val <= gmac->variant->tx_delay_max) {
+                       reg &= ~(gmac->variant->tx_delay_max <<
+                                SYSCON_ETXDC_SHIFT);
+                       reg |= (val << SYSCON_ETXDC_SHIFT);
+               } else {
+-                      dev_err(priv->device, "Invalid TX clock delay: %d\n",
++                      dev_err(dev, "Invalid TX clock delay: %d\n",
+                               val);
+                       return -EINVAL;
+               }
+@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+ 
+       if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
+               if (val % 100) {
+-                      dev_err(priv->device, "rx-delay must be a multiple of 100\n");
++                      dev_err(dev, "rx-delay must be a multiple of 100\n");
+                       return -EINVAL;
+               }
+               val /= 100;
+-              dev_dbg(priv->device, "set rx-delay to %x\n", val);
++              dev_dbg(dev, "set rx-delay to %x\n", val);
+               if (val <= gmac->variant->rx_delay_max) {
+                       reg &= ~(gmac->variant->rx_delay_max <<
+                                SYSCON_ERXDC_SHIFT);
+                       reg |= (val << SYSCON_ERXDC_SHIFT);
+               } else {
+-                      dev_err(priv->device, "Invalid RX clock delay: %d\n",
++                      dev_err(dev, "Invalid RX clock delay: %d\n",
+                               val);
+                       return -EINVAL;
+               }
+@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+       if (gmac->variant->support_rmii)
+               reg &= ~SYSCON_RMII_EN;
+ 
+-      switch (priv->plat->interface) {
++      switch (plat->interface) {
+       case PHY_INTERFACE_MODE_MII:
+               /* default */
+               break;
+@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+               reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
+               break;
+       default:
+-              dev_err(priv->device, "Unsupported interface mode: %s",
+-                      phy_modes(priv->plat->interface));
++              dev_err(dev, "Unsupported interface mode: %s",
++                      phy_modes(plat->interface));
+               return -EINVAL;
+       }
+ 
+@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+       struct sunxi_priv_data *gmac = priv;
+ 
+       if (gmac->variant->soc_has_internal_phy) {
+-              /* sun8i_dwmac_exit could be called with mdiomux uninit */
+-              if (gmac->mux_handle)
+-                      mdio_mux_uninit(gmac->mux_handle);
+               if (gmac->internal_phy_powered)
+                       sun8i_dwmac_unpower_internal_phy(gmac);
+       }
+ 
+-      sun8i_dwmac_unset_syscon(gmac);
+-
+-      reset_control_put(gmac->rst_ephy);
+-
+       clk_disable_unprepare(gmac->tx_clk);
+ 
+       if (gmac->regulator)
+@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+ {
+       struct mac_device_info *mac;
+       struct stmmac_priv *priv = ppriv;
+-      int ret;
+ 
+       mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+       if (!mac)
+               return NULL;
+ 
+-      ret = sun8i_dwmac_set_syscon(priv);
+-      if (ret)
+-              return NULL;
+-
+       mac->pcsr = priv->ioaddr;
+       mac->mac = &sun8i_dwmac_ops;
+       mac->dma = &sun8i_dwmac_dma_ops;
+@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+ 
+-      plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+-      if (IS_ERR(plat_dat))
+-              return PTR_ERR(plat_dat);
+-
+       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+       if (!gmac)
+               return -ENOMEM;
+@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+       ret = of_get_phy_mode(dev->of_node, &interface);
+       if (ret)
+               return -EINVAL;
+-      plat_dat->interface = interface;
++
++      plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
++      if (IS_ERR(plat_dat))
++              return PTR_ERR(plat_dat);
+ 
+       /* platform data specifying hardware features and callbacks.
+        * hardware features were copied from Allwinner drivers.
+        */
++      plat_dat->interface = interface;
+       plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
+       plat_dat->tx_coe = 1;
+       plat_dat->has_sun8i = true;
+@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+       plat_dat->exit = sun8i_dwmac_exit;
+       plat_dat->setup = sun8i_dwmac_setup;
+ 
++      ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
++      if (ret)
++              goto dwmac_deconfig;
++
+       ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+-              return ret;
++              goto dwmac_syscon;
+ 
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+       if (gmac->variant->soc_has_internal_phy) {
+               ret = get_ephy_nodes(priv);
+               if (ret)
+-                      goto dwmac_exit;
++                      goto dwmac_remove;
+               ret = sun8i_dwmac_register_mdio_mux(priv);
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed to register mux\n");
+@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+       } else {
+               ret = sun8i_dwmac_reset(priv);
+               if (ret)
+-                      goto dwmac_exit;
++                      goto dwmac_remove;
+       }
+ 
+       return ret;
+ dwmac_mux:
+-      sun8i_dwmac_unset_syscon(gmac);
++      reset_control_put(gmac->rst_ephy);
++      clk_put(gmac->ephy_clk);
++dwmac_remove:
++      stmmac_dvr_remove(&pdev->dev);
+ dwmac_exit:
++      sun8i_dwmac_exit(pdev, gmac);
++dwmac_syscon:
++      sun8i_dwmac_unset_syscon(gmac);
++dwmac_deconfig:
++      stmmac_remove_config_dt(pdev, plat_dat);
++
++      return ret;
++}
++
++static int sun8i_dwmac_remove(struct platform_device *pdev)
++{
++      struct net_device *ndev = platform_get_drvdata(pdev);
++      struct stmmac_priv *priv = netdev_priv(ndev);
++      struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++
++      if (gmac->variant->soc_has_internal_phy) {
++              mdio_mux_uninit(gmac->mux_handle);
++              sun8i_dwmac_unpower_internal_phy(gmac);
++              reset_control_put(gmac->rst_ephy);
++              clk_put(gmac->ephy_clk);
++      }
++
+       stmmac_pltfr_remove(pdev);
+-return ret;
++      sun8i_dwmac_unset_syscon(gmac);
++
++      return 0;
+ }
+ 
+ static const struct of_device_id sun8i_dwmac_match[] = {
+@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
+ 
+ static struct platform_driver sun8i_dwmac_driver = {
+       .probe  = sun8i_dwmac_probe,
+-      .remove = stmmac_pltfr_remove,
++      .remove = sun8i_dwmac_remove,
+       .driver = {
+               .name           = "dwmac-sun8i",
+               .pm             = &stmmac_pltfr_pm_ops,
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 5dc1365dc1f9a..854c6624e6859 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+        * accordingly. Otherwise, we should check here.
+        */
+       if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-              delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
++              delayed_ndp_size = ctx->max_ndp_size +
++                      max_t(u32,
++                            ctx->tx_ndp_modulus,
++                            ctx->tx_modulus + ctx->tx_remainder) - 1;
+       else
+               delayed_ndp_size = 0;
+ 
+@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+       if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+           skb_out->len > ctx->min_tx_pkt) {
+               padding_count = ctx->tx_curr_size - skb_out->len;
+-              skb_put_zero(skb_out, padding_count);
++              if (!WARN_ON(padding_count > ctx->tx_curr_size))
++                      skb_put_zero(skb_out, padding_count);
+       } else if (skb_out->len < ctx->tx_curr_size &&
+                  (skb_out->len % dev->maxpacket) == 0) {
+               skb_put_u8(skb_out, 0); /* force short packet */
+diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
+index 39e5ab261d7ce..4be2a5cf022c8 100644
+--- a/drivers/net/wan/Kconfig
++++ b/drivers/net/wan/Kconfig
+@@ -282,6 +282,7 @@ config SLIC_DS26522
+       tristate "Slic Maxim ds26522 card support"
+       depends on SPI
+       depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
++      select BITREVERSE
+       help
+         This module initializes and configures the slic maxim card
+         in T1 or E1 mode.
+diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
+index 6a95b199bf626..f074e9c31aa22 100644
+--- a/drivers/net/wireless/ath/wil6210/Kconfig
++++ b/drivers/net/wireless/ath/wil6210/Kconfig
+@@ -2,6 +2,7 @@
+ config WIL6210
+       tristate "Wilocity 60g WiFi card wil6210 support"
+       select WANT_DEV_COREDUMP
++      select CRC32
+       depends on CFG80211
+       depends on PCI
+       default n
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index c0c33320fe659..9aa3d9e91c5d1 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
+       }
+ }
+ 
++static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
++{
++      int ret;
++
++      /* drain the send queue as much as we can... */
++      do {
++              ret = nvme_tcp_try_send(queue);
++      } while (ret > 0);
++}
++
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+               bool sync, bool last)
+ {
+@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+       if (queue->io_cpu == smp_processor_id() &&
+           sync && empty && mutex_trylock(&queue->send_mutex)) {
+               queue->more_requests = !last;
+-              nvme_tcp_try_send(queue);
++              nvme_tcp_send_all(queue);
+               queue->more_requests = false;
+               mutex_unlock(&queue->send_mutex);
+       } else if (last) {
+diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
+index 942f72d8151da..deb429a3dff1d 100644
+--- a/drivers/ptp/Kconfig
++++ b/drivers/ptp/Kconfig
+@@ -64,6 +64,7 @@ config DP83640_PHY
+       depends on NETWORK_PHY_TIMESTAMPING
+       depends on PHYLIB
+       depends on PTP_1588_CLOCK
++      select CRC32
+       help
+         Supports the DP83640 PHYTER with IEEE 1588 features.
+ 
+@@ -78,6 +79,7 @@ config DP83640_PHY
+ config PTP_1588_CLOCK_INES
+       tristate "ZHAW InES PTP time stamping IP core"
+       depends on NETWORK_PHY_TIMESTAMPING
++      depends on HAS_IOMEM
+       depends on PHYLIB
+       depends on PTP_1588_CLOCK
+       help
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index d488325499a9f..a22c4b5f64f7e 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
+ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+-      .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
++      .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
+       .n_voltages = 5,
+       .pmic_mode_map = pmic_mode_map_pmic5_smps,
+       .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index b235393e091ca..2f7e06ec9a30e 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -1075,7 +1075,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
+ void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+                             int clear_start_mask);
+ int qeth_threads_running(struct qeth_card *, unsigned long);
+-int qeth_set_offline(struct qeth_card *card, bool resetting);
++int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline 
*disc,
++                   bool resetting);
+ 
+ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+                 int (*reply_cb)
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index e27319de7b00b..f108232498baf 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -5300,12 +5300,12 @@ out:
+       return rc;
+ }
+ 
+-static int qeth_set_online(struct qeth_card *card)
++static int qeth_set_online(struct qeth_card *card,
++                         const struct qeth_discipline *disc)
+ {
+       bool carrier_ok;
+       int rc;
+ 
+-      mutex_lock(&card->discipline_mutex);
+       mutex_lock(&card->conf_mutex);
+       QETH_CARD_TEXT(card, 2, "setonlin");
+ 
+@@ -5322,7 +5322,7 @@ static int qeth_set_online(struct qeth_card *card)
+               /* no need for locking / error handling at this early stage: */
+               qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
+ 
+-      rc = card->discipline->set_online(card, carrier_ok);
++      rc = disc->set_online(card, carrier_ok);
+       if (rc)
+               goto err_online;
+ 
+@@ -5330,7 +5330,6 @@ static int qeth_set_online(struct qeth_card *card)
+       kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+ 
+       mutex_unlock(&card->conf_mutex);
+-      mutex_unlock(&card->discipline_mutex);
+       return 0;
+ 
+ err_online:
+@@ -5345,15 +5344,14 @@ err_hardsetup:
+       qdio_free(CARD_DDEV(card));
+ 
+       mutex_unlock(&card->conf_mutex);
+-      mutex_unlock(&card->discipline_mutex);
+       return rc;
+ }
+ 
+-int qeth_set_offline(struct qeth_card *card, bool resetting)
++int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
++                   bool resetting)
+ {
+       int rc, rc2, rc3;
+ 
+-      mutex_lock(&card->discipline_mutex);
+       mutex_lock(&card->conf_mutex);
+       QETH_CARD_TEXT(card, 3, "setoffl");
+ 
+@@ -5374,7 +5372,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
+ 
+       cancel_work_sync(&card->rx_mode_work);
+ 
+-      card->discipline->set_offline(card);
++      disc->set_offline(card);
+ 
+       qeth_qdio_clear_card(card, 0);
+       qeth_drain_output_queues(card);
+@@ -5395,16 +5393,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
+       kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+ 
+       mutex_unlock(&card->conf_mutex);
+-      mutex_unlock(&card->discipline_mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_set_offline);
+ 
+ static int qeth_do_reset(void *data)
+ {
++      const struct qeth_discipline *disc;
+       struct qeth_card *card = data;
+       int rc;
+ 
++      /* Lock-free, other users will block until we are done. */
++      disc = card->discipline;
++
+       QETH_CARD_TEXT(card, 2, "recover1");
+       if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+               return 0;
+@@ -5412,8 +5413,8 @@ static int qeth_do_reset(void *data)
+       dev_warn(&card->gdev->dev,
+                "A recovery process has been started for the device\n");
+ 
+-      qeth_set_offline(card, true);
+-      rc = qeth_set_online(card);
++      qeth_set_offline(card, disc, true);
++      rc = qeth_set_online(card, disc);
+       if (!rc) {
+               dev_info(&card->gdev->dev,
+                        "Device successfully recovered!\n");
+@@ -6360,6 +6361,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
+               break;
+       default:
+               card->info.layer_enforced = true;
++              /* It's so early that we don't need the discipline_mutex yet. */
+               rc = qeth_core_load_discipline(card, enforced_disc);
+               if (rc)
+                       goto err_load;
+@@ -6392,10 +6394,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
+ 
+       QETH_CARD_TEXT(card, 2, "removedv");
+ 
++      mutex_lock(&card->discipline_mutex);
+       if (card->discipline) {
+               card->discipline->remove(gdev);
+               qeth_core_free_discipline(card);
+       }
++      mutex_unlock(&card->discipline_mutex);
+ 
+       qeth_free_qdio_queues(card);
+ 
+@@ -6410,6 +6414,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
+       int rc = 0;
+       enum qeth_discipline_id def_discipline;
+ 
++      mutex_lock(&card->discipline_mutex);
+       if (!card->discipline) {
+               def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+                                               QETH_DISCIPLINE_LAYER2;
+@@ -6423,16 +6428,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
+               }
+       }
+ 
+-      rc = qeth_set_online(card);
++      rc = qeth_set_online(card, card->discipline);
++
+ err:
++      mutex_unlock(&card->discipline_mutex);
+       return rc;
+ }
+ 
+ static int qeth_core_set_offline(struct ccwgroup_device *gdev)
+ {
+       struct qeth_card *card = dev_get_drvdata(&gdev->dev);
++      int rc;
+ 
+-      return qeth_set_offline(card, false);
++      mutex_lock(&card->discipline_mutex);
++      rc = qeth_set_offline(card, card->discipline, false);
++      mutex_unlock(&card->discipline_mutex);
++
++      return rc;
+ }
+ 
+ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 79939ba5d5235..cfc931f2b7e2c 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
+       wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ 
+       if (gdev->state == CCWGROUP_ONLINE)
+-              qeth_set_offline(card, false);
++              qeth_set_offline(card, card->discipline, false);
+ 
+       cancel_work_sync(&card->close_dev_work);
+       if (card->dev->reg_state == NETREG_REGISTERED)
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index b1c1d2510d55b..291861c9b9569 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -1816,7 +1816,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
+                                                   struct net_device *dev,
+                                                   netdev_features_t features)
+ {
+-      if (qeth_get_ip_version(skb) != 4)
++      if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+               features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+       return qeth_features_check(skb, dev, features);
+ }
+@@ -1974,7 +1974,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+       wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ 
+       if (cgdev->state == CCWGROUP_ONLINE)
+-              qeth_set_offline(card, false);
++              qeth_set_offline(card, card->discipline, false);
+ 
+       cancel_work_sync(&card->close_dev_work);
+       if (card->dev->reg_state == NETREG_REGISTERED)
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index fcaafa564dfcd..f103340820c66 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -10459,7 +10459,6 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+                          struct lpfc_nodelist *ndlp,
+                          struct sli4_wcqe_xri_aborted *axri)
+ {
+-      struct lpfc_vport *vport;
+       uint32_t ext_status = 0;
+ 
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+@@ -10469,7 +10468,6 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+               return;
+       }
+ 
+-      vport = ndlp->vport;
+       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                       "3116 Port generated FCP XRI ABORT event on "
+                       "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 02f161468daf5..7558b4abebfc5 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -7651,7 +7651,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
+       else if (wlun == UFS_UPIU_RPMB_WLUN)
+               sdp = hba->sdev_rpmb;
+       else
+-              BUG_ON(1);
++              BUG();
+       if (sdp) {
+               ret = scsi_device_get(sdp);
+               if (!ret && !scsi_device_online(sdp)) {
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index 0e3d8e6c08f42..01ef79f15b024 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -83,6 +83,7 @@ struct spi_geni_master {
+       spinlock_t lock;
+       int irq;
+       bool cs_flag;
++      bool abort_failed;
+ };
+ 
+ static int get_spi_clk_cfg(unsigned int speed_hz,
+@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
+       spin_unlock_irq(&mas->lock);
+ 
+       time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
+-      if (!time_left)
++      if (!time_left) {
+               dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
++
++              /*
++               * No need for a lock since SPI core has a lock and we never
++               * access this from an interrupt.
++               */
++              mas->abort_failed = true;
++      }
++}
++
++static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
++{
++      struct geni_se *se = &mas->se;
++      u32 m_irq, m_irq_en;
++
++      if (!mas->abort_failed)
++              return false;
++
++      /*
++       * The only known case where a transfer times out and then a cancel
++       * times out then an abort times out is if something is blocking our
++       * interrupt handler from running.  Avoid starting any new transfers
++       * until that sorts itself out.
++       */
++      spin_lock_irq(&mas->lock);
++      m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
++      m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
++      spin_unlock_irq(&mas->lock);
++
++      if (m_irq & m_irq_en) {
++              dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
++                      m_irq & m_irq_en);
++              return true;
++      }
++
++      /*
++       * If we're here the problem resolved itself so no need to check more
++       * on future transfers.
++       */
++      mas->abort_failed = false;
++
++      return false;
+ }
+ 
+ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
+@@ -158,9 +200,15 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
+       if (set_flag == mas->cs_flag)
+               return;
+ 
++      pm_runtime_get_sync(mas->dev);
++
++      if (spi_geni_is_abort_still_pending(mas)) {
++              dev_err(mas->dev, "Can't set chip select\n");
++              goto exit;
++      }
++
+       mas->cs_flag = set_flag;
+ 
+-      pm_runtime_get_sync(mas->dev);
+       spin_lock_irq(&mas->lock);
+       reinit_completion(&mas->cs_done);
+       if (set_flag)
+@@ -173,6 +221,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
+       if (!time_left)
+               handle_fifo_timeout(spi, NULL);
+ 
++exit:
+       pm_runtime_put(mas->dev);
+ }
+ 
+@@ -280,6 +329,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
+       int ret;
+       struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ 
++      if (spi_geni_is_abort_still_pending(mas))
++              return -EBUSY;
++
+       ret = setup_fifo_params(spi_msg->spi, spi);
+       if (ret)
+               dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+@@ -354,6 +406,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
+       unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+       unsigned int i = 0;
+ 
++      /* Stop the watermark IRQ if nothing to send */
++      if (!mas->cur_xfer) {
++              writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
++              return false;
++      }
++
+       max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
+       if (mas->tx_rem_bytes < max_bytes)
+               max_bytes = mas->tx_rem_bytes;
+@@ -396,6 +454,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
+               if (rx_last_byte_valid && rx_last_byte_valid < 4)
+                       rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
+       }
++
++      /* Clear out the FIFO and bail if nowhere to put it */
++      if (!mas->cur_xfer) {
++              for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
++                      readl(se->base + SE_GENI_RX_FIFOn);
++              return;
++      }
++
+       if (mas->rx_rem_bytes < rx_bytes)
+               rx_bytes = mas->rx_rem_bytes;
+ 
+@@ -495,6 +561,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
+ {
+       struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ 
++      if (spi_geni_is_abort_still_pending(mas))
++              return -EBUSY;
++
+       /* Terminate and return success for 0 byte length transfer */
+       if (!xfer->len)
+               return 0;
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 471dedf3d3392..6017209c6d2f7 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
+ 
+       /* align packet size with data registers access */
+       if (spi->cur_bpw > 8)
+-              fthlv -= (fthlv % 2); /* multiple of 2 */
++              fthlv += (fthlv % 2) ? 1 : 0;
+       else
+-              fthlv -= (fthlv % 4); /* multiple of 4 */
++              fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
+ 
+       if (!fthlv)
+               fthlv = 1;
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 92dd86bceae31..8de4bf8edb9c0 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -35,6 +35,22 @@ enum {
+       BTRFS_INODE_IN_DELALLOC_LIST,
+       BTRFS_INODE_HAS_PROPS,
+       BTRFS_INODE_SNAPSHOT_FLUSH,
++      /*
++       * Set and used when logging an inode and it serves to signal that an
++       * inode does not have xattrs, so subsequent fsyncs can avoid searching
++       * for xattrs to log. This bit must be cleared whenever a xattr is added
++       * to an inode.
++       */
++      BTRFS_INODE_NO_XATTRS,
++      /*
++       * Set when we are in a context where we need to start a transaction and
++       * have dirty pages with the respective file range locked. This is to
++       * ensure that when reserving space for the transaction, if we are low
++       * on available space and need to flush delalloc, we will not flush
++       * delalloc for this inode, because that could result in a deadlock (on
++       * the file range, inode's io_tree).
++       */
++      BTRFS_INODE_NO_DELALLOC_FLUSH,
+ };
+ 
+ /* in memory btrfs inode */
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 62461239600fc..e01545538e07f 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3001,7 +3001,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+                              u32 min_type);
+ 
+ int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
+-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
++int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
++                             bool in_reclaim_context);
+ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
+                             struct extent_state **cached_state);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 10638537b9ef3..d297804631829 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -703,7 +703,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+        * flush all outstanding I/O and inode extent mappings before the
+        * copy operation is declared as being finished
+        */
+-      ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
++      ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
+       if (ret) {
+               mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+               return ret;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7e8d8169779d2..acc47e2ffb46b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9389,7 +9389,9 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
+  * some fairly slow code that needs optimization. This walks the list
+  * of all the inodes with pending delalloc and forces them to disk.
+  */
+-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
++static int start_delalloc_inodes(struct btrfs_root *root,
++                               struct writeback_control *wbc, bool snapshot,
++                               bool in_reclaim_context)
+ {
+       struct btrfs_inode *binode;
+       struct inode *inode;
+@@ -9397,6 +9399,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+       struct list_head works;
+       struct list_head splice;
+       int ret = 0;
++      bool full_flush = wbc->nr_to_write == LONG_MAX;
+ 
+       INIT_LIST_HEAD(&works);
+       INIT_LIST_HEAD(&splice);
+@@ -9410,6 +9413,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+ 
+               list_move_tail(&binode->delalloc_inodes,
+                              &root->delalloc_inodes);
++
++              if (in_reclaim_context &&
++                  test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
++                      continue;
++
+               inode = igrab(&binode->vfs_inode);
+               if (!inode) {
+                       cond_resched_lock(&root->delalloc_lock);
+@@ -9420,18 +9428,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+               if (snapshot)
+                       set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+                               &binode->runtime_flags);
+-              work = btrfs_alloc_delalloc_work(inode);
+-              if (!work) {
+-                      iput(inode);
+-                      ret = -ENOMEM;
+-                      goto out;
+-              }
+-              list_add_tail(&work->list, &works);
+-              btrfs_queue_work(root->fs_info->flush_workers,
+-                               &work->work);
+-              if (*nr != U64_MAX) {
+-                      (*nr)--;
+-                      if (*nr == 0)
++              if (full_flush) {
++                      work = btrfs_alloc_delalloc_work(inode);
++                      if (!work) {
++                              iput(inode);
++                              ret = -ENOMEM;
++                              goto out;
++                      }
++                      list_add_tail(&work->list, &works);
++                      btrfs_queue_work(root->fs_info->flush_workers,
++                                       &work->work);
++              } else {
++                      ret = sync_inode(inode, wbc);
++                      if (!ret &&
++                          test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
++                                   &BTRFS_I(inode)->runtime_flags))
++                              ret = sync_inode(inode, wbc);
++                      btrfs_add_delayed_iput(inode);
++                      if (ret || wbc->nr_to_write <= 0)
+                               goto out;
+               }
+               cond_resched();
+@@ -9457,17 +9471,29 @@ out:
+ 
+ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+ {
++      struct writeback_control wbc = {
++              .nr_to_write = LONG_MAX,
++              .sync_mode = WB_SYNC_NONE,
++              .range_start = 0,
++              .range_end = LLONG_MAX,
++      };
+       struct btrfs_fs_info *fs_info = root->fs_info;
+-      u64 nr = U64_MAX;
+ 
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+               return -EROFS;
+ 
+-      return start_delalloc_inodes(root, &nr, true);
++      return start_delalloc_inodes(root, &wbc, true, false);
+ }
+ 
+-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
++int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
++                             bool in_reclaim_context)
+ {
++      struct writeback_control wbc = {
++              .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
++              .sync_mode = WB_SYNC_NONE,
++              .range_start = 0,
++              .range_end = LLONG_MAX,
++      };
+       struct btrfs_root *root;
+       struct list_head splice;
+       int ret;
+@@ -9481,6 +9507,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+       spin_lock(&fs_info->delalloc_root_lock);
+       list_splice_init(&fs_info->delalloc_roots, &splice);
+       while (!list_empty(&splice) && nr) {
++              /*
++               * Reset nr_to_write here so we know that we're doing a full
++               * flush.
++               */
++              if (nr == U64_MAX)
++                      wbc.nr_to_write = LONG_MAX;
++
+               root = list_first_entry(&splice, struct btrfs_root,
+                                       delalloc_root);
+               root = btrfs_grab_root(root);
+@@ -9489,9 +9522,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+                              &fs_info->delalloc_roots);
+               spin_unlock(&fs_info->delalloc_root_lock);
+ 
+-              ret = start_delalloc_inodes(root, &nr, false);
++              ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
+               btrfs_put_root(root);
+-              if (ret < 0)
++              if (ret < 0 || wbc.nr_to_write <= 0)
+                       goto out;
+               spin_lock(&fs_info->delalloc_root_lock);
+       }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e8ca229a216be..bd46e107f955e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4940,7 +4940,7 @@ long btrfs_ioctl(struct file *file, unsigned int
+       case BTRFS_IOC_SYNC: {
+               int ret;
+ 
+-              ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
++              ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
+               if (ret)
+                       return ret;
+               ret = btrfs_sync_fs(inode->i_sb, 1);
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 99aa87c089121..a646af95dd100 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
+       if (ret)
+               goto out_unlock;
+ 
++      /*
++       * After dirtying the page our caller will need to start a transaction,
++       * and if we are low on metadata free space, that can cause flushing of
++       * delalloc for all inodes in order to get metadata space released.
++       * However we are holding the range locked for the whole duration of
++       * the clone/dedupe operation, so we may deadlock if that happens and no
++       * other task releases enough space. So mark this inode as not being
++       * possible to flush to avoid such deadlock. We will clear that flag
++       * when we finish cloning all extents, since a transaction is started
++       * after finding each extent to clone.
++       */
++      set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
++
+       if (comp_type == BTRFS_COMPRESS_NONE) {
+               char *map;
+ 
+@@ -547,6 +560,8 @@ process_slot:
+ out:
+       btrfs_free_path(path);
+       kvfree(buf);
++      clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
++
+       return ret;
+ }
+ 
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 64099565ab8f5..e8347461c8ddd 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
+ 
+       loops = 0;
+       while ((delalloc_bytes || dio_bytes) && loops < 3) {
+-              btrfs_start_delalloc_roots(fs_info, items);
++              u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
++
++              btrfs_start_delalloc_roots(fs_info, nr_pages, true);
+ 
+               loops++;
+               if (wait_ordered && !trans) {
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 56cbc1706b6f7..5b11bb9770664 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4571,6 +4571,10 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
+       const u64 ino = btrfs_ino(inode);
+       int ins_nr = 0;
+       int start_slot = 0;
++      bool found_xattrs = false;
++
++      if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
++              return 0;
+ 
+       key.objectid = ino;
+       key.type = BTRFS_XATTR_ITEM_KEY;
+@@ -4609,6 +4613,7 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
+                       start_slot = slot;
+               ins_nr++;
+               path->slots[0]++;
++              found_xattrs = true;
+               cond_resched();
+       }
+       if (ins_nr > 0) {
+@@ -4618,6 +4623,9 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
+                       return ret;
+       }
+ 
++      if (!found_xattrs)
++              set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
++
+       return 0;
+ }
+ 
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 95d9aebff2c4b..e51774201d53b 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -213,9 +213,11 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+       }
+ out:
+       btrfs_free_path(path);
+-      if (!ret)
++      if (!ret) {
+               set_bit(BTRFS_INODE_COPY_EVERYTHING,
+                       &BTRFS_I(inode)->runtime_flags);
++              clear_bit(BTRFS_INODE_NO_XATTRS, &BTRFS_I(inode)->runtime_flags);
++      }
+       return ret;
+ }
+ 
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 1f798c5c4213e..4833b68f1a1cc 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1625,9 +1625,9 @@ static bool io_match_files(struct io_kiocb *req,
+ }
+ 
+ /* Returns true if there are no backlogged entries after the flush */
+-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+-                                   struct task_struct *tsk,
+-                                   struct files_struct *files)
++static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
++                                     struct task_struct *tsk,
++                                     struct files_struct *files)
+ {
+       struct io_rings *rings = ctx->rings;
+       struct io_kiocb *req, *tmp;
+@@ -1681,6 +1681,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+       return cqe != NULL;
+ }
+ 
++static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
++                                   struct task_struct *tsk,
++                                   struct files_struct *files)
++{
++      if (test_bit(0, &ctx->cq_check_overflow)) {
++              /* iopoll syncs against uring_lock, not completion_lock */
++              if (ctx->flags & IORING_SETUP_IOPOLL)
++                      mutex_lock(&ctx->uring_lock);
++              __io_cqring_overflow_flush(ctx, force, tsk, files);
++              if (ctx->flags & IORING_SETUP_IOPOLL)
++                      mutex_unlock(&ctx->uring_lock);
++      }
++}
++
+ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+ {
+       struct io_ring_ctx *ctx = req->ctx;
+@@ -2047,14 +2061,15 @@ static void io_req_task_cancel(struct callback_head *cb)
+ static void __io_req_task_submit(struct io_kiocb *req)
+ {
+       struct io_ring_ctx *ctx = req->ctx;
++      bool fail;
+ 
+-      if (!__io_sq_thread_acquire_mm(ctx)) {
+-              mutex_lock(&ctx->uring_lock);
++      fail = __io_sq_thread_acquire_mm(ctx);
++      mutex_lock(&ctx->uring_lock);
++      if (!fail)
+               __io_queue_sqe(req, NULL);
+-              mutex_unlock(&ctx->uring_lock);
+-      } else {
++      else
+               __io_req_task_cancel(req, -EFAULT);
+-      }
++      mutex_unlock(&ctx->uring_lock);
+ }
+ 
+ static void io_req_task_submit(struct callback_head *cb)
+@@ -2234,22 +2249,10 @@ static void io_double_put_req(struct io_kiocb *req)
+               io_free_req(req);
+ }
+ 
+-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
++static unsigned io_cqring_events(struct io_ring_ctx *ctx)
+ {
+       struct io_rings *rings = ctx->rings;
+ 
+-      if (test_bit(0, &ctx->cq_check_overflow)) {
+-              /*
+-               * noflush == true is from the waitqueue handler, just ensure
+-               * we wake up the task, and the next invocation will flush the
+-               * entries. We cannot safely to it from here.
+-               */
+-              if (noflush)
+-                      return -1U;
+-
+-              io_cqring_overflow_flush(ctx, false, NULL, NULL);
+-      }
+-
+       /* See comment at the top of this file */
+       smp_rmb();
+       return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
+@@ -2474,7 +2477,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+                * If we do, we can potentially be spinning for commands that
+                * already triggered a CQE (eg in error).
+                */
+-              if (io_cqring_events(ctx, false))
++              if (test_bit(0, &ctx->cq_check_overflow))
++                      __io_cqring_overflow_flush(ctx, false, NULL, NULL);
++              if (io_cqring_events(ctx))
+                       break;
+ 
+               /*
+@@ -6577,7 +6582,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
+ 
+       /* if we have a backlog and couldn't flush it all, return BUSY */
+       if (test_bit(0, &ctx->sq_check_overflow)) {
+-              if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
++              if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
+                       return -EBUSY;
+       }
+ 
+@@ -6866,7 +6871,7 @@ struct io_wait_queue {
+       unsigned nr_timeouts;
+ };
+ 
+-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
++static inline bool io_should_wake(struct io_wait_queue *iowq)
+ {
+       struct io_ring_ctx *ctx = iowq->ctx;
+ 
+@@ -6875,7 +6880,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+        * started waiting. For timeouts, we always want to return to userspace,
+        * regardless of event count.
+        */
+-      return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
++      return io_cqring_events(ctx) >= iowq->to_wait ||
+                       atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+ }
+ 
+@@ -6885,11 +6890,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+       struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
+                                                       wq);
+ 
+-      /* use noflush == true, as we can't safely rely on locking context */
+-      if (!io_should_wake(iowq, true))
+-              return -1;
+-
+-      return autoremove_wake_function(curr, mode, wake_flags, key);
++      /*
++       * Cannot safely flush overflowed CQEs from here, ensure we wake up
++       * the task, and the next invocation will do it.
++       */
++      if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
++              return autoremove_wake_function(curr, mode, wake_flags, key);
++      return -1;
+ }
+ 
+ static int io_run_task_work_sig(void)
+@@ -6928,7 +6935,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+       int ret = 0;
+ 
+       do {
+-              if (io_cqring_events(ctx, false) >= min_events)
++              io_cqring_overflow_flush(ctx, false, NULL, NULL);
++              if (io_cqring_events(ctx) >= min_events)
+                       return 0;
+               if (!io_run_task_work())
+                       break;
+@@ -6950,6 +6958,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+       iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+       trace_io_uring_cqring_wait(ctx, min_events);
+       do {
++              io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+                                               TASK_INTERRUPTIBLE);
+               /* make sure we run task_work before checking for signals */
+@@ -6958,8 +6967,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+                       continue;
+               else if (ret < 0)
+                       break;
+-              if (io_should_wake(&iowq, false))
++              if (io_should_wake(&iowq))
+                       break;
++              if (test_bit(0, &ctx->cq_check_overflow))
++                      continue;
+               schedule();
+       } while (1);
+       finish_wait(&ctx->wait, &iowq.wq);
+@@ -7450,12 +7461,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+ 
+       ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
+       if (!ref_node)
+-              return ERR_PTR(-ENOMEM);
++              return NULL;
+ 
+       if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
+                           0, GFP_KERNEL)) {
+               kfree(ref_node);
+-              return ERR_PTR(-ENOMEM);
++              return NULL;
+       }
+       INIT_LIST_HEAD(&ref_node->node);
+       INIT_LIST_HEAD(&ref_node->file_list);
+@@ -7549,9 +7560,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+       }
+ 
+       ref_node = alloc_fixed_file_ref_node(ctx);
+-      if (IS_ERR(ref_node)) {
++      if (!ref_node) {
+               io_sqe_files_unregister(ctx);
+-              return PTR_ERR(ref_node);
++              return -ENOMEM;
+       }
+ 
+       io_sqe_files_set_node(file_data, ref_node);
+@@ -7651,8 +7662,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+               return -EINVAL;
+ 
+       ref_node = alloc_fixed_file_ref_node(ctx);
+-      if (IS_ERR(ref_node))
+-              return PTR_ERR(ref_node);
++      if (!ref_node)
++              return -ENOMEM;
+ 
+       done = 0;
+       fds = u64_to_user_ptr(up->fds);
+@@ -8384,7 +8395,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+       smp_rmb();
+       if (!io_sqring_full(ctx))
+               mask |= EPOLLOUT | EPOLLWRNORM;
+-      if (io_cqring_events(ctx, false))
++      io_cqring_overflow_flush(ctx, false, NULL, NULL);
++      if (io_cqring_events(ctx))
+               mask |= EPOLLIN | EPOLLRDNORM;
+ 
+       return mask;
+@@ -8442,7 +8454,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+       /* if force is set, the ring is going away. always drop after that */
+       ctx->cq_overflow_flushed = 1;
+       if (ctx->rings)
+-              io_cqring_overflow_flush(ctx, true, NULL, NULL);
++              __io_cqring_overflow_flush(ctx, true, NULL, NULL);
+       mutex_unlock(&ctx->uring_lock);
+ 
+       io_kill_timeouts(ctx, NULL);
+@@ -8715,9 +8727,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+       }
+ 
+       io_cancel_defer_files(ctx, task, files);
+-      io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
+       io_cqring_overflow_flush(ctx, true, task, files);
+-      io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
+ 
+       while (__io_uring_cancel_task_requests(ctx, task, files)) {
+               io_run_task_work();
+@@ -9023,10 +9033,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+        */
+       ret = 0;
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+-              io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
+-              if (!list_empty_careful(&ctx->cq_overflow_list))
+-                      io_cqring_overflow_flush(ctx, false, NULL, NULL);
+-              io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
++              io_cqring_overflow_flush(ctx, false, NULL, NULL);
++
+               if (flags & IORING_ENTER_SQ_WAKEUP)
+                       wake_up(&ctx->sq_data->wait);
+               if (flags & IORING_ENTER_SQ_WAIT)
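The io_uring hunks above split the overflow flush into a raw __io_cqring_overflow_flush() plus a locking wrapper, because IOPOLL rings serialize completion-queue access with uring_lock instead of completion_lock. Below is a minimal stand-alone sketch of that conditional-locking wrapper pattern; all identifiers (ring_ctx, SETUP_IOPOLL, overflow_pending) are invented for illustration and are not the kernel's.

#include <pthread.h>
#include <stdbool.h>

#define SETUP_IOPOLL 0x1

struct ring_ctx {
	unsigned int flags;		/* e.g. SETUP_IOPOLL */
	bool overflow_pending;		/* stands in for cq_check_overflow */
	pthread_mutex_t lock;		/* stands in for uring_lock */
};

/* Raw helper: caller must already hold ctx->lock when SETUP_IOPOLL is set. */
static bool __overflow_flush(struct ring_ctx *ctx, bool force)
{
	bool had_backlog = ctx->overflow_pending;

	(void)force;
	ctx->overflow_pending = false;	/* drain the backlog */
	return had_backlog;
}

/* Wrapper: takes the lock only for configurations that need it. */
static void overflow_flush(struct ring_ctx *ctx, bool force)
{
	if (!ctx->overflow_pending)	/* cheap pre-check, like test_bit() */
		return;
	if (ctx->flags & SETUP_IOPOLL)
		pthread_mutex_lock(&ctx->lock);
	__overflow_flush(ctx, force);
	if (ctx->flags & SETUP_IOPOLL)
		pthread_mutex_unlock(&ctx->lock);
}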
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 3e01d8f2ab906..dcab112e1f001 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -1285,26 +1285,23 @@ fput_and_out:
+       return ret;
+ }
+ 
++#ifndef CONFIG_ARCH_SPLIT_ARG64
+ SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
+                             __u64, mask, int, dfd,
+                             const char  __user *, pathname)
+ {
+       return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
+ }
++#endif
+ 
+-#ifdef CONFIG_COMPAT
+-COMPAT_SYSCALL_DEFINE6(fanotify_mark,
++#if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT)
++SYSCALL32_DEFINE6(fanotify_mark,
+                               int, fanotify_fd, unsigned int, flags,
+-                              __u32, mask0, __u32, mask1, int, dfd,
++                              SC_ARG64(mask), int, dfd,
+                               const char  __user *, pathname)
+ {
+-      return do_fanotify_mark(fanotify_fd, flags,
+-#ifdef __BIG_ENDIAN
+-                              ((__u64)mask0 << 32) | mask1,
+-#else
+-                              ((__u64)mask1 << 32) | mask0,
+-#endif
+-                               dfd, pathname);
++      return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask),
++                              dfd, pathname);
+ }
+ #endif
+ 
+diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
+index ef2697b78820d..827278f937fe7 100644
+--- a/fs/zonefs/Kconfig
++++ b/fs/zonefs/Kconfig
+@@ -3,6 +3,7 @@ config ZONEFS_FS
+       depends on BLOCK
+       depends on BLK_DEV_ZONED
+       select FS_IOMAP
++      select CRC32
+       help
+         zonefs is a simple file system which exposes zones of a zoned block
+         device (e.g. host-managed or host-aware SMR disk drives) as files.
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 37bea07c12f21..aea0ce9f3b745 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -251,6 +251,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+       static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+ #endif /* __SYSCALL_DEFINEx */
+ 
++/* For split 64-bit arguments on 32-bit architectures */
++#ifdef __LITTLE_ENDIAN
++#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
++#else
++#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
++#endif
++#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
++
++#ifdef CONFIG_COMPAT
++#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
++#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
++#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
++#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
++#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
++#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
++#else
++#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
++#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
++#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
++#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
++#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
++#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
++#endif
++
+ /*
+  * Called before coming back to user-mode. Returning to user-mode with an
+  * address limit different than USER_DS can allow to overwrite kernel memory.
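For illustration: SC_ARG64() above declares a 64-bit syscall argument as two 32-bit halves (low word first on little-endian, high word first on big-endian) and SC_VAL64() glues them back together. A hedged user-space sketch of the same reassembly, using plain stdint types rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as SC_VAL64(__u64, mask). */
static uint64_t join_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t mask = 0x0102030405060708ULL;
	uint32_t mask_lo = (uint32_t)mask;		/* low half */
	uint32_t mask_hi = (uint32_t)(mask >> 32);	/* high half */

	/* prints "ok" - the two halves reassemble losslessly */
	printf("%s\n", join_u64(mask_lo, mask_hi) == mask ? "ok" : "bad");
	return 0;
}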
+diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
+index 4f4e93bf814c3..cc17bc9575482 100644
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -58,10 +58,6 @@ struct xdp_sock {
+ 
+       struct xsk_queue *tx ____cacheline_aligned_in_smp;
+       struct list_head tx_list;
+-      /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+-       * in the SKB destructor callback.
+-       */
+-      spinlock_t tx_completion_lock;
+       /* Protects generic receive. */
+       spinlock_t rx_lock;
+ 
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index 01755b838c745..eaa8386dbc630 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -73,6 +73,11 @@ struct xsk_buff_pool {
+       bool dma_need_sync;
+       bool unaligned;
+       void *addrs;
++      /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
++       * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
++       * sockets share a single cq when the same netdev and queue id is shared.
++       */
++      spinlock_t cq_lock;
+       struct xdp_buff_xsk *free_heads[];
+ };
+ 
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index f292e0267bb9e..15bbfaf943fd1 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -284,7 +284,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+       return 0;
+ 
+ out_free_newdev:
+-      if (new_dev->reg_state == NETREG_UNINITIALIZED)
++      if (new_dev->reg_state == NETREG_UNINITIALIZED ||
++          new_dev->reg_state == NETREG_UNREGISTERED)
+               free_netdev(new_dev);
+       return err;
+ }
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 26bdc3c20b7e4..8bd565f2073e7 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1139,6 +1139,7 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
+       if (peer)
+               return -EOPNOTSUPP;
+ 
++      memset(addr, 0, sizeof(*addr));
+       addr->can_family = AF_CAN;
+       addr->can_ifindex = so->ifindex;
+       addr->can_addr.tp.rx_id = so->rxid;
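The single memset() added above is an information-leak fix: getname handlers copy the whole sockaddr back to user space, so padding and any fields left unset must be zeroed first. A small user-space analogue; sockaddr_toy and its fields are invented for the sketch (only AF_CAN = 29 is a real constant):

#include <string.h>

#define AF_CAN 29

struct sockaddr_toy {
	unsigned short family;
	int ifindex;
	unsigned int rx_id;
	unsigned char pad[8];	/* compiler padding would also leak if unset */
};

static void fill_name(struct sockaddr_toy *addr, int ifindex, unsigned int rx_id)
{
	memset(addr, 0, sizeof(*addr));	/* every byte defined before copy-out */
	addr->family = AF_CAN;
	addr->ifindex = ifindex;
	addr->rx_id = rx_id;
}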
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e578544b2cc71..fbadd93b95ace 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2011,6 +2011,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+               skb->csum = csum_block_sub(skb->csum,
+                                          skb_checksum(skb, len, delta, 0),
+                                          len);
++      } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++              int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
++              int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
++
++              if (offset + sizeof(__sum16) > hdlen)
++                      return -EINVAL;
+       }
+       return __pskb_trim(skb, len);
+ }
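The CHECKSUM_PARTIAL branch added above refuses a trim that would cut away the 16-bit slot where the offloaded checksum gets written (checksum start plus csum_offset). A rough user-space sketch of the bound check; fake_skb is invented and only approximates struct sk_buff:

#include <stdint.h>
#include <errno.h>

struct fake_skb {
	unsigned int headlen;		/* bytes in the linear head area */
	unsigned int csum_start;	/* where checksumming begins */
	unsigned int csum_offset;	/* where the result lands, from csum_start */
};

static int trim_ok(const struct fake_skb *skb, unsigned int len)
{
	unsigned int hdlen = len > skb->headlen ? skb->headlen : len;
	unsigned int offset = skb->csum_start + skb->csum_offset;

	if (offset + sizeof(uint16_t) > hdlen)
		return -EINVAL;		/* checksum slot would be trimmed away */
	return 0;			/* safe to trim to len */
}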
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 879b76ae4435c..97975bed491ad 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -302,7 +302,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(net, sk, skb, mtu);
+ 
+-      if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
++      if (skb->len > mtu || IPCB(skb)->frag_max_size)
+               return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
+ 
+       return ip_finish_output2(net, sk, skb);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index ee65c9225178d..64594aa755f05 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -759,8 +759,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+               goto tx_error;
+       }
+ 
+-      if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
+-                          0, 0, false)) {
++      df = tnl_params->frag_off;
++      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
++              df |= (inner_iph->frag_off & htons(IP_DF));
++
++      if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
+               ip_rt_put(rt);
+               goto tx_error;
+       }
+@@ -788,10 +791,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                       ttl = ip4_dst_hoplimit(&rt->dst);
+       }
+ 
+-      df = tnl_params->frag_off;
+-      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+-              df |= (inner_iph->frag_off&htons(IP_DF));
+-
+       max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+                       + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+       if (max_headroom > dev->needed_headroom)
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 0dc43ad28eb95..f63f7ada51b36 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -496,7 +496,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
+       for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+-              if (tb[NHA_FDB])
++              if (i == NHA_FDB)
+                       continue;
+               NL_SET_ERR_MSG(extack,
+                              "No other attributes can be set in nexthop groups");
+@@ -1277,8 +1277,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
+       return nh;
+ 
+ out_no_nh:
+-      for (; i >= 0; --i)
++      for (i--; i >= 0; --i) {
++              list_del(&nhg->nh_entries[i].nh_list);
+               nexthop_put(nhg->nh_entries[i].nh);
++      }
+ 
+       kfree(nhg->spare);
+       kfree(nhg);
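The out_no_nh change above fixes an unwind off-by-one: when setup fails at entry i, that entry was never taken, so release has to start at i - 1 (the fix also unlinks each entry from its list). A generic sketch of the idiom with invented acquire/release helpers:

struct entry {
	int held;
};

static int acquire(struct entry *e) { e->held = 1; return 0; }
static void release(struct entry *e) { e->held = 0; }

static int acquire_all(struct entry *e, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (acquire(&e[i]) < 0)
			goto undo;
	}
	return 0;

undo:
	/* e[i] itself was never acquired, so start the unwind at i - 1 */
	for (i--; i >= 0; --i)
		release(&e[i]);
	return -1;
}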
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 605cdd38a919a..f43e275557251 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1025,6 +1025,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
+ {
+       struct fib6_table *table = rt->fib6_table;
+ 
++      /* Flush all cached dst in exception table */
++      rt6_flush_exceptions(rt);
+       fib6_drop_pcpu_from(rt, table);
+ 
+       if (rt->nh && !list_empty(&rt->nh_list))
+@@ -1927,9 +1929,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+       net->ipv6.rt6_stats->fib_rt_entries--;
+       net->ipv6.rt6_stats->fib_discarded_routes++;
+ 
+-      /* Flush all cached dst in exception table */
+-      rt6_flush_exceptions(rt);
+-
+       /* Reset round-robin state, if necessary */
+       if (rcu_access_pointer(fn->rr_ptr) == rt)
+               fn->rr_ptr = NULL;
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 189cfbbcccc04..d5f42c62fd79e 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -364,9 +364,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
+       struct xdp_sock *xs = xdp_sk(skb->sk);
+       unsigned long flags;
+ 
+-      spin_lock_irqsave(&xs->tx_completion_lock, flags);
++      spin_lock_irqsave(&xs->pool->cq_lock, flags);
+       xskq_prod_submit_addr(xs->pool->cq, addr);
+-      spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
++      spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ 
+       sock_wfree(skb);
+ }
+@@ -378,6 +378,7 @@ static int xsk_generic_xmit(struct sock *sk)
+       bool sent_frame = false;
+       struct xdp_desc desc;
+       struct sk_buff *skb;
++      unsigned long flags;
+       int err = 0;
+ 
+       mutex_lock(&xs->mutex);
+@@ -409,10 +410,13 @@ static int xsk_generic_xmit(struct sock *sk)
+                * if there is space in it. This avoids having to implement
+                * any buffering in the Tx path.
+                */
++              spin_lock_irqsave(&xs->pool->cq_lock, flags);
+               if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
++                      spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+                       kfree_skb(skb);
+                       goto out;
+               }
++              spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ 
+               skb->dev = xs->dev;
+               skb->priority = sk->sk_priority;
+@@ -424,6 +428,9 @@ static int xsk_generic_xmit(struct sock *sk)
+               if  (err == NETDEV_TX_BUSY) {
+                       /* Tell user-space to retry the send */
+                       skb->destructor = sock_wfree;
++                      spin_lock_irqsave(&xs->pool->cq_lock, flags);
++                      xskq_prod_cancel(xs->pool->cq);
++                      spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+                       /* Free skb without triggering the perf drop trace */
+                       consume_skb(skb);
+                       err = -EAGAIN;
+@@ -1197,7 +1204,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
+       xs->state = XSK_READY;
+       mutex_init(&xs->mutex);
+       spin_lock_init(&xs->rx_lock);
+-      spin_lock_init(&xs->tx_completion_lock);
+ 
+       INIT_LIST_HEAD(&xs->map_list);
+       spin_lock_init(&xs->map_list_lock);
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 46c2ae7d91d15..2ef6f926610ee 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -71,6 +71,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+       INIT_LIST_HEAD(&pool->free_list);
+       INIT_LIST_HEAD(&pool->xsk_tx_list);
+       spin_lock_init(&pool->xsk_tx_list_lock);
++      spin_lock_init(&pool->cq_lock);
+       refcount_set(&pool->users, 1);
+ 
+       pool->fq = xs->fq_tmp;
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 9e71b9f27679b..ef6de0fb4e312 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -286,6 +286,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
+       return !free_entries;
+ }
+ 
++static inline void xskq_prod_cancel(struct xsk_queue *q)
++{
++      q->cached_prod--;
++}
++
+ static inline int xskq_prod_reserve(struct xsk_queue *q)
+ {
+       if (xskq_prod_is_full(q))
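xskq_prod_cancel() above pairs with xskq_prod_reserve(): a completion-ring slot is claimed speculatively before transmit and backed out under cq_lock if the send later fails (the NETDEV_TX_BUSY path in xsk_generic_xmit()). A minimal illustrative producer ring showing the same pairing; prod_ring is invented, not the kernel's xsk_queue:

#include <stdint.h>

struct prod_ring {
	uint32_t cached_prod;	/* next slot to claim */
	uint32_t cached_cons;	/* consumer position snapshot */
	uint32_t size;		/* ring entries, power of two */
};

static int prod_reserve(struct prod_ring *q)
{
	if (q->cached_prod - q->cached_cons == q->size)
		return -1;	/* ring full */
	q->cached_prod++;	/* slot claimed, nothing published yet */
	return 0;
}

static void prod_cancel(struct prod_ring *q)
{
	q->cached_prod--;	/* undo an unpublished reserve */
}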
+diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
+index 3fae61ef63396..ff3aa0cf39978 100644
+--- a/tools/bpf/bpftool/net.c
++++ b/tools/bpf/bpftool/net.c
+@@ -11,7 +11,6 @@
+ #include <bpf/bpf.h>
+ #include <bpf/libbpf.h>
+ #include <net/if.h>
+-#include <linux/if.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/socket.h>
+ #include <linux/tc_act/tc_bpf.h>
+diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
+index e5de603369381..9f4428be3e362 100644
+--- a/tools/include/uapi/linux/fscrypt.h
++++ b/tools/include/uapi/linux/fscrypt.h
+@@ -20,7 +20,6 @@
+ #define FSCRYPT_POLICY_FLAG_DIRECT_KEY                0x04
+ #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64    0x08
+ #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32    0x10
+-#define FSCRYPT_POLICY_FLAGS_VALID            0x1F
+ 
+ /* Encryption algorithms */
+ #define FSCRYPT_MODE_AES_256_XTS              1
+@@ -28,7 +27,7 @@
+ #define FSCRYPT_MODE_AES_128_CBC              5
+ #define FSCRYPT_MODE_AES_128_CTS              6
+ #define FSCRYPT_MODE_ADIANTUM                 9
+-#define __FSCRYPT_MODE_MAX                    9
++/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
+ 
+ /*
+  * Legacy policy version; ad-hoc KDF and no key verification.
+@@ -177,7 +176,7 @@ struct fscrypt_get_key_status_arg {
+ #define FS_POLICY_FLAGS_PAD_32                FSCRYPT_POLICY_FLAGS_PAD_32
+ #define FS_POLICY_FLAGS_PAD_MASK      FSCRYPT_POLICY_FLAGS_PAD_MASK
+ #define FS_POLICY_FLAG_DIRECT_KEY     FSCRYPT_POLICY_FLAG_DIRECT_KEY
+-#define FS_POLICY_FLAGS_VALID         FSCRYPT_POLICY_FLAGS_VALID
++#define FS_POLICY_FLAGS_VALID         0x07    /* contains old flags only */
+ #define FS_ENCRYPTION_MODE_INVALID    0       /* never used */
+ #define FS_ENCRYPTION_MODE_AES_256_XTS        FSCRYPT_MODE_AES_256_XTS
+ #define FS_ENCRYPTION_MODE_AES_256_GCM        2       /* never used */
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 136df8c102812..9359377aeb35c 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -146,6 +146,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)				\
+                    /sys/kernel/btf/vmlinux                            \
+                    /boot/vmlinux-$(shell uname -r)
+ VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
++ifeq ($(VMLINUX_BTF),)
++$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
++endif
+ 
+ DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
+ 
+diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
+index eb693a3b7b4a1..4c7d33618437c 100755
+--- a/tools/testing/selftests/net/fib_nexthops.sh
++++ b/tools/testing/selftests/net/fib_nexthops.sh
+@@ -869,7 +869,7 @@ ipv6_torture()
+       pid3=$!
+       ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 &
+       pid4=$!
+-      ip netns exec me mausezahn veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
++      ip netns exec me mausezahn -6 veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+       pid5=$!
+ 
+       sleep 300
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 6bbf69a28e128..3367fb5f2feff 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -162,7 +162,15 @@
+ # - list_flush_ipv6_exception
+ #     Using the same topology as in pmtu_ipv6, create exceptions, and check
+ #     they are shown when listing exception caches, gone after flushing them
+-
++#
++# - pmtu_ipv4_route_change
++#     Use the same topology as in pmtu_ipv4, but issue a route replacement
++#     command and delete the corresponding device afterward. This tests for
++#     proper cleanup of the PMTU exceptions by the route replacement path.
++#     Device unregistration should complete successfully
++#
++# - pmtu_ipv6_route_change
++#     Same as above but with IPv6
+ 
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+@@ -224,7 +232,9 @@ tests="
+       cleanup_ipv4_exception          ipv4: cleanup of cached exceptions      1
+       cleanup_ipv6_exception          ipv6: cleanup of cached exceptions      1
+       list_flush_ipv4_exception       ipv4: list and flush cached exceptions  1
+-      list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1"
++      list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1
++      pmtu_ipv4_route_change          ipv4: PMTU exception w/route replace    1
++      pmtu_ipv6_route_change          ipv6: PMTU exception w/route replace    1"
+ 
+ NS_A="ns-A"
+ NS_B="ns-B"
+@@ -1770,6 +1780,63 @@ test_list_flush_ipv6_exception() {
+       return ${fail}
+ }
+ 
++test_pmtu_ipvX_route_change() {
++      family=${1}
++
++      setup namespaces routing || return 2
++      trace "${ns_a}"  veth_A-R1    "${ns_r1}" veth_R1-A \
++            "${ns_r1}" veth_R1-B    "${ns_b}"  veth_B-R1 \
++            "${ns_a}"  veth_A-R2    "${ns_r2}" veth_R2-A \
++            "${ns_r2}" veth_R2-B    "${ns_b}"  veth_B-R2
++
++      if [ ${family} -eq 4 ]; then
++              ping=ping
++              dst1="${prefix4}.${b_r1}.1"
++              dst2="${prefix4}.${b_r2}.1"
++              gw="${prefix4}.${a_r1}.2"
++      else
++              ping=${ping6}
++              dst1="${prefix6}:${b_r1}::1"
++              dst2="${prefix6}:${b_r2}::1"
++              gw="${prefix6}:${a_r1}::2"
++      fi
++
++      # Set up initial MTU values
++      mtu "${ns_a}"  veth_A-R1 2000
++      mtu "${ns_r1}" veth_R1-A 2000
++      mtu "${ns_r1}" veth_R1-B 1400
++      mtu "${ns_b}"  veth_B-R1 1400
++
++      mtu "${ns_a}"  veth_A-R2 2000
++      mtu "${ns_r2}" veth_R2-A 2000
++      mtu "${ns_r2}" veth_R2-B 1500
++      mtu "${ns_b}"  veth_B-R2 1500
++
++      # Create route exceptions
++      run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
++      run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
++
++      # Check that exceptions have been created with the correct PMTU
++      pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
++      check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
++      pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
++      check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
++
++      # Replace the route from A to R1
++      run_cmd ${ns_a} ip route change default via ${gw}
++
++      # Delete the device in A
++      run_cmd ${ns_a} ip link del "veth_A-R1"
++}
++
++test_pmtu_ipv4_route_change() {
++      test_pmtu_ipvX_route_change 4
++}
++
++test_pmtu_ipv6_route_change() {
++      test_pmtu_ipvX_route_change 6
++}
++
+ usage() {
+       echo
+       echo "$0 [OPTIONS] [TEST]..."
