commit:     94c77d39cd0775e11378a7308bbabc6e6f834254
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 13 11:34:18 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 13 11:34:18 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94c77d39

Linux patch 4.19.115

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1114_linux-4.19.115.patch | 1883 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1887 insertions(+)

diff --git a/0000_README b/0000_README
index 99fb8b5..65d1a80 100644
--- a/0000_README
+++ b/0000_README
@@ -495,6 +495,10 @@ Patch:  1113_linux-4.19.114.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.114
 
+Patch:  1114_linux-4.19.115.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.115
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1114_linux-4.19.115.patch b/1114_linux-4.19.115.patch
new file mode 100644
index 0000000..d2bb9d9
--- /dev/null
+++ b/1114_linux-4.19.115.patch
@@ -0,0 +1,1883 @@
+diff --git a/Makefile b/Makefile
+index 6f849dafbfec..9830a71e9192 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 114
++SUBLEVEL = 115
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 06058fba5f86..d22ab8d9edc9 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -667,7 +667,7 @@ ENTRY(__boot_cpu_mode)
+  * with MMU turned off.
+  */
+ ENTRY(__early_cpu_boot_status)
+-      .long   0
++      .quad   0
+ 
+       .popsection
+ 
+diff --git a/drivers/char/hw_random/imx-rngc.c 
b/drivers/char/hw_random/imx-rngc.c
+index 14730be54edf..dc9b8f377907 100644
+--- a/drivers/char/hw_random/imx-rngc.c
++++ b/drivers/char/hw_random/imx-rngc.c
+@@ -111,8 +111,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
+               return -ETIMEDOUT;
+       }
+ 
+-      if (rngc->err_reg != 0)
++      if (rngc->err_reg != 0) {
++              imx_rngc_irq_mask_clear(rngc);
+               return -EIO;
++      }
+ 
+       return 0;
+ }
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 53e822793d46..d5f970d039bb 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2280,11 +2280,11 @@ struct batched_entropy {
+ 
+ /*
+  * Get a random word for internal kernel use only. The quality of the random
+- * number is either as good as RDRAND or as good as /dev/urandom, with the
+- * goal of being quite fast and not depleting entropy. In order to ensure
++ * number is good as /dev/urandom, but there is no backtrack protection, with
++ * the goal of being quite fast and not depleting entropy. In order to ensure
+  * that the randomness provided by this function is okay, the function
+- * wait_for_random_bytes() should be called and return 0 at least once
+- * at any point prior.
++ * wait_for_random_bytes() should be called and return 0 at least once at any
++ * point prior.
+  */
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+       .batch_lock     = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+@@ -2297,15 +2297,6 @@ u64 get_random_u64(void)
+       struct batched_entropy *batch;
+       static void *previous;
+ 
+-#if BITS_PER_LONG == 64
+-      if (arch_get_random_long((unsigned long *)&ret))
+-              return ret;
+-#else
+-      if (arch_get_random_long((unsigned long *)&ret) &&
+-          arch_get_random_long((unsigned long *)&ret + 1))
+-          return ret;
+-#endif
+-
+       warn_unseeded_randomness(&previous);
+ 
+       batch = raw_cpu_ptr(&batched_entropy_u64);
+@@ -2330,9 +2321,6 @@ u32 get_random_u32(void)
+       struct batched_entropy *batch;
+       static void *previous;
+ 
+-      if (arch_get_random_int(&ret))
+-              return ret;
+-
+       warn_unseeded_randomness(&previous);
+ 
+       batch = raw_cpu_ptr(&batched_entropy_u32);
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index ee693e15d9eb..f420f0c96877 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -105,7 +105,7 @@ static int update_config(struct clk_rcg2 *rcg)
+       }
+ 
+       WARN(1, "%s: rcg didn't update its configuration.", name);
+-      return 0;
++      return -EBUSY;
+ }
+ 
+ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
+index a983708b77a6..363b403bdb51 100644
+--- a/drivers/extcon/extcon-axp288.c
++++ b/drivers/extcon/extcon-axp288.c
+@@ -428,9 +428,40 @@ static int axp288_extcon_probe(struct platform_device 
*pdev)
+       /* Start charger cable type detection */
+       axp288_extcon_enable(info);
+ 
++      device_init_wakeup(dev, true);
++      platform_set_drvdata(pdev, info);
++
++      return 0;
++}
++
++static int __maybe_unused axp288_extcon_suspend(struct device *dev)
++{
++      struct axp288_extcon_info *info = dev_get_drvdata(dev);
++
++      if (device_may_wakeup(dev))
++              enable_irq_wake(info->irq[VBUS_RISING_IRQ]);
++
+       return 0;
+ }
+ 
++static int __maybe_unused axp288_extcon_resume(struct device *dev)
++{
++      struct axp288_extcon_info *info = dev_get_drvdata(dev);
++
++      /*
++       * Wakeup when a charger is connected to do charger-type
++       * connection and generate an extcon event which makes the
++       * axp288 charger driver set the input current limit.
++       */
++      if (device_may_wakeup(dev))
++              disable_irq_wake(info->irq[VBUS_RISING_IRQ]);
++
++      return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend,
++                       axp288_extcon_resume);
++
+ static const struct platform_device_id axp288_extcon_table[] = {
+       { .name = "axp288_extcon" },
+       {},
+@@ -442,6 +473,7 @@ static struct platform_driver axp288_extcon_driver = {
+       .id_table = axp288_extcon_table,
+       .driver = {
+               .name = "axp288_extcon",
++              .pm = &axp288_extcon_pm_ops,
+       },
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 4f8f3bb21832..a54f8943ffa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -857,7 +857,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
+ 
+       if (enable) {
+               /* wait for STATUS to clear */
+-              if (vcn_v1_0_is_idle(handle))
++              if (!vcn_v1_0_is_idle(handle))
+                       return -EBUSY;
+               vcn_v1_0_enable_clock_gating(adev);
+       } else {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 122249da03ab..a4928854a3de 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2440,6 +2440,17 @@ static bool retrieve_link_cap(struct dc_link *link)
+               sink_id.ieee_device_id,
+               sizeof(sink_id.ieee_device_id));
+ 
++      /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
++      {
++              uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
++
++              if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
++                  !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
++                          sizeof(str_mbp_2017))) {
++                      link->reported_link_cap.link_rate = 0x0c;
++              }
++      }
++
+       core_link_read_dpcd(
+               link,
+               DP_SINK_HW_REVISION_START,
+diff --git a/drivers/gpu/drm/bochs/bochs_hw.c 
b/drivers/gpu/drm/bochs/bochs_hw.c
+index a39b0343c197..401c218567af 100644
+--- a/drivers/gpu/drm/bochs/bochs_hw.c
++++ b/drivers/gpu/drm/bochs/bochs_hw.c
+@@ -97,10 +97,8 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+               size = min(size, mem);
+       }
+ 
+-      if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+-              DRM_ERROR("Cannot request framebuffer\n");
+-              return -EBUSY;
+-      }
++      if (pci_request_region(pdev, 0, "bochs-drm") != 0)
++              DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
+ 
+       bochs->fb_map = ioremap(addr, size);
+       if (bochs->fb_map == NULL) {
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
+index fc978603fc94..7c3c323773d3 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -439,6 +439,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct 
drm_dp_sideband_msg_rx
+       if (idx > raw->curlen)
+               goto fail_len;
+       repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
++      idx++;
+       if (idx > raw->curlen)
+               goto fail_len;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c 
b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+index 7fea74861a87..c83655b008b9 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+@@ -311,6 +311,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
exec_state,
+       u32 return_target, return_dwords;
+       u32 link_target, link_dwords;
+       bool switch_context = gpu->exec_state != exec_state;
++      unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
++      bool need_flush = gpu->flush_seq != new_flush_seq;
+ 
+       lockdep_assert_held(&gpu->lock);
+ 
+@@ -325,14 +327,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
exec_state,
+        * need to append a mmu flush load state, followed by a new
+        * link to this buffer - a total of four additional words.
+        */
+-      if (gpu->mmu->need_flush || switch_context) {
++      if (need_flush || switch_context) {
+               u32 target, extra_dwords;
+ 
+               /* link command */
+               extra_dwords = 1;
+ 
+               /* flush command */
+-              if (gpu->mmu->need_flush) {
++              if (need_flush) {
+                       if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+                               extra_dwords += 1;
+                       else
+@@ -345,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
exec_state,
+ 
+               target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
+ 
+-              if (gpu->mmu->need_flush) {
++              if (need_flush) {
+                       /* Add the MMU flush */
+                       if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+                               CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+@@ -365,7 +367,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
exec_state,
+                                       SYNC_RECIPIENT_PE);
+                       }
+ 
+-                      gpu->mmu->need_flush = false;
++                      gpu->flush_seq = new_flush_seq;
+               }
+ 
+               if (switch_context) {
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h 
b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+index 9a75a6937268..039e0509af6a 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+@@ -139,6 +139,7 @@ struct etnaviv_gpu {
+ 
+       struct etnaviv_iommu *mmu;
+       struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
++      unsigned int flush_seq;
+ 
+       /* Power Control: */
+       struct clk *clk_bus;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c 
b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 8069f9f36a2e..e132dccedf88 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -261,7 +261,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+       }
+ 
+       list_add_tail(&mapping->mmu_node, &mmu->mappings);
+-      mmu->need_flush = true;
++      mmu->flush_seq++;
+ unlock:
+       mutex_unlock(&mmu->lock);
+ 
+@@ -280,7 +280,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+               etnaviv_iommu_remove_mapping(mmu, mapping);
+ 
+       list_del(&mapping->mmu_node);
+-      mmu->need_flush = true;
++      mmu->flush_seq++;
+       mutex_unlock(&mmu->lock);
+ }
+ 
+@@ -357,7 +357,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, 
dma_addr_t paddr,
+                       mutex_unlock(&mmu->lock);
+                       return ret;
+               }
+-              gpu->mmu->need_flush = true;
++              mmu->flush_seq++;
+               mutex_unlock(&mmu->lock);
+ 
+               *iova = (u32)vram_node->start;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h 
b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+index a0db17ffb686..348a94d9695b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+@@ -48,7 +48,7 @@ struct etnaviv_iommu {
+       struct mutex lock;
+       struct list_head mappings;
+       struct drm_mm mm;
+-      bool need_flush;
++      unsigned int flush_seq;
+ };
+ 
+ struct etnaviv_gem_object;
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index f59ca27a4a35..e53b7cb2211d 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -43,6 +43,46 @@ static bool use_pages(struct drm_gem_object *obj)
+       return !msm_obj->vram_node;
+ }
+ 
++/*
++ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
++ * API.  Really GPU cache is out of scope here (handled on cmdstream)
++ * and all we need to do is invalidate newly allocated pages before
++ * mapping to CPU as uncached/writecombine.
++ *
++ * On top of this, we have the added headache, that depending on
++ * display generation, the display's iommu may be wired up to either
++ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
++ * that here we either have dma-direct or iommu ops.
++ *
++ * Let this be a cautionary tail of abstraction gone wrong.
++ */
++
++static void sync_for_device(struct msm_gem_object *msm_obj)
++{
++      struct device *dev = msm_obj->base.dev->dev;
++
++      if (get_dma_ops(dev)) {
++              dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
++                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      } else {
++              dma_map_sg(dev, msm_obj->sgt->sgl,
++                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      }
++}
++
++static void sync_for_cpu(struct msm_gem_object *msm_obj)
++{
++      struct device *dev = msm_obj->base.dev->dev;
++
++      if (get_dma_ops(dev)) {
++              dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
++                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      } else {
++              dma_unmap_sg(dev, msm_obj->sgt->sgl,
++                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      }
++}
++
+ /* allocate pages from VRAM carveout, used when no IOMMU: */
+ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
+ {
+@@ -108,8 +148,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
+                * because display controller, GPU, etc. are not coherent:
+                */
+               if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+-                      dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+-                                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++                      sync_for_device(msm_obj);
+       }
+ 
+       return msm_obj->pages;
+@@ -138,9 +177,7 @@ static void put_pages(struct drm_gem_object *obj)
+                        * GPU, etc. are not coherent:
+                        */
+                       if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+-                              dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+-                                           msm_obj->sgt->nents,
+-                                           DMA_BIDIRECTIONAL);
++                              sync_for_cpu(msm_obj);
+ 
+                       sg_free_table(msm_obj->sgt);
+                       kfree(msm_obj->sgt);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index e16872e0724f..5c03f4701ece 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2753,6 +2753,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private 
*id_priv)
+ err2:
+       kfree(route->path_rec);
+       route->path_rec = NULL;
++      route->num_paths = 0;
+ err1:
+       kfree(work);
+       return ret;
+@@ -4635,6 +4636,19 @@ static int __init cma_init(void)
+ {
+       int ret;
+ 
++      /*
++       * There is a rare lock ordering dependency in cma_netdev_callback()
++       * that only happens when bonding is enabled. Teach lockdep that rtnl
++       * must never be nested under lock so it can find these without having
++       * to test with bonding.
++       */
++      if (IS_ENABLED(CONFIG_LOCKDEP)) {
++              rtnl_lock();
++              mutex_lock(&lock);
++              mutex_unlock(&lock);
++              rtnl_unlock();
++      }
++
+       cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
+       if (!cma_wq)
+               return -ENOMEM;
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 01d68ed46c1b..2acc30c3d5b2 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -89,6 +89,7 @@ struct ucma_context {
+ 
+       struct ucma_file        *file;
+       struct rdma_cm_id       *cm_id;
++      struct mutex            mutex;
+       u64                     uid;
+ 
+       struct list_head        list;
+@@ -215,6 +216,7 @@ static struct ucma_context *ucma_alloc_ctx(struct 
ucma_file *file)
+       init_completion(&ctx->comp);
+       INIT_LIST_HEAD(&ctx->mc_list);
+       ctx->file = file;
++      mutex_init(&ctx->mutex);
+ 
+       mutex_lock(&mut);
+       ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+@@ -596,6 +598,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+       }
+ 
+       events_reported = ctx->events_reported;
++      mutex_destroy(&ctx->mutex);
+       kfree(ctx);
+       return events_reported;
+ }
+@@ -665,7 +668,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const 
char __user *inbuf,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++      mutex_unlock(&ctx->mutex);
++
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -688,7 +694,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const 
char __user *inbuf,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -712,8 +720,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+                               (struct sockaddr *) &cmd.dst_addr, 
cmd.timeout_ms);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -738,8 +748,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+                               (struct sockaddr *) &cmd.dst_addr, 
cmd.timeout_ms);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -759,7 +771,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -848,6 +862,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       memset(&resp, 0, sizeof resp);
+       addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
+       memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+@@ -871,6 +886,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
+               ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+ 
+ out:
++      mutex_unlock(&ctx->mutex);
+       if (copy_to_user(u64_to_user_ptr(cmd.response),
+                        &resp, sizeof(resp)))
+               ret = -EFAULT;
+@@ -1022,6 +1038,7 @@ static ssize_t ucma_query(struct ucma_file *file,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       switch (cmd.option) {
+       case RDMA_USER_CM_QUERY_ADDR:
+               ret = ucma_query_addr(ctx, response, out_len);
+@@ -1036,6 +1053,7 @@ static ssize_t ucma_query(struct ucma_file *file,
+               ret = -ENOSYS;
+               break;
+       }
++      mutex_unlock(&ctx->mutex);
+ 
+       ucma_put_ctx(ctx);
+       return ret;
+@@ -1076,7 +1094,9 @@ static ssize_t ucma_connect(struct ucma_file *file, 
const char __user *inbuf,
+               return PTR_ERR(ctx);
+ 
+       ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
++      mutex_lock(&ctx->mutex);
+       ret = rdma_connect(ctx->cm_id, &conn_param);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -1097,7 +1117,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const 
char __user *inbuf,
+ 
+       ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+                      cmd.backlog : max_backlog;
++      mutex_lock(&ctx->mutex);
+       ret = rdma_listen(ctx->cm_id, ctx->backlog);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -1120,13 +1142,17 @@ static ssize_t ucma_accept(struct ucma_file *file, 
const char __user *inbuf,
+       if (cmd.conn_param.valid) {
+               ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+               mutex_lock(&file->mut);
++              mutex_lock(&ctx->mutex);
+               ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
++              mutex_unlock(&ctx->mutex);
+               if (!ret)
+                       ctx->uid = cmd.uid;
+               mutex_unlock(&file->mut);
+-      } else
++      } else {
++              mutex_lock(&ctx->mutex);
+               ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+-
++              mutex_unlock(&ctx->mutex);
++      }
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -1145,7 +1171,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const 
char __user *inbuf,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -1164,7 +1192,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, 
const char __user *inbuf,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       ret = rdma_disconnect(ctx->cm_id);
++      mutex_unlock(&ctx->mutex);
+       ucma_put_ctx(ctx);
+       return ret;
+ }
+@@ -1195,7 +1225,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+       resp.qp_attr_mask = 0;
+       memset(&qp_attr, 0, sizeof qp_attr);
+       qp_attr.qp_state = cmd.qp_state;
++      mutex_lock(&ctx->mutex);
+       ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
++      mutex_unlock(&ctx->mutex);
+       if (ret)
+               goto out;
+ 
+@@ -1274,9 +1306,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+               struct sa_path_rec opa;
+ 
+               sa_convert_path_ib_to_opa(&opa, &sa_path);
++              mutex_lock(&ctx->mutex);
+               ret = rdma_set_ib_path(ctx->cm_id, &opa);
++              mutex_unlock(&ctx->mutex);
+       } else {
++              mutex_lock(&ctx->mutex);
+               ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
++              mutex_unlock(&ctx->mutex);
+       }
+       if (ret)
+               return ret;
+@@ -1309,7 +1345,9 @@ static int ucma_set_option_level(struct ucma_context 
*ctx, int level,
+ 
+       switch (level) {
+       case RDMA_OPTION_ID:
++              mutex_lock(&ctx->mutex);
+               ret = ucma_set_option_id(ctx, optname, optval, optlen);
++              mutex_unlock(&ctx->mutex);
+               break;
+       case RDMA_OPTION_IB:
+               ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+@@ -1369,8 +1407,10 @@ static ssize_t ucma_notify(struct ucma_file *file, 
const char __user *inbuf,
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+ 
++      mutex_lock(&ctx->mutex);
+       if (ctx->cm_id->device)
+               ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
++      mutex_unlock(&ctx->mutex);
+ 
+       ucma_put_ctx(ctx);
+       return ret;
+@@ -1413,8 +1453,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
+       mc->join_state = join_state;
+       mc->uid = cmd->uid;
+       memcpy(&mc->addr, addr, cmd->addr_size);
++      mutex_lock(&ctx->mutex);
+       ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+                                 join_state, mc);
++      mutex_unlock(&ctx->mutex);
+       if (ret)
+               goto err2;
+ 
+@@ -1518,7 +1560,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file 
*file,
+               goto out;
+       }
+ 
++      mutex_lock(&mc->ctx->mutex);
+       rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
++      mutex_unlock(&mc->ctx->mutex);
++
+       mutex_lock(&mc->ctx->file->mut);
+       ucma_cleanup_mc_events(mc);
+       list_del(&mc->list);
+diff --git a/drivers/infiniband/hw/hfi1/sysfs.c 
b/drivers/infiniband/hw/hfi1/sysfs.c
+index 25e867393463..e3e8d65646e3 100644
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -670,7 +670,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 
port_num,
+               dd_dev_err(dd,
+                          "Skipping sc2vl sysfs info, (err %d) port %u\n",
+                          ret, port_num);
+-              goto bail;
++              /*
++               * Based on the documentation for kobject_init_and_add(), the
++               * caller should call kobject_put even if this call fails.
++               */
++              goto bail_sc2vl;
+       }
+       kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
+ 
+@@ -680,7 +684,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 
port_num,
+               dd_dev_err(dd,
+                          "Skipping sl2sc sysfs info, (err %d) port %u\n",
+                          ret, port_num);
+-              goto bail_sc2vl;
++              goto bail_sl2sc;
+       }
+       kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
+ 
+@@ -690,7 +694,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 
port_num,
+               dd_dev_err(dd,
+                          "Skipping vl2mtu sysfs info, (err %d) port %u\n",
+                          ret, port_num);
+-              goto bail_sl2sc;
++              goto bail_vl2mtu;
+       }
+       kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
+ 
+@@ -700,7 +704,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 
port_num,
+               dd_dev_err(dd,
+                          "Skipping Congestion Control sysfs info, (err %d) 
port %u\n",
+                          ret, port_num);
+-              goto bail_vl2mtu;
++              goto bail_cc;
+       }
+ 
+       kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+@@ -738,7 +742,6 @@ bail_sl2sc:
+       kobject_put(&ppd->sl2sc_kobj);
+ bail_sc2vl:
+       kobject_put(&ppd->sc2vl_kobj);
+-bail:
+       return ret;
+ }
+ 
+@@ -858,8 +861,13 @@ bail:
+       for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
+               device_remove_file(&dev->dev, hfi1_attributes[i]);
+ 
+-      for (i = 0; i < dd->num_sdma; i++)
+-              kobject_del(&dd->per_sdma[i].kobj);
++      /*
++       * The function kobject_put() will call kobject_del() if the kobject
++       * has been added successfully. The sysfs files created under the
++       * kobject directory will also be removed during the process.
++       */
++      for (; i >= 0; i--)
++              kobject_put(&dd->per_sdma[i].kobj);
+ 
+       return ret;
+ }
+@@ -872,6 +880,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
+       struct hfi1_pportdata *ppd;
+       int i;
+ 
++      /* Unwind operations in hfi1_verbs_register_sysfs() */
++      for (i = 0; i < dd->num_sdma; i++)
++              kobject_put(&dd->per_sdma[i].kobj);
++
+       for (i = 0; i < dd->num_pports; i++) {
+               ppd = &dd->pport[i];
+ 
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index f862f1b7f996..d6f5f5b3f75f 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -29,7 +29,7 @@
+ #include "rc-core-priv.h"
+ #include <uapi/linux/lirc.h>
+ 
+-#define LIRCBUF_SIZE  256
++#define LIRCBUF_SIZE  1024
+ 
+ static dev_t lirc_base_dev;
+ 
+diff --git a/drivers/misc/cardreader/rts5227.c 
b/drivers/misc/cardreader/rts5227.c
+index 13645be5f3b5..f0e845c8e6a7 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -369,6 +369,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
+ void rts522a_init_params(struct rtsx_pcr *pcr)
+ {
+       rts5227_init_params(pcr);
++      pcr->ops = &rts522a_pcr_ops;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
+       pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+ }
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index d80372d21c14..2ac1dc5104b7 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -147,6 +147,8 @@
+ #define MEI_DEV_ID_CMP_H      0x06e0  /* Comet Lake H */
+ #define MEI_DEV_ID_CMP_H_3    0x06e4  /* Comet Lake H 3 (iTouch) */
+ 
++#define MEI_DEV_ID_CDF        0x18D3  /* Cedar Fork */
++
+ #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
+ 
+ #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 3498c10b8263..b4bf12f27caf 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+ 
++      {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
++
+       /* required last entry */
+       {0, }
+ };
+diff --git a/drivers/misc/pci_endpoint_test.c 
b/drivers/misc/pci_endpoint_test.c
+index fd33a3b9c66f..727dc6ec427d 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -104,6 +104,7 @@ struct pci_endpoint_test {
+       struct completion irq_raised;
+       int             last_irq;
+       int             num_irqs;
++      int             irq_type;
+       /* mutex to protect the ioctls */
+       struct mutex    mutex;
+       struct miscdevice miscdev;
+@@ -163,6 +164,7 @@ static void pci_endpoint_test_free_irq_vectors(struct 
pci_endpoint_test *test)
+       struct pci_dev *pdev = test->pdev;
+ 
+       pci_free_irq_vectors(pdev);
++      test->irq_type = IRQ_TYPE_UNDEFINED;
+ }
+ 
+ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test 
*test,
+@@ -197,6 +199,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct 
pci_endpoint_test *test,
+               irq = 0;
+               res = false;
+       }
++
++      test->irq_type = type;
+       test->num_irqs = irq;
+ 
+       return res;
+@@ -336,6 +340,7 @@ static bool pci_endpoint_test_copy(struct 
pci_endpoint_test *test, size_t size)
+       dma_addr_t orig_dst_phys_addr;
+       size_t offset;
+       size_t alignment = test->alignment;
++      int irq_type = test->irq_type;
+       u32 src_crc32;
+       u32 dst_crc32;
+ 
+@@ -432,6 +437,7 @@ static bool pci_endpoint_test_write(struct 
pci_endpoint_test *test, size_t size)
+       dma_addr_t orig_phys_addr;
+       size_t offset;
+       size_t alignment = test->alignment;
++      int irq_type = test->irq_type;
+       u32 crc32;
+ 
+       if (size > SIZE_MAX - alignment)
+@@ -500,6 +506,7 @@ static bool pci_endpoint_test_read(struct 
pci_endpoint_test *test, size_t size)
+       dma_addr_t orig_phys_addr;
+       size_t offset;
+       size_t alignment = test->alignment;
++      int irq_type = test->irq_type;
+       u32 crc32;
+ 
+       if (size > SIZE_MAX - alignment)
+@@ -561,7 +568,7 @@ static bool pci_endpoint_test_set_irq(struct 
pci_endpoint_test *test,
+               return false;
+       }
+ 
+-      if (irq_type == req_irq_type)
++      if (test->irq_type == req_irq_type)
+               return true;
+ 
+       pci_endpoint_test_release_irq(test);
+@@ -573,12 +580,10 @@ static bool pci_endpoint_test_set_irq(struct 
pci_endpoint_test *test,
+       if (!pci_endpoint_test_request_irq(test))
+               goto err;
+ 
+-      irq_type = req_irq_type;
+       return true;
+ 
+ err:
+       pci_endpoint_test_free_irq_vectors(test);
+-      irq_type = IRQ_TYPE_UNDEFINED;
+       return false;
+ }
+ 
+@@ -636,7 +641,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
+ {
+       int err;
+       int id;
+-      char name[20];
++      char name[24];
+       enum pci_barno bar;
+       void __iomem *base;
+       struct device *dev = &pdev->dev;
+@@ -655,6 +660,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
+       test->test_reg_bar = 0;
+       test->alignment = 0;
+       test->pdev = pdev;
++      test->irq_type = IRQ_TYPE_UNDEFINED;
+ 
+       if (no_msi)
+               irq_type = IRQ_TYPE_LEGACY;
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index db9607809620..f99cd94509be 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -147,7 +147,7 @@ static void slc_bump(struct slcan *sl)
+       u32 tmpid;
+       char *cmd = sl->rbuff;
+ 
+-      cf.can_id = 0;
++      memset(&cf, 0, sizeof(cf));
+ 
+       switch (*cmd) {
+       case 'r':
+@@ -186,8 +186,6 @@ static void slc_bump(struct slcan *sl)
+       else
+               return;
+ 
+-      *(u64 *) (&cf.data) = 0; /* clear payload */
+-
+       /* RTR frames may have a dlc > 0 but they never have any data bytes */
+       if (!(cf.can_id & CAN_RTR_FLAG)) {
+               for (i = 0; i < cf.can_dlc; i++) {
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 8c69789fbe09..ccba648452c4 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -461,7 +461,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+       priv->slave_mii_bus->parent = ds->dev->parent;
+       priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+ 
+-      err = of_mdiobus_register(priv->slave_mii_bus, dn);
++      err = mdiobus_register(priv->slave_mii_bus);
+       if (err && dn)
+               of_node_put(dn);
+ 
+@@ -1014,6 +1014,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+       const struct bcm_sf2_of_data *data;
+       struct b53_platform_data *pdata;
+       struct dsa_switch_ops *ops;
++      struct device_node *ports;
+       struct bcm_sf2_priv *priv;
+       struct b53_device *dev;
+       struct dsa_switch *ds;
+@@ -1077,7 +1078,11 @@ static int bcm_sf2_sw_probe(struct platform_device 
*pdev)
+       set_bit(0, priv->cfp.used);
+       set_bit(0, priv->cfp.unique);
+ 
+-      bcm_sf2_identify_ports(priv, dn->child);
++      ports = of_find_node_by_name(dn, "ports");
++      if (ports) {
++              bcm_sf2_identify_ports(priv, ports);
++              of_node_put(ports);
++      }
+ 
+       priv->irq0 = irq_of_parse_and_map(dn, 0);
+       priv->irq1 = irq_of_parse_and_map(dn, 1);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+index 8d211972c5e9..9f4eb3cde93e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+@@ -98,9 +98,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp 
*mlxsw_sp,
+                       u8 prio = tcf_vlan_push_prio(a);
+                       u16 vid = tcf_vlan_push_vid(a);
+ 
+-                      return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+-                                                         action, vid,
+-                                                         proto, prio, extack);
++                      err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
++                                                        action, vid,
++                                                        proto, prio, extack);
++                      if (err)
++                              return err;
+               } else {
+                       NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+                       dev_err(mlxsw_sp->bus_info->dev, "Unsupported 
action\n");
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 7b2a84320aab..e4e9a7591efe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -218,7 +218,7 @@ static void dwmac1000_set_filter(struct mac_device_info 
*hw,
+                       reg++;
+               }
+ 
+-              while (reg <= perfect_addr_number) {
++              while (reg < perfect_addr_number) {
+                       writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+                       writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+                       reg++;
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index b4c67c3a928b..55caaaf969da 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -29,6 +29,7 @@
+ #include <linux/micrel_phy.h>
+ #include <linux/of.h>
+ #include <linux/clk.h>
++#include <linux/delay.h>
+ 
+ /* Operation Mode Strap Override */
+ #define MII_KSZPHY_OMSO                               0x16
+@@ -738,6 +739,12 @@ static int kszphy_resume(struct phy_device *phydev)
+ 
+       genphy_resume(phydev);
+ 
++      /* After switching from power-down to normal mode, an internal global
++       * reset is automatically generated. Wait a minimum of 1 ms before
++       * read/write access to the PHY registers.
++       */
++      usleep_range(1000, 2000);
++
+       ret = kszphy_config_reset(phydev);
+       if (ret)
+               return ret;
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index e4f167e35353..9711bfbdf431 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -815,9 +815,11 @@ out_free_tagset:
+       if (new)
+               nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ out_free_async_qe:
+-      nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+-              sizeof(struct nvme_command), DMA_TO_DEVICE);
+-      ctrl->async_event_sqe.data = NULL;
++      if (ctrl->async_event_sqe.data) {
++              nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
++                      sizeof(struct nvme_command), DMA_TO_DEVICE);
++              ctrl->async_event_sqe.data = NULL;
++      }
+ out_free_queue:
+       nvme_rdma_free_queue(&ctrl->queues[0]);
+       return error;
+diff --git a/drivers/power/supply/axp288_charger.c 
b/drivers/power/supply/axp288_charger.c
+index c60659fb21de..46eb7716c35c 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -28,6 +28,7 @@
+ #include <linux/property.h>
+ #include <linux/mfd/axp20x.h>
+ #include <linux/extcon.h>
++#include <linux/dmi.h>
+ 
+ #define PS_STAT_VBUS_TRIGGER          (1 << 0)
+ #define PS_STAT_BAT_CHRG_DIR          (1 << 2)
+@@ -552,6 +553,49 @@ out:
+       return IRQ_HANDLED;
+ }
+ 
++/*
++ * The HP Pavilion x2 10 series comes in a number of variants:
++ * Bay Trail SoC    + AXP288 PMIC, DMI_BOARD_NAME: "815D"
++ * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
++ * Cherry Trail SoC + TI PMIC,     DMI_BOARD_NAME: "827C" or "82F4"
++ *
++ * The variants with the AXP288 PMIC are all kinds of special:
++ *
++ * 1. All variants use a Type-C connector which the AXP288 does not support, 
so
++ * when using a Type-C charger it is not recognized. Unlike most AXP288 
devices,
++ * this model actually has mostly working ACPI AC / Battery code, the ACPI 
code
++ * "solves" this by simply setting the input_current_limit to 3A.
++ * There are still some issues with the ACPI code, so we use this native 
driver,
++ * and to solve the charging not working (500mA is not enough) issue we 
hardcode
++ * the 3A input_current_limit like the ACPI code does.
++ *
++ * 2. If no charger is connected the machine boots with the vbus-path 
disabled.
++ * Normally this is done when a 5V boost converter is active to avoid the PMIC
++ * trying to charge from the 5V boost converter's output. This is done when
++ * an OTG host cable is inserted and the ID pin on the micro-B receptacle is
++ * pulled low and the ID pin has an ACPI event handler associated with it
++ * which re-enables the vbus-path when the ID pin is pulled high when the
++ * OTG host cable is removed. The Type-C connector has no ID pin, there is
++ * no ID pin handler and there appears to be no 5V boost converter, so we
++ * end up not charging because the vbus-path is disabled, until we unplug
++ * the charger which automatically clears the vbus-path disable bit and then
++ * on the second plug-in of the adapter we start charging. To solve the not
++ * charging on first charger plugin we unconditionally enable the vbus-path at
++ * probe on this model, which is safe since there is no 5V boost converter.
++ */
++static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
++      {
++              /*
++               * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
++               * Trail model has "HP", so we only match on product_name.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 
Detachable"),
++              },
++      },
++      {} /* Terminating entry */
++};
++
+ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
+ {
+       struct axp288_chrg_info *info =
+@@ -575,7 +619,11 @@ static void axp288_charger_extcon_evt_worker(struct 
work_struct *work)
+       }
+ 
+       /* Determine cable/charger type */
+-      if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
++      if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
++              /* See comment above axp288_hp_x2_dmi_ids declaration */
++              dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 
3A\n");
++              current_limit = 3000000;
++      } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
+               dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
+               current_limit = 500000;
+       } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
+@@ -692,6 +740,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info 
*info)
+               return ret;
+       }
+ 
++      if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
++              /* See comment above axp288_hp_x2_dmi_ids declaration */
++              ret = axp288_charger_vbus_path_select(info, true);
++              if (ret < 0)
++                      return ret;
++      }
++
+       /* Read current charge voltage and current limit */
+       ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
+       if (ret < 0) {
+diff --git a/drivers/rpmsg/qcom_glink_native.c 
b/drivers/rpmsg/qcom_glink_native.c
+index 25c394a7077b..facc577ab0ac 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -813,9 +813,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, 
size_t avail)
+               return -EAGAIN;
+       }
+ 
+-      if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
+-              return -EINVAL;
+-
+       rcid = le16_to_cpu(hdr.msg.param1);
+       spin_lock_irqsave(&glink->idr_lock, flags);
+       channel = idr_find(&glink->rcids, rcid);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d482f89ffae2..7b8b463676ad 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1369,7 +1369,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep 
*dep, struct dwc3_request *r
+       for (i = 0; i < req->num_trbs; i++) {
+               struct dwc3_trb *trb;
+ 
+-              trb = req->trb + i;
++              trb = &dep->trb_pool[dep->trb_dequeue];
+               trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+               dwc3_ep_inc_deq(dep);
+       }
+@@ -3166,7 +3166,6 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+       dwc->gadget.speed               = USB_SPEED_UNKNOWN;
+       dwc->gadget.sg_supported        = true;
+       dwc->gadget.name                = "dwc3-gadget";
+-      dwc->gadget.is_otg              = dwc->dr_mode == USB_DR_MODE_OTG;
+ 
+       /*
+        * FIXME We might be setting max_speed to <SUPER, however versions
+diff --git a/drivers/video/fbdev/core/fbcon.c 
b/drivers/video/fbdev/core/fbcon.c
+index b96d4e779333..cb93a6b38160 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1243,6 +1243,9 @@ finished:
+       if (!con_is_bound(&fb_con))
+               fbcon_exit();
+ 
++      if (vc->vc_num == logo_shown)
++              logo_shown = FBCON_LOGO_CANSHOW;
++
+       return;
+ }
+ 
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index c4314f449240..18e967089aeb 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -105,7 +105,6 @@ static int ceph_statfs(struct dentry *dentry, struct 
kstatfs *buf)
+       return 0;
+ }
+ 
+-
+ static int ceph_sync_fs(struct super_block *sb, int wait)
+ {
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+@@ -206,6 +205,26 @@ static match_table_t fsopt_tokens = {
+       {-1, NULL}
+ };
+ 
++/*
++ * Remove adjacent slashes and then the trailing slash, unless it is
++ * the only remaining character.
++ *
++ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
++ */
++static void canonicalize_path(char *path)
++{
++      int i, j = 0;
++
++      for (i = 0; path[i] != '\0'; i++) {
++              if (path[i] != '/' || j < 1 || path[j - 1] != '/')
++                      path[j++] = path[i];
++      }
++
++      if (j > 1 && path[j - 1] == '/')
++              j--;
++      path[j] = '\0';
++}
++
+ static int parse_fsopt_token(char *c, void *private)
+ {
+       struct ceph_mount_options *fsopt = private;
+@@ -415,12 +434,15 @@ static int compare_mount_options(struct 
ceph_mount_options *new_fsopt,
+       ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
+       if (ret)
+               return ret;
++
+       ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
+       if (ret)
+               return ret;
++
+       ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+       if (ret)
+               return ret;
++
+       ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
+       if (ret)
+               return ret;
+@@ -476,13 +498,17 @@ static int parse_mount_options(struct ceph_mount_options 
**pfsopt,
+        */
+       dev_name_end = strchr(dev_name, '/');
+       if (dev_name_end) {
+-              if (strlen(dev_name_end) > 1) {
+-                      fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+-                      if (!fsopt->server_path) {
+-                              err = -ENOMEM;
+-                              goto out;
+-                      }
++              /*
++               * The server_path will include the whole chars from userland
++               * including the leading '/'.
++               */
++              fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
++              if (!fsopt->server_path) {
++                      err = -ENOMEM;
++                      goto out;
+               }
++
++              canonicalize_path(fsopt->server_path);
+       } else {
+               dev_name_end = dev_name + strlen(dev_name);
+       }
+@@ -810,7 +836,6 @@ static void destroy_caches(void)
+       ceph_fscache_unregister();
+ }
+ 
+-
+ /*
+  * ceph_umount_begin - initiate forced umount.  Tear down down the
+  * mount, skipping steps that may hang while waiting for server(s).
+@@ -897,9 +922,6 @@ out:
+       return root;
+ }
+ 
+-
+-
+-
+ /*
+  * mount: join the ceph cluster, and open root directory.
+  */
+@@ -913,7 +935,9 @@ static struct dentry *ceph_real_mount(struct 
ceph_fs_client *fsc)
+       mutex_lock(&fsc->client->mount_mutex);
+ 
+       if (!fsc->sb->s_root) {
+-              const char *path;
++              const char *path = fsc->mount_options->server_path ?
++                                   fsc->mount_options->server_path + 1 : "";
++
+               err = __ceph_open_session(fsc->client, started);
+               if (err < 0)
+                       goto out;
+@@ -925,13 +949,7 @@ static struct dentry *ceph_real_mount(struct 
ceph_fs_client *fsc)
+                               goto out;
+               }
+ 
+-              if (!fsc->mount_options->server_path) {
+-                      path = "";
+-                      dout("mount opening path \\t\n");
+-              } else {
+-                      path = fsc->mount_options->server_path + 1;
+-                      dout("mount opening path %s\n", path);
+-              }
++              dout("mount opening path '%s'\n", path);
+ 
+               err = ceph_fs_debugfs_init(fsc);
+               if (err < 0)
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 8d3eabf06d66..65da12ff5449 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -86,7 +86,7 @@ struct ceph_mount_options {
+ 
+       char *snapdir_name;   /* default ".snap" */
+       char *mds_namespace;  /* default NULL */
+-      char *server_path;    /* default  "/" */
++      char *server_path;    /* default NULL (means "/") */
+       char *fscache_uniq;   /* default NULL */
+ };
+ 
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 7ac2e46112b7..e02cbca3cfaf 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -236,17 +236,17 @@ static __always_inline void __assign_bit(long nr, 
volatile unsigned long *addr,
+ #ifdef __KERNEL__
+ 
+ #ifndef set_mask_bits
+-#define set_mask_bits(ptr, _mask, _bits)      \
++#define set_mask_bits(ptr, mask, bits)        \
+ ({                                                            \
+-      const typeof(*ptr) mask = (_mask), bits = (_bits);      \
+-      typeof(*ptr) old, new;                                  \
++      const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);  \
++      typeof(*(ptr)) old__, new__;                            \
+                                                               \
+       do {                                                    \
+-              old = READ_ONCE(*ptr);                  \
+-              new = (old & ~mask) | bits;                     \
+-      } while (cmpxchg(ptr, old, new) != old);                \
++              old__ = READ_ONCE(*(ptr));                      \
++              new__ = (old__ & ~mask__) | bits__;             \
++      } while (cmpxchg(ptr, old__, new__) != old__);          \
+                                                               \
+-      new;                                                    \
++      new__;                                                  \
+ })
+ #endif
+ 
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index f35c7bf76143..0096a05395e3 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct 
srcu_notifier_head *nh);
+ 
+ #ifdef CONFIG_TREE_SRCU
+ #define _SRCU_NOTIFIER_HEAD(name, mod)                                \
+-      static DEFINE_PER_CPU(struct srcu_data,                 \
+-                      name##_head_srcu_data);                 \
++      static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
+       mod struct srcu_notifier_head name =                    \
+                       SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
+ 
+diff --git a/include/uapi/linux/coresight-stm.h 
b/include/uapi/linux/coresight-stm.h
+index aac550a52f80..8847dbf24151 100644
+--- a/include/uapi/linux/coresight-stm.h
++++ b/include/uapi/linux/coresight-stm.h
+@@ -2,8 +2,10 @@
+ #ifndef __UAPI_CORESIGHT_STM_H_
+ #define __UAPI_CORESIGHT_STM_H_
+ 
+-#define STM_FLAG_TIMESTAMPED   BIT(3)
+-#define STM_FLAG_GUARANTEED    BIT(7)
++#include <linux/const.h>
++
++#define STM_FLAG_TIMESTAMPED   _BITUL(3)
++#define STM_FLAG_GUARANTEED    _BITUL(7)
+ 
+ /*
+  * The CoreSight STM supports guaranteed and invariant timing
+diff --git a/kernel/padata.c b/kernel/padata.c
+index cfab62923c45..c280cb153915 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -671,8 +671,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int 
cpumask_type,
+       struct cpumask *serial_mask, *parallel_mask;
+       int err = -EINVAL;
+ 
+-      mutex_lock(&pinst->lock);
+       get_online_cpus();
++      mutex_lock(&pinst->lock);
+ 
+       switch (cpumask_type) {
+       case PADATA_CPU_PARALLEL:
+@@ -690,8 +690,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int 
cpumask_type,
+       err =  __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
+ 
+ out:
+-      put_online_cpus();
+       mutex_unlock(&pinst->lock);
++      put_online_cpus();
+ 
+       return err;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index adeb163cd661..68c46da82aac 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2832,7 +2832,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
+       switch (mode) {
+       case MPOL_PREFERRED:
+               /*
+-               * Insist on a nodelist of one node only
++               * Insist on a nodelist of one node only, although later
++               * we use first_node(nodes) to grab a single node, so here
++               * nodelist (or nodes) cannot be empty.
+                */
+               if (nodelist) {
+                       char *rest = nodelist;
+@@ -2840,6 +2842,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
+                               rest++;
+                       if (*rest)
+                               goto out;
++                      if (nodes_empty(nodes))
++                              goto out;
+               }
+               break;
+       case MPOL_INTERLEAVE:
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index 5e44d842cc5d..cf0ccd05329c 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void 
__user *arg)
+               dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+               if (IS_ERR(dlc))
+                       return PTR_ERR(dlc);
+-              else if (dlc) {
+-                      rfcomm_dlc_put(dlc);
++              if (dlc)
+                       return -EBUSY;
+-              }
+               dlc = rfcomm_dlc_alloc(GFP_KERNEL);
+               if (!dlc)
+                       return -ENOMEM;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c1a3baf16957..2f4d35101f4d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2854,6 +2854,8 @@ static u16 skb_tx_hash(const struct net_device *dev,
+ 
+       if (skb_rx_queue_recorded(skb)) {
+               hash = skb_get_rx_queue(skb);
++              if (hash >= qoffset)
++                      hash -= qoffset;
+               while (unlikely(hash >= qcount))
+                       hash -= qcount;
+               return hash + qoffset;
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 3955a6d7ea66..3047fc4737c4 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2337,6 +2337,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, 
void *v)
+                  " %zd bytes, size of tnode: %zd bytes.\n",
+                  LEAF_SIZE, TNODE_SIZE(0));
+ 
++      rcu_read_lock();
+       for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+               struct hlist_head *head = &net->ipv4.fib_table_hash[h];
+               struct fib_table *tb;
+@@ -2356,7 +2357,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, 
void *v)
+                       trie_show_usage(seq, t->stats);
+ #endif
+               }
++              cond_resched_rcu();
+       }
++      rcu_read_unlock();
+ 
+       return 0;
+ }
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 14fd8a37a729..b37abba3b369 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -155,11 +155,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net 
*itn,
+                       cand = t;
+       }
+ 
+-      if (flags & TUNNEL_NO_KEY)
+-              goto skip_key_lookup;
+-
+       hlist_for_each_entry_rcu(t, head, hash_node) {
+-              if (t->parms.i_key != key ||
++              if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
+                   t->parms.iph.saddr != 0 ||
+                   t->parms.iph.daddr != 0 ||
+                   !(t->dev->flags & IFF_UP))
+@@ -171,7 +168,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net 
*itn,
+                       cand = t;
+       }
+ 
+-skip_key_lookup:
+       if (cand)
+               return cand;
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0c804716a2aa..627cd24b7c0d 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3241,6 +3241,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
+       if (netif_is_l3_master(idev->dev))
+               return;
+ 
++      /* no link local addresses on devices flagged as slaves */
++      if (idev->dev->flags & IFF_SLAVE)
++              return;
++
+       ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
+ 
+       switch (idev->cnf.addr_gen_mode) {
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 3e54ead1e921..250d3dae8af4 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -62,8 +62,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
+ 
+       rtt = READ_ONCE(call->peer->rtt);
+       rtt2 = nsecs_to_jiffies64(rtt) * 2;
+-      if (rtt2 < 1)
+-              rtt2 = 1;
++      if (rtt2 < 2)
++              rtt2 = 2;
+ 
+       timeout = rtt2;
+       tx_start = READ_ONCE(call->tx_hard_ack);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 4fede55b9010..7657194f396e 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -240,7 +240,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ {
+       struct sctp_association *asoc = t->asoc;
+       struct dst_entry *dst = NULL;
+-      struct flowi6 *fl6 = &fl->u.ip6;
++      struct flowi _fl;
++      struct flowi6 *fl6 = &_fl.u.ip6;
+       struct sctp_bind_addr *bp;
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct sctp_sockaddr_entry *laddr;
+@@ -250,7 +251,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+       enum sctp_scope scope;
+       __u8 matchlen = 0;
+ 
+-      memset(fl6, 0, sizeof(struct flowi6));
++      memset(&_fl, 0, sizeof(_fl));
+       fl6->daddr = daddr->v6.sin6_addr;
+       fl6->fl6_dport = daddr->v6.sin6_port;
+       fl6->flowi6_proto = IPPROTO_SCTP;
+@@ -288,8 +289,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+       rcu_read_unlock();
+ 
+       dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+-      if (!asoc || saddr)
++      if (!asoc || saddr) {
++              t->dst = dst;
++              memcpy(fl, &_fl, sizeof(_fl));
+               goto out;
++      }
+ 
+       bp = &asoc->base.bind_addr;
+       scope = sctp_scope(daddr);
+@@ -312,6 +316,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+                       if ((laddr->a.sa.sa_family == AF_INET6) &&
+                           (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
+                               rcu_read_unlock();
++                              t->dst = dst;
++                              memcpy(fl, &_fl, sizeof(_fl));
+                               goto out;
+                       }
+               }
+@@ -350,6 +356,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+                       if (!IS_ERR_OR_NULL(dst))
+                               dst_release(dst);
+                       dst = bdst;
++                      t->dst = dst;
++                      memcpy(fl, &_fl, sizeof(_fl));
+                       break;
+               }
+ 
+@@ -363,6 +371,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+                       dst_release(dst);
+               dst = bdst;
+               matchlen = bmatchlen;
++              t->dst = dst;
++              memcpy(fl, &_fl, sizeof(_fl));
+       }
+       rcu_read_unlock();
+ 
+@@ -371,14 +381,12 @@ out:
+               struct rt6_info *rt;
+ 
+               rt = (struct rt6_info *)dst;
+-              t->dst = dst;
+               t->dst_cookie = rt6_get_cookie(rt);
+               pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+                        &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+-                       &fl6->saddr);
++                       &fl->u.ip6.saddr);
+       } else {
+               t->dst = NULL;
+-
+               pr_debug("no route\n");
+       }
+ }
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 269b528e50b9..787c59d798f4 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -424,7 +424,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ {
+       struct sctp_association *asoc = t->asoc;
+       struct rtable *rt;
+-      struct flowi4 *fl4 = &fl->u.ip4;
++      struct flowi _fl;
++      struct flowi4 *fl4 = &_fl.u.ip4;
+       struct sctp_bind_addr *bp;
+       struct sctp_sockaddr_entry *laddr;
+       struct dst_entry *dst = NULL;
+@@ -434,7 +435,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ 
+       if (t->dscp & SCTP_DSCP_SET_MASK)
+               tos = t->dscp & SCTP_DSCP_VAL_MASK;
+-      memset(fl4, 0x0, sizeof(struct flowi4));
++      memset(&_fl, 0x0, sizeof(_fl));
+       fl4->daddr  = daddr->v4.sin_addr.s_addr;
+       fl4->fl4_dport = daddr->v4.sin_port;
+       fl4->flowi4_proto = IPPROTO_SCTP;
+@@ -453,8 +454,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+                &fl4->saddr);
+ 
+       rt = ip_route_output_key(sock_net(sk), fl4);
+-      if (!IS_ERR(rt))
++      if (!IS_ERR(rt)) {
+               dst = &rt->dst;
++              t->dst = dst;
++              memcpy(fl, &_fl, sizeof(_fl));
++      }
+ 
+       /* If there is no association or if a source address is passed, no
+        * more validation is required.
+@@ -517,27 +521,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+               odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+                                    false);
+               if (!odev || odev->ifindex != fl4->flowi4_oif) {
+-                      if (!dst)
++                      if (!dst) {
+                               dst = &rt->dst;
+-                      else
++                              t->dst = dst;
++                              memcpy(fl, &_fl, sizeof(_fl));
++                      } else {
+                               dst_release(&rt->dst);
++                      }
+                       continue;
+               }
+ 
+               dst_release(dst);
+               dst = &rt->dst;
++              t->dst = dst;
++              memcpy(fl, &_fl, sizeof(_fl));
+               break;
+       }
+ 
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      t->dst = dst;
+-      if (dst)
++      if (dst) {
+               pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
+-                       &fl4->daddr, &fl4->saddr);
+-      else
++                       &fl->u.ip4.daddr, &fl->u.ip4.saddr);
++      } else {
++              t->dst = NULL;
+               pr_debug("no route\n");
++      }
+ }
+ 
+ /* For v4, the source address is cached in the route entry(dst). So no need
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 95f9068b8549..c93be3ba5df2 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -165,29 +165,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+       skb_orphan(chunk->skb);
+ }
+ 
++#define traverse_and_process()        \
++do {                          \
++      msg = chunk->msg;       \
++      if (msg == prev_msg)    \
++              continue;       \
++      list_for_each_entry(c, &msg->chunks, frag_list) {       \
++              if ((clear && asoc->base.sk == c->skb->sk) ||   \
++                  (!clear && asoc->base.sk != c->skb->sk))    \
++                      cb(c);  \
++      }                       \
++      prev_msg = msg;         \
++} while (0)
++
+ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
++                                     bool clear,
+                                      void (*cb)(struct sctp_chunk *))
+ 
+ {
++      struct sctp_datamsg *msg, *prev_msg = NULL;
+       struct sctp_outq *q = &asoc->outqueue;
++      struct sctp_chunk *chunk, *c;
+       struct sctp_transport *t;
+-      struct sctp_chunk *chunk;
+ 
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+-                      cb(chunk);
++                      traverse_and_process();
+ 
+       list_for_each_entry(chunk, &q->retransmit, transmitted_list)
+-              cb(chunk);
++              traverse_and_process();
+ 
+       list_for_each_entry(chunk, &q->sacked, transmitted_list)
+-              cb(chunk);
++              traverse_and_process();
+ 
+       list_for_each_entry(chunk, &q->abandoned, transmitted_list)
+-              cb(chunk);
++              traverse_and_process();
+ 
+       list_for_each_entry(chunk, &q->out_chunk_list, list)
+-              cb(chunk);
++              traverse_and_process();
+ }
+ 
+ static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+@@ -8899,9 +8914,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+        * paths won't try to lock it and then oldsk.
+        */
+       lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+-      sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
++      sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
+       sctp_assoc_migrate(assoc, newsk);
+-      sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
++      sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
+ 
+       /* If the association on the newsk is already closed before accept()
+        * is called, set RCV_SHUTDOWN flag.
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index bc4edc5607c7..c9f3c002bd55 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1069,6 +1069,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+       SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
++      SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+       {}
+ };
+diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
+index 99394c036998..e099c0505b76 100644
+--- a/sound/soc/jz4740/jz4740-i2s.c
++++ b/sound/soc/jz4740/jz4740-i2s.c
+@@ -92,7 +92,7 @@
+ #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
+ 
+ #define JZ_AIC_CLK_DIV_MASK 0xf
+-#define I2SDIV_DV_SHIFT 8
++#define I2SDIV_DV_SHIFT 0
+ #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+ #define I2SDIV_IDV_SHIFT 8
+ #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
+diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
+index 9f420d98b5fb..6bf6a204341e 100644
+--- a/tools/accounting/getdelays.c
++++ b/tools/accounting/getdelays.c
+@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+       msg.g.version = 0x1;
+       na = (struct nlattr *) GENLMSG_DATA(&msg);
+       na->nla_type = nla_type;
+-      na->nla_len = nla_len + 1 + NLA_HDRLEN;
++      na->nla_len = nla_len + NLA_HDRLEN;
+       memcpy(NLA_DATA(na), nla_data, nla_len);
+       msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+ 
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 02d123871ef9..2233cf722c69 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -299,6 +299,10 @@ int *irqs_per_cpu;                /* indexed by cpu_num */
+ 
+ void setup_all_buffers(void);
+ 
++char *sys_lpi_file;
++char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
++char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";
++
+ int cpu_is_not_present(int cpu)
+ {
+       return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
+@@ -2844,8 +2848,6 @@ int snapshot_gfx_mhz(void)
+  *
+  * record snapshot of
+  * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
+- *
+- * return 1 if config change requires a restart, else return 0
+  */
+ int snapshot_cpu_lpi_us(void)
+ {
+@@ -2865,17 +2867,14 @@ int snapshot_cpu_lpi_us(void)
+ /*
+  * snapshot_sys_lpi()
+  *
+- * record snapshot of
+- * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
+- *
+- * return 1 if config change requires a restart, else return 0
++ * record snapshot of sys_lpi_file
+  */
+ int snapshot_sys_lpi_us(void)
+ {
+       FILE *fp;
+       int retval;
+ 
+-      fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
++      fp = fopen_or_die(sys_lpi_file, "r");
+ 
+       retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
+       if (retval != 1)
+@@ -4743,10 +4742,16 @@ void process_cpuid()
+       else
+               BIC_NOT_PRESENT(BIC_CPU_LPI);
+ 
+-      if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK))
++      if (!access(sys_lpi_file_sysfs, R_OK)) {
++              sys_lpi_file = sys_lpi_file_sysfs;
+               BIC_PRESENT(BIC_SYS_LPI);
+-      else
++      } else if (!access(sys_lpi_file_debugfs, R_OK)) {
++              sys_lpi_file = sys_lpi_file_debugfs;
++              BIC_PRESENT(BIC_SYS_LPI);
++      } else {
++              sys_lpi_file_sysfs = NULL;
+               BIC_NOT_PRESENT(BIC_SYS_LPI);
++      }
+ 
+       if (!quiet)
+               decode_misc_feature_control();
+@@ -5144,9 +5149,9 @@ int add_counter(unsigned int msr_num, char *path, char *name,
+       }
+ 
+       msrp->msr_num = msr_num;
+-      strncpy(msrp->name, name, NAME_BYTES);
++      strncpy(msrp->name, name, NAME_BYTES - 1);
+       if (path)
+-              strncpy(msrp->path, path, PATH_BYTES);
++              strncpy(msrp->path, path, PATH_BYTES - 1);
+       msrp->width = width;
+       msrp->type = type;
+       msrp->format = format;
+diff --git a/usr/Kconfig b/usr/Kconfig
+index 43658b8a975e..8b4826de1189 100644
+--- a/usr/Kconfig
++++ b/usr/Kconfig
+@@ -131,17 +131,6 @@ choice
+ 
+         If in doubt, select 'None'
+ 
+-config INITRAMFS_COMPRESSION_NONE
+-      bool "None"
+-      help
+-        Do not compress the built-in initramfs at all. This may sound wasteful
+-        in space, but, you should be aware that the built-in initramfs will be
+-        compressed at a later stage anyways along with the rest of the kernel,
+-        on those architectures that support this. However, not compressing the
+-        initramfs may lead to slightly higher memory consumption during a
+-        short time at boot, while both the cpio image and the unpacked
+-        filesystem image will be present in memory simultaneously
+-
+ config INITRAMFS_COMPRESSION_GZIP
+       bool "Gzip"
+       depends on RD_GZIP
+@@ -214,6 +203,17 @@ config INITRAMFS_COMPRESSION_LZ4
+         If you choose this, keep in mind that most distros don't provide lz4
+         by default which could cause a build failure.
+ 
++config INITRAMFS_COMPRESSION_NONE
++      bool "None"
++      help
++        Do not compress the built-in initramfs at all. This may sound wasteful
++        in space, but, you should be aware that the built-in initramfs will be
++        compressed at a later stage anyways along with the rest of the kernel,
++        on those architectures that support this. However, not compressing the
++        initramfs may lead to slightly higher memory consumption during a
++        short time at boot, while both the cpio image and the unpacked
++        filesystem image will be present in memory simultaneously
++
+ endchoice
+ 
+ config INITRAMFS_COMPRESSION

Reply via email to