commit:     a8651f65ab88ed24697a49b6c917d90d9eb203fd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov 17 18:17:17 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov 17 18:17:17 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a8651f65

Linux patch 6.1.118

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1117_linux-6.1.118.patch | 1796 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1800 insertions(+)

diff --git a/0000_README b/0000_README
index 50d89d67..1fe735fc 100644
--- a/0000_README
+++ b/0000_README
@@ -511,6 +511,10 @@ Patch:  1115_linux-6.1.116.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.116
 
+Patch:  1116_linux-6.1.117.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.117
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1117_linux-6.1.118.patch b/1117_linux-6.1.118.patch
new file mode 100644
index 00000000..a2fa77d3
--- /dev/null
+++ b/1117_linux-6.1.118.patch
@@ -0,0 +1,1796 @@
+diff --git a/Makefile b/Makefile
+index c88229bb96269c..ca304cece572b0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 117
++SUBLEVEL = 118
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 3d15fa5bef37d0..710b005fc8a696 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -325,7 +325,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
+ #define  CSR_ESTAT_IS_WIDTH           15
+ #define  CSR_ESTAT_IS                 (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
+ 
+-#define LOONGARCH_CSR_ERA             0x6     /* ERA */
++#define LOONGARCH_CSR_ERA             0x6     /* Exception return address */
+ 
+ #define LOONGARCH_CSR_BADV            0x7     /* Bad virtual address */
+ 
+diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
+index 391f505352007e..e9849d70aee4af 100644
+--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
++++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
+@@ -282,6 +282,7 @@ int __init opal_event_init(void)
+                                name, NULL);
+               if (rc) {
+                      pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
++                      kfree(name);
+                       continue;
+               }
+       }
+diff --git a/block/elevator.c b/block/elevator.c
+index bd71f0fc4e4b67..06288117e2dd6d 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -624,7 +624,7 @@ static int elevator_switch_mq(struct request_queue *q,
+ static inline bool elv_support_iosched(struct request_queue *q)
+ {
+       if (!queue_is_mq(q) ||
+-          (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
++          (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+               return false;
+       return true;
+ }
+@@ -635,7 +635,7 @@ static inline bool elv_support_iosched(struct request_queue *q)
+  */
+ static struct elevator_type *elevator_get_default(struct request_queue *q)
+ {
+-      if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
++      if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+               return NULL;
+ 
+       if (q->nr_hw_queues != 1 &&
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 5dc9ccdd5a510a..206a13f3959676 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -341,7 +341,7 @@ void crypto_alg_tested(const char *name, int err)
+       q->cra_flags |= CRYPTO_ALG_DEAD;
+       alg = test->adult;
+ 
+-      if (list_empty(&alg->cra_list))
++      if (crypto_is_dead(alg))
+               goto complete;
+ 
+       if (err == -ECANCELED)
+diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
+index c72b0672fc7103..84c1065092796b 100644
+--- a/drivers/crypto/marvell/cesa/hash.c
++++ b/drivers/crypto/marvell/cesa/hash.c
+@@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = {
+               .base = {
+                       .cra_name = "md5",
+                       .cra_driver_name = "mv-md5",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+@@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = {
+               .base = {
+                       .cra_name = "sha1",
+                       .cra_driver_name = "mv-sha1",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+@@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = {
+               .base = {
+                       .cra_name = "sha256",
+                       .cra_driver_name = "mv-sha256",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+@@ -1327,7 +1327,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
+               .base = {
+                       .cra_name = "hmac(md5)",
+                       .cra_driver_name = "mv-hmac-md5",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+@@ -1398,7 +1398,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
+               .base = {
+                       .cra_name = "hmac(sha1)",
+                       .cra_driver_name = "mv-hmac-sha1",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+@@ -1469,7 +1469,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
+               .base = {
+                       .cra_name = "hmac(sha256)",
+                       .cra_driver_name = "mv-hmac-sha256",
+-                      .cra_priority = 300,
++                      .cra_priority = 0,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_ALLOCATES_MEMORY |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 5d9a34601a1ac5..c31e5f9d63da2a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -344,15 +344,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+       return r;
+ }
+ 
+-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
+ {
+-      struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
++      struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+ 
+-      amdgpu_bo_reserve(bo, true);
+-      amdgpu_bo_kunmap(bo);
+-      amdgpu_bo_unpin(bo);
+-      amdgpu_bo_unreserve(bo);
+-      amdgpu_bo_unref(&(bo));
++      amdgpu_bo_reserve(*bo, true);
++      amdgpu_bo_kunmap(*bo);
++      amdgpu_bo_unpin(*bo);
++      amdgpu_bo_unreserve(*bo);
++      amdgpu_bo_unref(bo);
+ }
+ 
+ int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 4b694886715cf0..c7672a1d15609a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -210,7 +210,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+                               void **mem_obj, uint64_t *gpu_addr,
+                               void **cpu_ptr, bool mqd_gfx9);
+-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
+ int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+                               void **mem_obj);
+ void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index e3cd66c4d95d8e..f83574107eb85b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -408,7 +408,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ 
+ err_create_queue:
+       if (wptr_bo)
+-              amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
++              amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo);
+ err_wptr_map_gart:
+ err_alloc_doorbells:
+ err_bind_process:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 27820f0a282d16..e2c055abfea974 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -673,7 +673,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ kfd_doorbell_error:
+       kfd_gtt_sa_fini(kfd);
+ kfd_gtt_sa_init_error:
+-      amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
++      amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
+ alloc_gtt_mem_failure:
+       if (kfd->gws)
+               amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
+@@ -693,7 +693,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+               kfd_doorbell_fini(kfd);
+               ida_destroy(&kfd->doorbell_ida);
+               kfd_gtt_sa_fini(kfd);
+-              amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
++              amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
+               if (kfd->gws)
+                       amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
+       }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 1b7b2942648047..3ab0a796af060e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -2392,7 +2392,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+ {
+       WARN(!mqd, "No hiq sdma mqd trunk to free");
+ 
+-      amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
++      amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+ }
+ 
+ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index 623ccd227b7de0..c733d6888c3088 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -204,7 +204,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
+             struct kfd_mem_obj *mqd_mem_obj)
+ {
+       if (mqd_mem_obj->gtt_mem) {
+-              amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
++              amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
+               kfree(mqd_mem_obj);
+       } else {
+               kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 5bca6abd55aeff..9582c9449fff94 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1052,7 +1052,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+ 
+               if (pdd->dev->shared_resources.enable_mes)
+                       amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
+-                                                 pdd->proc_ctx_bo);
++                                                 &pdd->proc_ctx_bo);
+               /*
+                * before destroying pdd, make sure to report availability
+                * for auto suspend
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 99aa8a8399d69a..1918a3c06ac865 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -441,9 +441,9 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ 
+               if (dev->shared_resources.enable_mes) {
+                       amdgpu_amdkfd_free_gtt_mem(dev->adev,
+-                                                 pqn->q->gang_ctx_bo);
++                                                 &pqn->q->gang_ctx_bo);
+                       if (pqn->q->wptr_bo)
+-                              amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
++                              amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo);
+ 
+               }
+               uninit_queue(pqn->q);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index bca10214e0bf1f..abdca2346f1a08 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -59,7 +59,7 @@
+ #define VMWGFX_DRIVER_MINOR 20
+ #define VMWGFX_DRIVER_PATCHLEVEL 0
+ #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+-#define VMWGFX_MAX_DISPLAYS 16
++#define VMWGFX_NUM_DISPLAY_UNITS 8
+ #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+ 
+ #define VMWGFX_MIN_INITIAL_WIDTH 1280
+@@ -79,7 +79,7 @@
+ #define VMWGFX_NUM_GB_CONTEXT 256
+ #define VMWGFX_NUM_GB_SHADER 20000
+ #define VMWGFX_NUM_GB_SURFACE 32768
+-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
++#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
+ #define VMWGFX_NUM_DXCONTEXT 256
+ #define VMWGFX_NUM_DXQUERY 512
+ #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index a8f349e748e566..5210b8084217cb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2261,7 +2261,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_vmw_update_layout_arg *arg =
+               (struct drm_vmw_update_layout_arg *)data;
+-      void __user *user_rects;
++      const void __user *user_rects;
+       struct drm_vmw_rect *rects;
+       struct drm_rect *drm_rects;
+       unsigned rects_size;
+@@ -2273,6 +2273,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+                                           VMWGFX_MIN_INITIAL_HEIGHT};
+               vmw_du_update_layout(dev_priv, 1, &def_rect);
+               return 0;
++      } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
++              return -E2BIG;
+       }
+ 
+       rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index 1099de1ece4b3e..a2a294841df414 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -199,9 +199,6 @@ struct vmw_kms_dirty {
+       s32 unit_y2;
+ };
+ 
+-#define VMWGFX_NUM_DISPLAY_UNITS 8
+-
+-
+ #define vmw_framebuffer_to_vfb(x) \
+       container_of(x, struct vmw_framebuffer, base)
+ #define vmw_framebuffer_to_vfbs(x) \
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index f3b183a7b7fa44..f1c106f5e90b92 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -859,6 +859,7 @@
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1     0xc539
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1   0xc53f
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY        0xc53a
++#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER  0xc548
+ #define USB_DEVICE_ID_SPACETRAVELLER  0xc623
+ #define USB_DEVICE_ID_SPACENAVIGATOR  0xc626
+ #define USB_DEVICE_ID_DINOVO_DESKTOP  0xc704
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index f86c1ea83a0378..a4062f617ba202 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev,
+               return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
+                                                              usage, bit, max);
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+              return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
+       default:
+               return 0;
+@@ -583,6 +584,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+              ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+               if (ret)
+                       return ret;
+@@ -777,6 +779,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+               return lenovo_event_cptkbd(hdev, field, usage, value);
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+               return lenovo_event_tp10ubkbd(hdev, field, usage, value);
+       default:
+               return 0;
+@@ -1059,6 +1062,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+              ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+               break;
+       }
+@@ -1289,6 +1293,7 @@ static int lenovo_probe(struct hid_device *hdev,
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+               ret = lenovo_probe_tp10ubkbd(hdev);
+               break;
+       default:
+@@ -1375,6 +1380,7 @@ static void lenovo_remove(struct hid_device *hdev)
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+       case USB_DEVICE_ID_LENOVO_X1_TAB:
++      case USB_DEVICE_ID_LENOVO_X1_TAB3:
+               lenovo_remove_tp10ubkbd(hdev);
+               break;
+       }
+@@ -1424,6 +1430,8 @@ static const struct hid_device_id lenovo_devices[] = {
+        */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
++      { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++                   USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
+       { }
+ };
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e7199ae2e3d918..bf9cad71125923 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2020,6 +2020,10 @@ static const struct hid_device_id mt_devices[] = {
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_ELAN, 0x3148) },
+ 
++      { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++              HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++                      USB_VENDOR_ID_ELAN, 0x32ae) },
++
+       /* Elitegroup panel */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+@@ -2089,6 +2093,11 @@ static const struct hid_device_id mt_devices[] = {
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       0x347d, 0x7853) },
+ 
++      /* HONOR MagicBook Art 14 touchpad */
++      { .driver_data = MT_CLS_VTL,
++              HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++                      0x35cc, 0x0104) },
++
+       /* Ilitek dual touch panel */
+       {  .driver_data = MT_CLS_NSMU,
+               MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
+@@ -2131,6 +2140,10 @@ static const struct hid_device_id mt_devices[] = {
+               HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_LOGITECH,
+                       USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
++      { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++              HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++                      USB_VENDOR_ID_LOGITECH,
++                      USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) },
+ 
+       /* MosArt panels */
+       { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
+index 4d0c3532dbe735..c19ab379e8c5ea 100644
+--- a/drivers/irqchip/irq-mscc-ocelot.c
++++ b/drivers/irqchip/irq-mscc-ocelot.c
+@@ -37,7 +37,7 @@ static struct chip_props ocelot_props = {
+       .reg_off_ena_clr        = 0x1c,
+       .reg_off_ena_set        = 0x20,
+       .reg_off_ident          = 0x38,
+-      .reg_off_trigger        = 0x5c,
++      .reg_off_trigger        = 0x4,
+       .n_irq                  = 24,
+ };
+ 
+@@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = {
+       .reg_off_ena_clr        = 0x1c,
+       .reg_off_ena_set        = 0x20,
+       .reg_off_ident          = 0x38,
+-      .reg_off_trigger        = 0x5c,
++      .reg_off_trigger        = 0x4,
+       .n_irq                  = 29,
+ };
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 009f7ffe4e10c7..f608f30c7d6a61 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3432,7 +3432,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                       sector_t sect;
+                       int must_sync;
+                       int any_working;
+-                      int need_recover = 0;
+                       struct raid10_info *mirror = &conf->mirrors[i];
+                       struct md_rdev *mrdev, *mreplace;
+ 
+@@ -3440,14 +3439,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                       mrdev = rcu_dereference(mirror->rdev);
+                       mreplace = rcu_dereference(mirror->replacement);
+ 
+-                      if (mrdev != NULL &&
+-                          !test_bit(Faulty, &mrdev->flags) &&
+-                          !test_bit(In_sync, &mrdev->flags))
+-                              need_recover = 1;
++                      if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
++                          test_bit(In_sync, &mrdev->flags)))
++                              mrdev = NULL;
+                       if (mreplace && test_bit(Faulty, &mreplace->flags))
+                               mreplace = NULL;
+ 
+-                      if (!need_recover && !mreplace) {
++                      if (!mrdev && !mreplace) {
+                               rcu_read_unlock();
+                               continue;
+                       }
+@@ -3481,7 +3479,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                               rcu_read_unlock();
+                               continue;
+                       }
+-                      atomic_inc(&mrdev->nr_pending);
++                      if (mrdev)
++                              atomic_inc(&mrdev->nr_pending);
+                       if (mreplace)
+                               atomic_inc(&mreplace->nr_pending);
+                       rcu_read_unlock();
+@@ -3568,7 +3567,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                               r10_bio->devs[1].devnum = i;
+                               r10_bio->devs[1].addr = to_addr;
+ 
+-                              if (need_recover) {
++                              if (mrdev) {
+                                       bio = r10_bio->devs[1].bio;
+                                       bio->bi_next = biolist;
+                                       biolist = bio;
+@@ -3613,7 +3612,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                                       for (k = 0; k < conf->copies; k++)
+                                               if (r10_bio->devs[k].devnum == i)
+                                                       break;
+-                                      if (!test_bit(In_sync,
++                                      if (mrdev && !test_bit(In_sync,
+                                                     &mrdev->flags)
+                                           && !rdev_set_badblocks(
+                                                   mrdev,
+@@ -3639,12 +3638,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+                               if (rb2)
+                                       atomic_dec(&rb2->remaining);
+                               r10_bio = rb2;
+-                              rdev_dec_pending(mrdev, mddev);
++                              if (mrdev)
++                                      rdev_dec_pending(mrdev, mddev);
+                               if (mreplace)
+                                       rdev_dec_pending(mreplace, mddev);
+                               break;
+                       }
+-                      rdev_dec_pending(mrdev, mddev);
++                      if (mrdev)
++                              rdev_dec_pending(mrdev, mddev);
+                       if (mreplace)
+                               rdev_dec_pending(mreplace, mddev);
+                       if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 72a2c41b9dbf80..fe9abc4ea3afe9 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1431,6 +1431,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
+       {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
++      {QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+      {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
+      {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0729ab54307259..92ffeb66056182 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1231,10 +1231,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+                          nvme_keep_alive_work_period(ctrl));
+ }
+ 
+-static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+-                                               blk_status_t status)
++static void nvme_keep_alive_finish(struct request *rq,
++              blk_status_t status, struct nvme_ctrl *ctrl)
+ {
+-      struct nvme_ctrl *ctrl = rq->end_io_data;
+       unsigned long flags;
+       bool startka = false;
+       unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+@@ -1252,13 +1251,11 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+               delay = 0;
+       }
+ 
+-      blk_mq_free_request(rq);
+-
+       if (status) {
+               dev_err(ctrl->device,
+                       "failed nvme_keep_alive_end_io error=%d\n",
+                               status);
+-              return RQ_END_IO_NONE;
++              return;
+       }
+ 
+       ctrl->ka_last_check_time = jiffies;
+@@ -1270,7 +1267,6 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+       spin_unlock_irqrestore(&ctrl->lock, flags);
+       if (startka)
+               queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+-      return RQ_END_IO_NONE;
+ }
+ 
+ static void nvme_keep_alive_work(struct work_struct *work)
+@@ -1279,6 +1275,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
+                       struct nvme_ctrl, ka_work);
+       bool comp_seen = ctrl->comp_seen;
+       struct request *rq;
++      blk_status_t status;
+ 
+       ctrl->ka_last_check_time = jiffies;
+ 
+@@ -1301,9 +1298,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
+       nvme_init_request(rq, &ctrl->ka_cmd);
+ 
+       rq->timeout = ctrl->kato * HZ;
+-      rq->end_io = nvme_keep_alive_end_io;
+-      rq->end_io_data = ctrl;
+-      blk_execute_rq_nowait(rq, false);
++      status = blk_execute_rq(rq, false);
++      nvme_keep_alive_finish(rq, status, ctrl);
++      blk_mq_free_request(rq);
+ }
+ 
+ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+@@ -2394,8 +2391,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+       else
+               ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ 
+-      if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+-              ctrl->ctrl_config |= NVME_CC_CRIME;
++      /*
++       * Setting CRIME results in CSTS.RDY before the media is ready. This
++       * makes it possible for media related commands to return the error
++       * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
++       * restructured to handle retries, disable CC.CRIME.
++       */
++      ctrl->ctrl_config &= ~NVME_CC_CRIME;
+ 
+       ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+       ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+@@ -2430,10 +2432,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+                * devices are known to get this wrong. Use the larger of the
+                * two values.
+                */
+-              if (ctrl->ctrl_config & NVME_CC_CRIME)
+-                      ready_timeout = NVME_CRTO_CRIMT(crto);
+-              else
+-                      ready_timeout = NVME_CRTO_CRWMT(crto);
++              ready_timeout = NVME_CRTO_CRWMT(crto);
+ 
+               if (ready_timeout < timeout)
+                       dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 93ada8941a4c5c..43b89c7d585f0b 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -463,6 +463,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
+       return ret;
+ }
+ 
++static void nvme_partition_scan_work(struct work_struct *work)
++{
++      struct nvme_ns_head *head =
++              container_of(work, struct nvme_ns_head, partition_scan_work);
++
++      if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
++                                           &head->disk->state)))
++              return;
++
++      mutex_lock(&head->disk->open_mutex);
++      bdev_disk_changed(head->disk, false);
++      mutex_unlock(&head->disk->open_mutex);
++}
++
+ static void nvme_requeue_work(struct work_struct *work)
+ {
+       struct nvme_ns_head *head =
+@@ -489,6 +503,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
+       bio_list_init(&head->requeue_list);
+       spin_lock_init(&head->requeue_lock);
+       INIT_WORK(&head->requeue_work, nvme_requeue_work);
++      INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
+ 
+       /*
+        * Add a multipath node if the subsystems supports multiple controllers.
+@@ -504,6 +519,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
+               return -ENOMEM;
+       head->disk->fops = &nvme_ns_head_ops;
+       head->disk->private_data = head;
++
++      /*
++       * We need to suppress the partition scan from occurring within the
++       * controller's scan_work context. If a path error occurs here, the IO
++       * will wait until a path becomes available or all paths are torn down,
++       * but that action also occurs within scan_work, so it would deadlock.
++       * Defer the partition scan to a different context that does not block
++       * scan_work.
++       */
++      set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
+       sprintf(head->disk->disk_name, "nvme%dn%d",
+                       ctrl->subsys->instance, head->instance);
+ 
+@@ -552,6 +577,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+                       return;
+               }
+               nvme_add_ns_head_cdev(head);
++              kblockd_schedule_work(&head->partition_scan_work);
+       }
+ 
+       mutex_lock(&head->lock);
+@@ -851,6 +877,12 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+       kblockd_schedule_work(&head->requeue_work);
+       if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+               nvme_cdev_del(&head->cdev, &head->cdev_device);
++              /*
++               * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
++               * to allow multipath to fail all I/O.
++               */
++              synchronize_srcu(&head->srcu);
++              kblockd_schedule_work(&head->requeue_work);
+               del_gendisk(head->disk);
+       }
+ }
+@@ -862,6 +894,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+       /* make sure all pending bios are cleaned up */
+       kblockd_schedule_work(&head->requeue_work);
+       flush_work(&head->requeue_work);
++      flush_work(&head->partition_scan_work);
+       put_disk(head->disk);
+ }
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 5f8a146b701484..0f49b779dec65a 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -460,6 +460,7 @@ struct nvme_ns_head {
+       struct bio_list         requeue_list;
+       spinlock_t              requeue_lock;
+       struct work_struct      requeue_work;
++      struct work_struct      partition_scan_work;
+       struct mutex            lock;
+       unsigned long           flags;
+ #define NVME_NSHEAD_DISK_LIVE 0
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index f2fedd25915f96..29489c2c52fb9c 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2495,10 +2495,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+ 
+       len = nvmf_get_address(ctrl, buf, size);
+ 
++      if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
++              return len;
++
+       mutex_lock(&queue->queue_lock);
+ 
+-      if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+-              goto done;
+       ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+       if (ret > 0) {
+               if (len > 0)
+@@ -2506,7 +2507,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+               len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+                               (len) ? "," : "", &src_addr);
+       }
+-done:
++
+       mutex_unlock(&queue->queue_lock);
+ 
+       return len;
+diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
+index 9178076d9d7da4..94710471d7dd26 100644
+--- a/drivers/platform/x86/x86-android-tablets.c
++++ b/drivers/platform/x86/x86-android-tablets.c
+@@ -1853,8 +1853,9 @@ static __init int x86_android_tablet_init(void)
+       for (i = 0; i < pdev_count; i++) {
+              pdevs[i] = platform_device_register_full(&dev_info->pdev_info[i]);
+               if (IS_ERR(pdevs[i])) {
++                      ret = PTR_ERR(pdevs[i]);
+                       x86_android_tablet_cleanup();
+-                      return PTR_ERR(pdevs[i]);
++                      return ret;
+               }
+       }
+ 
+diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
+index 3ec5ca3aefe1d2..c80cb72b06491c 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_base.c
++++ b/drivers/vdpa/ifcvf/ifcvf_base.c
+@@ -78,7 +78,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+       u32 i;
+ 
+       ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+-      if (ret < 0) {
++      if (ret) {
+               IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+               return -EIO;
+       }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3bf214d4afef50..987d49e18dbe86 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5617,8 +5617,8 @@ failed_mount9: __maybe_unused
+ failed_mount3:
+       /* flush s_error_work before sbi destroy */
+       flush_work(&sbi->s_error_work);
+-      del_timer_sync(&sbi->s_err_report);
+       ext4_stop_mmpd(sbi);
++      del_timer_sync(&sbi->s_err_report);
+       ext4_group_desc_free(sbi);
+ failed_mount:
+       if (sbi->s_chksum_driver)
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 026ed43c067042..057aa3cec90252 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -1646,6 +1646,15 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
+                         le16_to_cpu(new_de->key_size), sbi);
+       /* ni_unlock(dir_ni); will be called later. */
+ out6:
++      attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL);
++      if (attr && attr->non_res) {
++              /* Delete ATTR_EA, if non-resident. */
++              struct runs_tree run;
++              run_init(&run);
++              attr_set_size(ni, ATTR_EA, NULL, 0, &run, 0, NULL, false, NULL);
++              run_close(&run);
++      }
++
+       if (rp_inserted)
+               ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index ea7c79e8ce4294..e96b947c3f5dd5 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1129,9 +1129,12 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+       trace_ocfs2_setattr(inode, dentry,
+                           (unsigned long long)OCFS2_I(inode)->ip_blkno,
+                           dentry->d_name.len, dentry->d_name.name,
+-                          attr->ia_valid, attr->ia_mode,
+-                          from_kuid(&init_user_ns, attr->ia_uid),
+-                          from_kgid(&init_user_ns, attr->ia_gid));
++                          attr->ia_valid,
++                              attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0,
++                              attr->ia_valid & ATTR_UID ?
++                                      from_kuid(&init_user_ns, attr->ia_uid) : 0,
++                              attr->ia_valid & ATTR_GID ?
++                                      from_kgid(&init_user_ns, attr->ia_gid) : 0);
+ 
+       /* ensuring we don't even attempt to truncate a symlink */
+       if (S_ISLNK(inode->i_mode))
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 215b56dc26df2b..d26b57e87f7f43 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -544,7 +544,6 @@ struct hci_dev {
+       __u32                   req_status;
+       __u32                   req_result;
+       struct sk_buff          *req_skb;
+-      struct sk_buff          *req_rsp;
+ 
+       void                    *smp_data;
+       void                    *smp_bredr_data;
+@@ -735,7 +734,6 @@ struct hci_conn {
+       unsigned long   flags;
+ 
+       enum conn_reasons conn_reason;
+-      __u8            abort_reason;
+ 
+       __u32           clock;
+       __u16           clock_accuracy;
+@@ -755,6 +753,7 @@ struct hci_conn {
+       struct delayed_work auto_accept_work;
+       struct delayed_work idle_work;
+       struct delayed_work le_conn_timeout;
++      struct work_struct  le_scan_cleanup;
+ 
+       struct device   dev;
+       struct dentry   *debugfs;
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 92c1aa8f350178..4f0ae938b1463f 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3921,8 +3921,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+       }
+ 
+       if (sqd) {
++              mutex_unlock(&ctx->uring_lock);
+               mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
++              mutex_lock(&ctx->uring_lock);
+       }
+ 
+       if (copy_to_user(arg, new_count, sizeof(new_count)))
+@@ -3947,8 +3949,11 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+       return 0;
+ err:
+       if (sqd) {
++              mutex_unlock(&ctx->uring_lock);
+               mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
++              mutex_lock(&ctx->uring_lock);
++
+       }
+       return ret;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index bb54f1f4fafba5..da90f565317d4c 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15500,7 +15500,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
+       /* 'struct bpf_verifier_env' can be global, but since it's not small,
+        * allocate/free it every time bpf_check() is called
+        */
+-      env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
++      env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+       if (!env)
+               return -ENOMEM;
+       log = &env->log;
+@@ -15721,6 +15721,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
+               mutex_unlock(&bpf_verifier_lock);
+       vfree(env->insn_aux_data);
+ err_free_env:
+-      kfree(env);
++      kvfree(env);
+       return ret;
+ }
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 127c78aec17dbd..a6a3ff2a441ed8 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -858,9 +858,11 @@ static const struct file_operations uprobe_profile_ops = {
+ struct uprobe_cpu_buffer {
+       struct mutex mutex;
+       void *buf;
++      int dsize;
+ };
+ static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
+ static int uprobe_buffer_refcnt;
++#define MAX_UCB_BUFFER_SIZE PAGE_SIZE
+ 
+ static int uprobe_buffer_init(void)
+ {
+@@ -947,9 +949,31 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
+       mutex_unlock(&ucb->mutex);
+ }
+ 
++static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
++                                                     struct pt_regs *regs)
++{
++      struct uprobe_cpu_buffer *ucb;
++      int dsize, esize;
++
++      esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
++      dsize = __get_data_size(&tu->tp, regs);
++
++      ucb = uprobe_buffer_get();
++      ucb->dsize = tu->tp.size + dsize;
++
++      if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
++              ucb->dsize = MAX_UCB_BUFFER_SIZE;
++              dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
++      }
++
++      store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
++
++      return ucb;
++}
++
+ static void __uprobe_trace_func(struct trace_uprobe *tu,
+                               unsigned long func, struct pt_regs *regs,
+-                              struct uprobe_cpu_buffer *ucb, int dsize,
++                              struct uprobe_cpu_buffer *ucb,
+                               struct trace_event_file *trace_file)
+ {
+       struct uprobe_trace_entry_head *entry;
+@@ -960,14 +984,11 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+ 
+       WARN_ON(call != trace_file->event_call);
+ 
+-      if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
+-              return;
+-
+       if (trace_trigger_soft_disabled(trace_file))
+               return;
+ 
+       esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+-      size = esize + tu->tp.size + dsize;
++      size = esize + ucb->dsize;
+       entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
+       if (!entry)
+               return;
+@@ -981,14 +1002,14 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+               data = DATAOF_TRACE_ENTRY(entry, false);
+       }
+ 
+-      memcpy(data, ucb->buf, tu->tp.size + dsize);
++      memcpy(data, ucb->buf, ucb->dsize);
+ 
+       trace_event_buffer_commit(&fbuffer);
+ }
+ 
+ /* uprobe handler */
+ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+-                           struct uprobe_cpu_buffer *ucb, int dsize)
++                           struct uprobe_cpu_buffer *ucb)
+ {
+       struct event_file_link *link;
+ 
+@@ -997,7 +1018,7 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+ 
+       rcu_read_lock();
+       trace_probe_for_each_link_rcu(link, &tu->tp)
+-              __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
++              __uprobe_trace_func(tu, 0, regs, ucb, link->file);
+       rcu_read_unlock();
+ 
+       return 0;
+@@ -1005,13 +1026,13 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+ 
+ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
+                                struct pt_regs *regs,
+-                               struct uprobe_cpu_buffer *ucb, int dsize)
++                               struct uprobe_cpu_buffer *ucb)
+ {
+       struct event_file_link *link;
+ 
+       rcu_read_lock();
+       trace_probe_for_each_link_rcu(link, &tu->tp)
+-              __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
++              __uprobe_trace_func(tu, func, regs, ucb, link->file);
+       rcu_read_unlock();
+ }
+ 
+@@ -1339,7 +1360,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
+ 
+ static void __uprobe_perf_func(struct trace_uprobe *tu,
+                              unsigned long func, struct pt_regs *regs,
+-                             struct uprobe_cpu_buffer *ucb, int dsize)
++                             struct uprobe_cpu_buffer *ucb)
+ {
+       struct trace_event_call *call = trace_probe_event_call(&tu->tp);
+       struct uprobe_trace_entry_head *entry;
+@@ -1360,7 +1381,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+ 
+       esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+ 
+-      size = esize + tu->tp.size + dsize;
++      size = esize + ucb->dsize;
+       size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
+      if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
+               return;
+@@ -1383,13 +1404,10 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+               data = DATAOF_TRACE_ENTRY(entry, false);
+       }
+ 
+-      memcpy(data, ucb->buf, tu->tp.size + dsize);
+-
+-      if (size - esize > tu->tp.size + dsize) {
+-              int len = tu->tp.size + dsize;
++      memcpy(data, ucb->buf, ucb->dsize);
+ 
+-              memset(data + len, 0, size - esize - len);
+-      }
++      if (size - esize > ucb->dsize)
++              memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
+ 
+       perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+                             head, NULL);
+@@ -1399,21 +1417,21 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+ 
+ /* uprobe profile handler */
+ static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
+-                          struct uprobe_cpu_buffer *ucb, int dsize)
++                          struct uprobe_cpu_buffer *ucb)
+ {
+       if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
+               return UPROBE_HANDLER_REMOVE;
+ 
+       if (!is_ret_probe(tu))
+-              __uprobe_perf_func(tu, 0, regs, ucb, dsize);
++              __uprobe_perf_func(tu, 0, regs, ucb);
+       return 0;
+ }
+ 
+ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
+                               struct pt_regs *regs,
+-                              struct uprobe_cpu_buffer *ucb, int dsize)
++                              struct uprobe_cpu_buffer *ucb)
+ {
+-      __uprobe_perf_func(tu, func, regs, ucb, dsize);
++      __uprobe_perf_func(tu, func, regs, ucb);
+ }
+ 
+ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+@@ -1479,10 +1497,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb;
+-      int dsize, esize;
+       int ret = 0;
+ 
+-
+       tu = container_of(con, struct trace_uprobe, consumer);
+       tu->nhit++;
+ 
+@@ -1494,18 +1510,14 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+ 
+-      dsize = __get_data_size(&tu->tp, regs);
+-      esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+-
+-      ucb = uprobe_buffer_get();
+-      store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
++      ucb = prepare_uprobe_buffer(tu, regs);
+ 
+       if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+-              ret |= uprobe_trace_func(tu, regs, ucb, dsize);
++              ret |= uprobe_trace_func(tu, regs, ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+       if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+-              ret |= uprobe_perf_func(tu, regs, ucb, dsize);
++              ret |= uprobe_perf_func(tu, regs, ucb);
+ #endif
+       uprobe_buffer_put(ucb);
+       return ret;
+@@ -1517,7 +1529,6 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb;
+-      int dsize, esize;
+ 
+       tu = container_of(con, struct trace_uprobe, consumer);
+ 
+@@ -1529,18 +1540,13 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+ 
+-      dsize = __get_data_size(&tu->tp, regs);
+-      esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+-
+-      ucb = uprobe_buffer_get();
+-      store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
+-
++      ucb = prepare_uprobe_buffer(tu, regs);
+       if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+-              uretprobe_trace_func(tu, func, regs, ucb, dsize);
++              uretprobe_trace_func(tu, func, regs, ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+       if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+-              uretprobe_perf_func(tu, func, regs, ucb, dsize);
++              uretprobe_perf_func(tu, func, regs, ucb);
+ #endif
+       uprobe_buffer_put(ucb);
+       return 0;
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index a14d655bddc680..d89e0846cb1687 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -1325,7 +1325,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
+               /* Zero out spare memory. */
+               if (want_init_on_alloc(flags)) {
+                       kasan_disable_current();
+-                      memset((void *)p + new_size, 0, ks - new_size);
++                      memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
+                       kasan_enable_current();
+               }
+ 
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 0fc2d706d9c239..e876d6fea2fc44 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -967,8 +967,10 @@ static int p9_client_version(struct p9_client *c)
+ struct p9_client *p9_client_create(const char *dev_name, char *options)
+ {
+       int err;
++      static atomic_t seqno = ATOMIC_INIT(0);
+       struct p9_client *clnt;
+       char *client_id;
++      char *cache_name;
+ 
+       err = 0;
+       clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
+@@ -1026,15 +1028,23 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
+       if (err)
+               goto close_trans;
+ 
++      cache_name = kasprintf(GFP_KERNEL,
++              "9p-fcall-cache-%u", atomic_inc_return(&seqno));
++      if (!cache_name) {
++              err = -ENOMEM;
++              goto close_trans;
++      }
++
+       /* P9_HDRSZ + 4 is the smallest packet header we can have that is
+        * followed by data accessed from userspace by read
+        */
+       clnt->fcall_cache =
+-              kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize,
++              kmem_cache_create_usercopy(cache_name, clnt->msize,
+                                          0, 0, P9_HDRSZ + 4,
+                                          clnt->msize - (P9_HDRSZ + 4),
+                                          NULL);
+ 
++      kfree(cache_name);
+       return clnt;
+ 
+ close_trans:
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 4c3bd051603872..7960fd514e5a80 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -307,11 +307,14 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+       if (flags & MSG_OOB)
+               return -EOPNOTSUPP;
+ 
++      lock_sock(sk);
++
+       skb = skb_recv_datagram(sk, flags, &err);
+       if (!skb) {
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       err = 0;
+ 
++              release_sock(sk);
+               return err;
+       }
+ 
+@@ -337,6 +340,8 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+       skb_free_datagram(sk, skb);
+ 
++      release_sock(sk);
++
+       if (flags & MSG_TRUNC)
+               copied = skblen;
+ 
+@@ -559,11 +564,10 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+               if (sk->sk_state == BT_LISTEN)
+                       return -EINVAL;
+ 
+-              spin_lock(&sk->sk_receive_queue.lock);
++              lock_sock(sk);
+               skb = skb_peek(&sk->sk_receive_queue);
+               amount = skb ? skb->len : 0;
+-              spin_unlock(&sk->sk_receive_queue.lock);
+-
++              release_sock(sk);
+               err = put_user(amount, (int __user *)arg);
+               break;
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 5ec2160108a1fa..49b9dd21b73ea6 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -174,6 +174,57 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+       hci_dev_put(hdev);
+ }
+ 
++static void le_scan_cleanup(struct work_struct *work)
++{
++      struct hci_conn *conn = container_of(work, struct hci_conn,
++                                           le_scan_cleanup);
++      struct hci_dev *hdev = conn->hdev;
++      struct hci_conn *c = NULL;
++
++      BT_DBG("%s hcon %p", hdev->name, conn);
++
++      hci_dev_lock(hdev);
++
++      /* Check that the hci_conn is still around */
++      rcu_read_lock();
++      list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
++              if (c == conn)
++                      break;
++      }
++      rcu_read_unlock();
++
++      if (c == conn) {
++              hci_connect_le_scan_cleanup(conn, 0x00);
++              hci_conn_cleanup(conn);
++      }
++
++      hci_dev_unlock(hdev);
++      hci_dev_put(hdev);
++      hci_conn_put(conn);
++}
++
++static void hci_connect_le_scan_remove(struct hci_conn *conn)
++{
++      BT_DBG("%s hcon %p", conn->hdev->name, conn);
++
++      /* We can't call hci_conn_del/hci_conn_cleanup here since that
++       * could deadlock with another hci_conn_del() call that's holding
++       * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
++       * Instead, grab temporary extra references to the hci_dev and
++       * hci_conn and perform the necessary cleanup in a separate work
++       * callback.
++       */
++
++      hci_dev_hold(conn->hdev);
++      hci_conn_get(conn);
++
++      /* Even though we hold a reference to the hdev, many other
++       * things might get cleaned up meanwhile, including the hdev's
++       * own workqueue, so we can't use that for scheduling.
++       */
++      schedule_work(&conn->le_scan_cleanup);
++}
++
+ static void hci_acl_create_connection(struct hci_conn *conn)
+ {
+       struct hci_dev *hdev = conn->hdev;
+@@ -625,6 +676,13 @@ static void hci_conn_timeout(struct work_struct *work)
+       if (refcnt > 0)
+               return;
+ 
++      /* LE connections in scanning state need special handling */
++      if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
++          test_bit(HCI_CONN_SCANNING, &conn->flags)) {
++              hci_connect_le_scan_remove(conn);
++              return;
++      }
++
+       hci_abort_conn(conn, hci_proto_disconn_ind(conn));
+ }
+ 
+@@ -996,6 +1054,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+       INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+       INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
+       INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
++      INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
+ 
+       atomic_set(&conn->refcnt, 0);
+ 
+@@ -2781,46 +2840,81 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+       return phys;
+ }
+ 
+-static int abort_conn_sync(struct hci_dev *hdev, void *data)
++int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ {
+-      struct hci_conn *conn;
+-      u16 handle = PTR_ERR(data);
++      int r = 0;
+ 
+-      conn = hci_conn_hash_lookup_handle(hdev, handle);
+-      if (!conn)
++      if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+               return 0;
+ 
+-      return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
+-}
++      switch (conn->state) {
++      case BT_CONNECTED:
++      case BT_CONFIG:
++              if (conn->type == AMP_LINK) {
++                      struct hci_cp_disconn_phy_link cp;
+ 
+-int hci_abort_conn(struct hci_conn *conn, u8 reason)
+-{
+-      struct hci_dev *hdev = conn->hdev;
++                      cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
++                      cp.reason = reason;
++                      r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
++                                       sizeof(cp), &cp);
++              } else {
++                      struct hci_cp_disconnect dc;
+ 
+-      /* If abort_reason has already been set it means the connection is
+-       * already being aborted so don't attempt to overwrite it.
+-       */
+-      if (conn->abort_reason)
+-              return 0;
++                      dc.handle = cpu_to_le16(conn->handle);
++                      dc.reason = reason;
++                      r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
++                                       sizeof(dc), &dc);
++              }
+ 
+-      bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
++              conn->state = BT_DISCONN;
+ 
+-      conn->abort_reason = reason;
++              break;
++      case BT_CONNECT:
++              if (conn->type == LE_LINK) {
++                      if (test_bit(HCI_CONN_SCANNING, &conn->flags))
++                              break;
++                      r = hci_send_cmd(conn->hdev,
++                                       HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
++              } else if (conn->type == ACL_LINK) {
++                      if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
++                              break;
++                      r = hci_send_cmd(conn->hdev,
++                                       HCI_OP_CREATE_CONN_CANCEL,
++                                       6, &conn->dst);
++              }
++              break;
++      case BT_CONNECT2:
++              if (conn->type == ACL_LINK) {
++                      struct hci_cp_reject_conn_req rej;
++
++                      bacpy(&rej.bdaddr, &conn->dst);
++                      rej.reason = reason;
++
++                      r = hci_send_cmd(conn->hdev,
++                                       HCI_OP_REJECT_CONN_REQ,
++                                       sizeof(rej), &rej);
++              } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
++                      struct hci_cp_reject_sync_conn_req rej;
++
++                      bacpy(&rej.bdaddr, &conn->dst);
++
++                      /* SCO rejection has its own limited set of
++                       * allowed error values (0x0D-0x0F) which isn't
++                       * compatible with most values passed to this
++                       * function. To be safe hard-code one of the
++                       * values that's suitable for SCO.
++                       */
++                      rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+ 
+-      /* If the connection is pending check the command opcode since that
+-       * might be blocking on hci_cmd_sync_work while waiting its respective
+-       * event so we need to hci_cmd_sync_cancel to cancel it.
+-       */
+-      if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+-              switch (hci_skb_event(hdev->sent_cmd)) {
+-              case HCI_EV_LE_CONN_COMPLETE:
+-              case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+-              case HCI_EVT_LE_CIS_ESTABLISHED:
+-                      hci_cmd_sync_cancel(hdev, ECANCELED);
+-                      break;
++                      r = hci_send_cmd(conn->hdev,
++                                       HCI_OP_REJECT_SYNC_CONN_REQ,
++                                       sizeof(rej), &rej);
+               }
++              break;
++      default:
++              conn->state = BT_CLOSED;
++              break;
+       }
+ 
+-      return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
+-                                NULL);
++      return r;
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 993b98257bc280..f93f3e7a3d9052 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -869,7 +869,7 @@ int hci_get_dev_info(void __user *arg)
+       else
+               flags = hdev->flags;
+ 
+-      strscpy(di.name, hdev->name, sizeof(di.name));
++      strcpy(di.name, hdev->name);
+       di.bdaddr   = hdev->bdaddr;
+       di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
+       di.flags    = flags;
+@@ -1452,8 +1452,8 @@ static void hci_cmd_timeout(struct work_struct *work)
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           cmd_timer.work);
+ 
+-      if (hdev->req_skb) {
+-              u16 opcode = hci_skb_opcode(hdev->req_skb);
++      if (hdev->sent_cmd) {
++              u16 opcode = hci_skb_opcode(hdev->sent_cmd);
+ 
+               bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+ 
+@@ -2762,7 +2762,6 @@ void hci_release_dev(struct hci_dev *hdev)
+ 
+       ida_simple_remove(&hci_index_ida, hdev->id);
+       kfree_skb(hdev->sent_cmd);
+-      kfree_skb(hdev->req_skb);
+       kfree_skb(hdev->recv_event);
+       kfree(hdev);
+ }
+@@ -3092,33 +3091,21 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ EXPORT_SYMBOL(__hci_cmd_send);
+ 
+ /* Get data from the previously sent command */
+-static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
++void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+ {
+       struct hci_command_hdr *hdr;
+ 
+-      if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
++      if (!hdev->sent_cmd)
+               return NULL;
+ 
+-      hdr = (void *)skb->data;
++      hdr = (void *) hdev->sent_cmd->data;
+ 
+       if (hdr->opcode != cpu_to_le16(opcode))
+               return NULL;
+ 
+-      return skb->data + HCI_COMMAND_HDR_SIZE;
+-}
++      BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ 
+-/* Get data from the previously sent command */
+-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+-{
+-      void *data;
+-
+-      /* Check if opcode matches last sent command */
+-      data = hci_cmd_data(hdev->sent_cmd, opcode);
+-      if (!data)
+-              /* Check if opcode matches last request */
+-              data = hci_cmd_data(hdev->req_skb, opcode);
+-
+-      return data;
++      return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+ }
+ 
+ /* Get data from last received event */
+@@ -3859,6 +3846,8 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+       hci_dev_lock(hdev);
+       conn = hci_conn_hash_lookup_handle(hdev, handle);
++      if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
++              mgmt_device_connected(hdev, conn, NULL, 0);
+       hci_dev_unlock(hdev);
+ 
+       if (conn) {
+@@ -4014,19 +4003,17 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+       if (!status && !hci_req_is_complete(hdev))
+               return;
+ 
+-      skb = hdev->req_skb;
+-
+       /* If this was the last command in a request the complete
+-       * callback would be found in hdev->req_skb instead of the
++       * callback would be found in hdev->sent_cmd instead of the
+        * command queue (hdev->cmd_q).
+        */
+-      if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+-              *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
++      if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
++              *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+               return;
+       }
+ 
+-      if (skb && bt_cb(skb)->hci.req_complete) {
+-              *req_complete = bt_cb(skb)->hci.req_complete;
++      if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
++              *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+               return;
+       }
+ 
+@@ -4143,11 +4130,8 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+               return;
+       }
+ 
+-      if (hci_req_status_pend(hdev) &&
+-          !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+-              kfree_skb(hdev->req_skb);
+-              hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+-      }
++      if (hci_req_status_pend(hdev))
++              hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+ 
+       atomic_dec(&hdev->cmd_cnt);
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 0c0141c59fd1fe..7c1df481ebe9d5 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3789,7 +3789,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
+               goto unlock;
+       }
+ 
+-      if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
++      if (!ev->status) {
+               struct hci_cp_remote_name_req cp;
+               memset(&cp, 0, sizeof(cp));
+               bacpy(&cp.bdaddr, &conn->dst);
+@@ -4354,7 +4354,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
+        * (since for this kind of commands there will not be a command
+        * complete event).
+        */
+-      if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
++      if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
+               hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+                                    req_complete_skb);
+               if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+@@ -7171,10 +7171,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
+       bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
+ 
+       /* Only match event if command OGF is for LE */
+-      if (hdev->req_skb &&
+-          hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
+-          hci_skb_event(hdev->req_skb) == ev->subevent) {
+-              *opcode = hci_skb_opcode(hdev->req_skb);
++      if (hdev->sent_cmd &&
++          hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
++          hci_skb_event(hdev->sent_cmd) == ev->subevent) {
++              *opcode = hci_skb_opcode(hdev->sent_cmd);
+               hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+                                    req_complete_skb);
+       }
+@@ -7561,10 +7561,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+       }
+ 
+       /* Only match event if command OGF is not for LE */
+-      if (hdev->req_skb &&
+-          hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
+-          hci_skb_event(hdev->req_skb) == event) {
+-              hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
++      if (hdev->sent_cmd &&
++          hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
++          hci_skb_event(hdev->sent_cmd) == event) {
++              hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+                                    status, &req_complete, &req_complete_skb);
+               req_evt = event;
+       }
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index c368235202b25f..862ac5e1f4b492 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -31,10 +31,6 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+       hdev->req_result = result;
+       hdev->req_status = HCI_REQ_DONE;
+ 
+-      /* Free the request command so it is not used as response */
+-      kfree_skb(hdev->req_skb);
+-      hdev->req_skb = NULL;
+-
+       if (skb) {
+               struct sock *sk = hci_skb_sk(skb);
+ 
+@@ -42,7 +38,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+               if (sk)
+                       sock_put(sk);
+ 
+-              hdev->req_rsp = skb_get(skb);
++              hdev->req_skb = skb_get(skb);
+       }
+ 
+       wake_up_interruptible(&hdev->req_wait_q);
+@@ -190,8 +186,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 
+       hdev->req_status = 0;
+       hdev->req_result = 0;
+-      skb = hdev->req_rsp;
+-      hdev->req_rsp = NULL;
++      skb = hdev->req_skb;
++      hdev->req_skb = NULL;
+ 
+       bt_dev_dbg(hdev, "end: err %d", err);
+ 
+@@ -4941,11 +4937,6 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+                       hdev->sent_cmd = NULL;
+               }
+ 
+-              if (hdev->req_skb) {
+-                      kfree_skb(hdev->req_skb);
+-                      hdev->req_skb = NULL;
+-              }
+-
+               clear_bit(HCI_RUNNING, &hdev->flags);
+               hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+ 
+@@ -5107,12 +5098,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+               hdev->sent_cmd = NULL;
+       }
+ 
+-      /* Drop last request */
+-      if (hdev->req_skb) {
+-              kfree_skb(hdev->req_skb);
+-              hdev->req_skb = NULL;
+-      }
+-
+       clear_bit(HCI_RUNNING, &hdev->flags);
+       hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+ 
+@@ -5308,27 +5293,22 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ }
+ 
+ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+-                                    struct hci_conn *conn, u8 reason)
++                                    struct hci_conn *conn)
+ {
+-      /* Return reason if scanning since the connection shall probably be
+-       * cleanup directly.
+-       */
+       if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+-              return reason;
++              return 0;
+ 
+-      if (conn->role == HCI_ROLE_SLAVE ||
+-          test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++      if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+               return 0;
+ 
+       return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+                                    0, NULL, HCI_CMD_TIMEOUT);
+ }
+ 
+-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+-                                 u8 reason)
++static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ {
+       if (conn->type == LE_LINK)
+-              return hci_le_connect_cancel_sync(hdev, conn, reason);
++              return hci_le_connect_cancel_sync(hdev, conn);
+ 
+       if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+               return 0;
+@@ -5381,11 +5361,9 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+       case BT_CONFIG:
+               return hci_disconnect_sync(hdev, conn, reason);
+       case BT_CONNECT:
+-              err = hci_connect_cancel_sync(hdev, conn, reason);
++              err = hci_connect_cancel_sync(hdev, conn);
+               /* Cleanup hci_conn object if it cannot be cancelled as it
+-               * likelly means the controller and host stack are out of sync
+-               * or in case of LE it was still scanning so it can be cleanup
+-               * safely.
++               * likelly means the controller and host stack are out of sync.
+                */
+               if (err) {
+                       hci_dev_lock(hdev);
+@@ -6300,7 +6278,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ 
+ done:
+       if (err == -ETIMEDOUT)
+-              hci_le_connect_cancel_sync(hdev, conn, 0x00);
++              hci_le_connect_cancel_sync(hdev, conn);
+ 
+       /* Re-enable advertising after the connection attempt is finished. */
+       hci_resume_advertising_sync(hdev);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 209c6d458d3369..187c9184387673 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4300,18 +4300,9 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ static int l2cap_connect_req(struct l2cap_conn *conn,
+                            struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ {
+-      struct hci_dev *hdev = conn->hcon->hdev;
+-      struct hci_conn *hcon = conn->hcon;
+-
+       if (cmd_len < sizeof(struct l2cap_conn_req))
+               return -EPROTO;
+ 
+-      hci_dev_lock(hdev);
+-      if (hci_dev_test_flag(hdev, HCI_MGMT) &&
+-          !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+-              mgmt_device_connected(hdev, hcon, NULL, 0);
+-      hci_dev_unlock(hdev);
+-
+       l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
+       return 0;
+ }
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 284a0672dcc385..5a1015ccc0635b 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -3600,6 +3600,18 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+       return err;
+ }
+ 
++static int abort_conn_sync(struct hci_dev *hdev, void *data)
++{
++      struct hci_conn *conn;
++      u16 handle = PTR_ERR(data);
++
++      conn = hci_conn_hash_lookup_handle(hdev, handle);
++      if (!conn)
++              return 0;
++
++      return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
++}
++
+ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+                             u16 len)
+ {
+@@ -3650,7 +3662,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+                                             le_addr_type(addr->type));
+ 
+       if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
+-              hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
++              hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
++                                 NULL);
+ 
+ unlock:
+       hci_dev_unlock(hdev);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 3f3286cf438e7d..2f6fef5f5864fc 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2226,7 +2226,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
+               rcu_read_unlock();
+               return ret;
+       }
+-      rcu_read_unlock_bh();
++      rcu_read_unlock();
+       if (dst)
+               IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ out_drop:
+diff --git a/sound/Kconfig b/sound/Kconfig
+index 1903c35d799e11..5848eedcc3c9f6 100644
+--- a/sound/Kconfig
++++ b/sound/Kconfig
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ menuconfig SOUND
+       tristate "Sound card support"
+-      depends on HAS_IOMEM || UML
++      depends on HAS_IOMEM || INDIRECT_IOMEM
+       help
+         If you have a sound card in your computer, i.e. if it can say more
+         than an occasional beep, say Y.
