commit:     eabb0eeb83a288eb30a7f95735245dbdf98879ac
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 18 19:56:24 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 18 19:56:24 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eabb0eeb

Linux patch 4.19.158

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1157_linux-4.19.158.patch | 3734 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3738 insertions(+)

diff --git a/0000_README b/0000_README
index 6ed2267..9824d3a 100644
--- a/0000_README
+++ b/0000_README
@@ -667,6 +667,10 @@ Patch:  1156_linux-4.19.157.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.157
 
+Patch:  1157_linux-4.19.158.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.158
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1157_linux-4.19.158.patch b/1157_linux-4.19.158.patch
new file mode 100644
index 0000000..43b384d
--- /dev/null
+++ b/1157_linux-4.19.158.patch
@@ -0,0 +1,3734 @@
+diff --git a/Makefile b/Makefile
+index 245bcd8dd7b72..698a9cc2864bd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 157
++SUBLEVEL = 158
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
+index 82290f212d8e7..e1eb662e0f9e0 100644
+--- a/arch/arm/include/asm/kprobes.h
++++ b/arch/arm/include/asm/kprobes.h
+@@ -52,20 +52,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+ 
+ /* optinsn template addresses */
+-extern __visible kprobe_opcode_t optprobe_template_entry;
+-extern __visible kprobe_opcode_t optprobe_template_val;
+-extern __visible kprobe_opcode_t optprobe_template_call;
+-extern __visible kprobe_opcode_t optprobe_template_end;
+-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+-extern __visible kprobe_opcode_t optprobe_template_add_sp;
+-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+-extern __visible kprobe_opcode_t optprobe_template_restore_end;
++extern __visible kprobe_opcode_t optprobe_template_entry[];
++extern __visible kprobe_opcode_t optprobe_template_val[];
++extern __visible kprobe_opcode_t optprobe_template_call[];
++extern __visible kprobe_opcode_t optprobe_template_end[];
++extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
++extern __visible kprobe_opcode_t optprobe_template_add_sp[];
++extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
++extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
++extern __visible kprobe_opcode_t optprobe_template_restore_end[];
+ 
+ #define MAX_OPTIMIZED_LENGTH  4
+ #define MAX_OPTINSN_SIZE                              \
+-      ((unsigned long)&optprobe_template_end -        \
+-       (unsigned long)&optprobe_template_entry)
++      ((unsigned long)optprobe_template_end - \
++       (unsigned long)optprobe_template_entry)
+ #define RELATIVEJUMP_SIZE     4
+ 
+ struct arch_optimized_insn {
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index 0dc23fc227ed2..cf08cb7267670 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -98,21 +98,21 @@ asm (
+                       "optprobe_template_end:\n");
+ 
+ #define TMPL_VAL_IDX \
+-      ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
+ #define TMPL_CALL_IDX \
+-      ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
+ #define TMPL_END_IDX \
+-      ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
+ #define TMPL_ADD_SP \
+-      ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_SUB_SP \
+-      ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_BEGIN \
+-      ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_ORIGN_INSN \
+-      ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_END \
+-      ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
++      ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
+ 
+ /*
+  * ARM can always optimize an instruction when using ARM ISA, except
+@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
+       }
+ 
+       /* Copy arch-dep-instance from template. */
+-      memcpy(code, (unsigned long *)&optprobe_template_entry,
++      memcpy(code, (unsigned long *)optprobe_template_entry,
+                       TMPL_END_IDX * sizeof(kprobe_opcode_t));
+ 
+       /* Adjust buffer according to instruction. */
+diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
+index 483a7130cf0e1..496c243de4ac3 100644
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -232,17 +232,19 @@ AES_ENTRY(aes_ctr_encrypt)
+       bmi             .Lctr1x
+       cmn             w6, #4                  /* 32 bit overflow? */
+       bcs             .Lctr1x
+-      ldr             q8, =0x30000000200000001        /* addends 1,2,3[,0] */
+-      dup             v7.4s, w6
++      add             w7, w6, #1
+       mov             v0.16b, v4.16b
+-      add             v7.4s, v7.4s, v8.4s
++      add             w8, w6, #2
+       mov             v1.16b, v4.16b
+-      rev32           v8.16b, v7.16b
++      add             w9, w6, #3
+       mov             v2.16b, v4.16b
++      rev             w7, w7
+       mov             v3.16b, v4.16b
+-      mov             v1.s[3], v8.s[0]
+-      mov             v2.s[3], v8.s[1]
+-      mov             v3.s[3], v8.s[2]
++      rev             w8, w8
++      mov             v1.s[3], w7
++      rev             w9, w9
++      mov             v2.s[3], w8
++      mov             v3.s[3], w9
+       ld1             {v5.16b-v7.16b}, [x20], #48     /* get 3 input blocks */
+       bl              aes_encrypt_block4x
+       eor             v0.16b, v5.16b, v0.16b
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 8e31dfd85de32..888f247c9261a 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -831,7 +831,7 @@ void __init smp_detect_cpus(void)
+  */
+ static void smp_start_secondary(void *cpuvoid)
+ {
+-      int cpu = smp_processor_id();
++      int cpu = raw_smp_processor_id();
+ 
+       S390_lowcore.last_update_clock = get_tod_clock();
+       S390_lowcore.restart_stack = (unsigned long) restart_stack;
+@@ -844,6 +844,7 @@ static void smp_start_secondary(void *cpuvoid)
+       set_cpu_flag(CIF_ASCE_PRIMARY);
+       set_cpu_flag(CIF_ASCE_SECONDARY);
+       cpu_init();
++      rcu_cpu_starting(cpu);
+       preempt_disable();
+       init_cpu_timer();
+       vtime_init();
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index bf554ed2fd51a..9e482fbdb28fb 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1240,6 +1240,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+       return 0;
+ }
+ 
++static bool is_spec_ib_user_controlled(void)
++{
++      return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++              spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++              spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
++              spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
++}
++
+ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+       switch (ctrl) {
+@@ -1247,17 +1255,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return 0;
+-              /*
+-               * Indirect branch speculation is always disabled in strict
+-               * mode. It can neither be enabled if it was force-disabled
+-               * by a  previous prctl call.
+ 
++              /*
++               * With strict mode for both IBPB and STIBP, the instruction
++               * code paths avoid checking this task flag and instead,
++               * unconditionally run the instruction. However, STIBP and IBPB
++               * are independent and either can be set to conditionally
++               * enabled regardless of the mode of the other.
++               *
++               * If either is set to conditional, allow the task flag to be
++               * updated, unless it was force-disabled by a previous prctl
++               * call. Currently, this is possible on an AMD CPU which has the
++               * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
++               * kernel is booted with 'spectre_v2_user=seccomp', then
++               * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
++               * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
+                */
+-              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++              if (!is_spec_ib_user_controlled() ||
+                   task_spec_ib_force_disable(task))
+                       return -EPERM;
++
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+               break;
+@@ -1270,10 +1287,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+-              if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++
++              if (!is_spec_ib_user_controlled())
+                       return 0;
++
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+                       task_set_spec_ib_force_disable(task);
+@@ -1336,20 +1353,17 @@ static int ib_prctl_get(struct task_struct *task)
+       if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+           spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+               return PR_SPEC_ENABLE;
+-      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+-              return PR_SPEC_DISABLE;
+-      else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+-          spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+-          spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+-          spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
++      else if (is_spec_ib_user_controlled()) {
+               if (task_spec_ib_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ib_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+-      } else
++      } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++              return PR_SPEC_DISABLE;
++      else
+               return PR_SPEC_NOT_AFFECTED;
+ }
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 52e1e71e81241..517318bb350cf 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -276,7 +276,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
+       }
+ }
+ 
+-static void nbd_size_update(struct nbd_device *nbd)
++static void nbd_size_update(struct nbd_device *nbd, bool start)
+ {
+       struct nbd_config *config = nbd->config;
+       struct block_device *bdev = bdget_disk(nbd->disk, 0);
+@@ -292,7 +292,8 @@ static void nbd_size_update(struct nbd_device *nbd)
+       if (bdev) {
+               if (bdev->bd_disk) {
+                       bd_set_size(bdev, config->bytesize);
+-                      set_blocksize(bdev, config->blksize);
++                      if (start)
++                              set_blocksize(bdev, config->blksize);
+               } else
+                       bdev->bd_invalidated = 1;
+               bdput(bdev);
+@@ -307,7 +308,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+       config->blksize = blocksize;
+       config->bytesize = blocksize * nr_blocks;
+       if (nbd->task_recv != NULL)
+-              nbd_size_update(nbd);
++              nbd_size_update(nbd, false);
+ }
+ 
+ static void nbd_complete_rq(struct request *req)
+@@ -1244,7 +1245,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+               args->index = i;
+               queue_work(nbd->recv_workq, &args->work);
+       }
+-      nbd_size_update(nbd);
++      nbd_size_update(nbd, true);
+       return error;
+ }
+ 
+@@ -1447,6 +1448,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
+       if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+                       bdev->bd_openers == 0)
+               nbd_disconnect_and_put(nbd);
++      bdput(bdev);
+ 
+       nbd_config_put(nbd);
+       nbd_put(nbd);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 80dedecfe15c5..98925d49c96be 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1257,7 +1257,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ 
+       fast_mix(fast_pool);
+       add_interrupt_bench(cycles);
+-      this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
+ 
+       if (unlikely(crng_init == 0)) {
+               if ((fast_pool->count >= 64) &&
+diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
+index 3e673ab22cb45..abd3beeb51589 100644
+--- a/drivers/char/tpm/eventlog/efi.c
++++ b/drivers/char/tpm/eventlog/efi.c
+@@ -43,6 +43,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+       log_size = log_tbl->size;
+       memunmap(log_tbl);
+ 
++      if (!log_size) {
++              pr_warn("UEFI TPM log area empty\n");
++              return -EIO;
++      }
++
+       log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+                          MEMREMAP_WB);
+       if (!log_tbl) {
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index f08949a5f6785..5a3a4f0953910 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -31,6 +31,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/kernel.h>
++#include <linux/dmi.h>
+ #include "tpm.h"
+ #include "tpm_tis_core.h"
+ 
+@@ -53,8 +54,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
+       return container_of(data, struct tpm_tis_tcg_phy, priv);
+ }
+ 
+-static bool interrupts = true;
+-module_param(interrupts, bool, 0444);
++static int interrupts = -1;
++module_param(interrupts, int, 0444);
+ MODULE_PARM_DESC(interrupts, "Enable interrupts");
+ 
+ static bool itpm;
+@@ -67,6 +68,28 @@ module_param(force, bool, 0444);
+ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+ #endif
+ 
++static int tpm_tis_disable_irq(const struct dmi_system_id *d)
++{
++      if (interrupts == -1) {
++              pr_notice("tpm_tis: %s detected: disabling interrupts.\n", 
d->ident);
++              interrupts = 0;
++      }
++
++      return 0;
++}
++
++static const struct dmi_system_id tpm_tis_dmi_table[] = {
++      {
++              .callback = tpm_tis_disable_irq,
++              .ident = "ThinkPad T490s",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
++              },
++      },
++      {}
++};
++
+ #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+ static int has_hid(struct acpi_device *dev, const char *hid)
+ {
+@@ -196,6 +219,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+       int irq = -1;
+       int rc;
+ 
++      dmi_check_system(tpm_tis_dmi_table);
++
+       rc = check_acpi_tpm2(dev);
+       if (rc)
+               return rc;
+diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
+index f953541e78901..634125747a039 100644
+--- a/drivers/gpio/gpio-pcie-idio-24.c
++++ b/drivers/gpio/gpio-pcie-idio-24.c
+@@ -28,6 +28,47 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
++/*
++ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
++ *
++ * Bit: Description
++ *   0: Enable Interrupt Sources (Bit 0)
++ *   1: Enable Interrupt Sources (Bit 1)
++ *   2: Generate Internal PCI Bus Internal SERR# Interrupt
++ *   3: Mailbox Interrupt Enable
++ *   4: Power Management Interrupt Enable
++ *   5: Power Management Interrupt
++ *   6: Slave Read Local Data Parity Check Error Enable
++ *   7: Slave Read Local Data Parity Check Error Status
++ *   8: Internal PCI Wire Interrupt Enable
++ *   9: PCI Express Doorbell Interrupt Enable
++ *  10: PCI Abort Interrupt Enable
++ *  11: Local Interrupt Input Enable
++ *  12: Retry Abort Enable
++ *  13: PCI Express Doorbell Interrupt Active
++ *  14: PCI Abort Interrupt Active
++ *  15: Local Interrupt Input Active
++ *  16: Local Interrupt Output Enable
++ *  17: Local Doorbell Interrupt Enable
++ *  18: DMA Channel 0 Interrupt Enable
++ *  19: DMA Channel 1 Interrupt Enable
++ *  20: Local Doorbell Interrupt Active
++ *  21: DMA Channel 0 Interrupt Active
++ *  22: DMA Channel 1 Interrupt Active
++ *  23: Built-In Self-Test (BIST) Interrupt Active
++ *  24: Direct Master was the Bus Master during a Master or Target Abort
++ *  25: DMA Channel 0 was the Bus Master during a Master or Target Abort
++ *  26: DMA Channel 1 was the Bus Master during a Master or Target Abort
++ *  27: Target Abort after internal 256 consecutive Master Retrys
++ *  28: PCI Bus wrote data to LCS_MBOX0
++ *  29: PCI Bus wrote data to LCS_MBOX1
++ *  30: PCI Bus wrote data to LCS_MBOX2
++ *  31: PCI Bus wrote data to LCS_MBOX3
++ */
++#define PLX_PEX8311_PCI_LCS_INTCSR  0x68
++#define INTCSR_INTERNAL_PCI_WIRE    BIT(8)
++#define INTCSR_LOCAL_INPUT          BIT(11)
++
+ /**
+  * struct idio_24_gpio_reg - GPIO device registers structure
+  * @out0_7:   Read: FET Outputs 0-7
+@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
+ struct idio_24_gpio {
+       struct gpio_chip chip;
+       raw_spinlock_t lock;
++      __u8 __iomem *plx;
+       struct idio_24_gpio_reg __iomem *reg;
+       unsigned long irq_mask;
+ };
+@@ -360,13 +402,13 @@ static void idio_24_irq_mask(struct irq_data *data)
+       unsigned long flags;
+       const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+       unsigned char new_irq_mask;
+-      const unsigned long bank_offset = bit_offset/8 * 8;
++      const unsigned long bank_offset = bit_offset / 8;
+       unsigned char cos_enable_state;
+ 
+       raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ 
+-      idio24gpio->irq_mask &= BIT(bit_offset);
+-      new_irq_mask = idio24gpio->irq_mask >> bank_offset;
++      idio24gpio->irq_mask &= ~BIT(bit_offset);
++      new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ 
+       if (!new_irq_mask) {
+               cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+@@ -389,12 +431,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
+       unsigned long flags;
+       unsigned char prev_irq_mask;
+       const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+-      const unsigned long bank_offset = bit_offset/8 * 8;
++      const unsigned long bank_offset = bit_offset / 8;
+       unsigned char cos_enable_state;
+ 
+       raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ 
+-      prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
++      prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+       idio24gpio->irq_mask |= BIT(bit_offset);
+ 
+       if (!prev_irq_mask) {
+@@ -481,6 +523,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       struct device *const dev = &pdev->dev;
+       struct idio_24_gpio *idio24gpio;
+       int err;
++      const size_t pci_plx_bar_index = 1;
+       const size_t pci_bar_index = 2;
+       const char *const name = pci_name(pdev);
+ 
+@@ -494,12 +537,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+               return err;
+       }
+ 
+-      err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
++      err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
+       if (err) {
+               dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+               return err;
+       }
+ 
++      idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
+       idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+ 
+       idio24gpio->chip.label = name;
+@@ -520,6 +564,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+       /* Software board reset */
+       iowrite8(0, &idio24gpio->reg->soft_reset);
++      /*
++       * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
++       * input
++       */
++      iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
++               idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
+ 
+       err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
+       if (err) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index d0fa2aac23888..ca66c2f797584 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -1086,22 +1086,19 @@ static int cik_sdma_soft_reset(void *handle)
+ {
+       u32 srbm_soft_reset = 0;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-      u32 tmp = RREG32(mmSRBM_STATUS2);
++      u32 tmp;
+ 
+-      if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
+-              /* sdma0 */
+-              tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+-              tmp |= SDMA0_F32_CNTL__HALT_MASK;
+-              WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+-              srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+-      }
+-      if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+-              /* sdma1 */
+-              tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+-              tmp |= SDMA0_F32_CNTL__HALT_MASK;
+-              WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+-              srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+-      }
++      /* sdma0 */
++      tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
++      tmp |= SDMA0_F32_CNTL__HALT_MASK;
++      WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
++      srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
++
++      /* sdma1 */
++      tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
++      tmp |= SDMA0_F32_CNTL__HALT_MASK;
++      WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
++      srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+ 
+       if (srbm_soft_reset) {
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 058898b321b8a..d8e624d64ae38 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1531,6 +1531,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+ 
++      tmp_result = smum_stop_smc(hwmgr);
++      PP_ASSERT_WITH_CODE((tmp_result == 0),
++                      "Failed to stop smc!", result = tmp_result);
++
+       tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force to switch arbf0!", result = tmp_result);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 6ee864455a12a..f59e1e737735f 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -216,6 +216,7 @@ struct pp_smumgr_func {
+       bool (*is_hw_avfs_present)(struct pp_hwmgr  *hwmgr);
+       int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
+       int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
++      int (*stop_smc)(struct pp_hwmgr *hwmgr);
+ };
+ 
+ struct pp_hwmgr_func {
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+index 82550a8a3a3fc..ef4f2392e2e7d 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
+ 
+ extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+ 
++extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index db87cb8930d24..c05bec5effb2e 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -2723,10 +2723,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+ 
+ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
+ {
+-      return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+-                                           CGS_IND_REG__SMC, FEATURE_STATUS,
+-                                           VOLTAGE_CONTROLLER_ON))
+-              ? true : false;
++      return ci_is_smc_ram_running(hwmgr);
+ }
+ 
+ static int ci_smu_init(struct pp_hwmgr *hwmgr)
+@@ -2934,6 +2931,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+       return 0;
+ }
+ 
++static void ci_reset_smc(struct pp_hwmgr *hwmgr)
++{
++      PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++                                SMC_SYSCON_RESET_CNTL,
++                                rst_reg, 1);
++}
++
++
++static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
++{
++      PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++                                SMC_SYSCON_CLOCK_CNTL_0,
++                                ck_disable, 1);
++}
++
++static int ci_stop_smc(struct pp_hwmgr *hwmgr)
++{
++      ci_reset_smc(hwmgr);
++      ci_stop_smc_clock(hwmgr);
++
++      return 0;
++}
++
+ const struct pp_smumgr_func ci_smu_funcs = {
+       .smu_init = ci_smu_init,
+       .smu_fini = ci_smu_fini,
+@@ -2957,4 +2977,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
+       .is_dpm_running = ci_is_dpm_running,
+       .update_dpm_settings = ci_update_dpm_settings,
+       .update_smc_table = ci_update_smc_table,
++      .stop_smc = ci_stop_smc,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index a6edd5df33b0f..20ecf994d47f3 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -213,3 +213,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
+ 
+       return -EINVAL;
+ }
++
++int smum_stop_smc(struct pp_hwmgr *hwmgr)
++{
++      if (hwmgr->smumgr_funcs->stop_smc)
++              return hwmgr->smumgr_funcs->stop_smc(hwmgr);
++
++      return 0;
++}
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 78eb109028091..076b6da44f461 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -350,6 +350,7 @@ int psb_irq_postinstall(struct drm_device *dev)
+ {
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
++      unsigned int i;
+ 
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+@@ -362,20 +363,12 @@ int psb_irq_postinstall(struct drm_device *dev)
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ 
+-      if (dev->vblank[0].enabled)
+-              psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-      else
+-              psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-      if (dev->vblank[1].enabled)
+-              psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-      else
+-              psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-      if (dev->vblank[2].enabled)
+-              psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+-      else
+-              psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++      for (i = 0; i < dev->num_crtcs; ++i) {
++              if (dev->vblank[i].enabled)
++                      psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++              else
++                      psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++      }
+ 
+       if (dev_priv->ops->hotplug_enable)
+               dev_priv->ops->hotplug_enable(dev, true);
+@@ -388,6 +381,7 @@ void psb_irq_uninstall(struct drm_device *dev)
+ {
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
++      unsigned int i;
+ 
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+@@ -396,14 +390,10 @@ void psb_irq_uninstall(struct drm_device *dev)
+ 
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ 
+-      if (dev->vblank[0].enabled)
+-              psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-      if (dev->vblank[1].enabled)
+-              psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-      if (dev->vblank[2].enabled)
+-              psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++      for (i = 0; i < dev->num_crtcs; ++i) {
++              if (dev->vblank[i].enabled)
++                      psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++      }
+ 
+       dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+                                 _PSB_IRQ_MSVDX_FLAG |
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 9ca0706a9d402..e5fc719a34e70 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy)
+ 
+       /* Refuse to balloon below the floor. */
+       if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+-              pr_warn("Balloon request will be partially fulfilled. %s\n",
++              pr_info("Balloon request will be partially fulfilled. %s\n",
+                       avail_pages < num_pages ? "Not enough memory." :
+                       "Balloon floor reached.");
+ 
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 859b06424e5c4..df6f3cc958e5e 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -410,7 +410,11 @@ extern bool amd_iommu_np_cache;
+ /* Only true if all IOMMUs support device IOTLBs */
+ extern bool amd_iommu_iotlb_sup;
+ 
+-#define MAX_IRQS_PER_TABLE    256
++/*
++ * AMD IOMMU hardware only support 512 IRTEs despite
++ * the architectural limitation of 2048 entries.
++ */
++#define MAX_IRQS_PER_TABLE    512
+ #define IRQ_TABLE_ALIGNMENT   128
+ 
+ struct irq_remap_table {
+diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
+index 69df27769c213..3ba8cfa4b3b7a 100644
+--- a/drivers/mfd/sprd-sc27xx-spi.c
++++ b/drivers/mfd/sprd-sc27xx-spi.c
+@@ -212,7 +212,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
+       }
+ 
+       ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
+-                                     IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
++                                     IRQF_ONESHOT, 0,
+                                      &ddata->irq_chip, &ddata->irq_data);
+       if (ret) {
+               dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
+@@ -228,9 +228,34 @@ static int sprd_pmic_probe(struct spi_device *spi)
+               return ret;
+       }
+ 
++      device_init_wakeup(&spi->dev, true);
+       return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++static int sprd_pmic_suspend(struct device *dev)
++{
++      struct sprd_pmic *ddata = dev_get_drvdata(dev);
++
++      if (device_may_wakeup(dev))
++              enable_irq_wake(ddata->irq);
++
++      return 0;
++}
++
++static int sprd_pmic_resume(struct device *dev)
++{
++      struct sprd_pmic *ddata = dev_get_drvdata(dev);
++
++      if (device_may_wakeup(dev))
++              disable_irq_wake(ddata->irq);
++
++      return 0;
++}
++#endif
++
++static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, 
sprd_pmic_resume);
++
+ static const struct of_device_id sprd_pmic_match[] = {
+       { .compatible = "sprd,sc2731", .data = &sc2731_data },
+       {},
+@@ -242,6 +267,7 @@ static struct spi_driver sprd_pmic_driver = {
+               .name = "sc27xx-pmic",
+               .bus = &spi_bus_type,
+               .of_match_table = sprd_pmic_match,
++              .pm = &sprd_pmic_pm_ops,
+       },
+       .probe = sprd_pmic_probe,
+ };
+diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
+index 64e318f589b42..0d23efcc74ffe 100644
+--- a/drivers/misc/mei/client.h
++++ b/drivers/misc/mei/client.h
+@@ -138,11 +138,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
+  *
+  * @cl: host client
+  *
+- * Return: mtu
++ * Return: mtu or 0 if client is not connected
+  */
+ static inline size_t mei_cl_mtu(const struct mei_cl *cl)
+ {
+-      return cl->me_cl->props.max_msg_length;
++      return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
+ }
+ 
+ /**
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c 
b/drivers/mmc/host/renesas_sdhi_core.c
+index 61f0faddfd889..e8ab582551f8e 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -764,6 +764,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
+ 
+       tmio_mmc_host_remove(host);
+       renesas_sdhi_clk_disable(host);
++      tmio_mmc_host_free(host);
+ 
+       return 0;
+ }
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 1545f2b299d06..1950b13f22dfc 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -493,9 +493,13 @@ struct sk_buff *__can_get_echo_skb(struct net_device 
*dev, unsigned int idx, u8
+                */
+               struct sk_buff *skb = priv->echo_skb[idx];
+               struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+-              u8 len = cf->len;
+ 
+-              *len_ptr = len;
++              /* get the real payload length for netdev statistics */
++              if (cf->can_id & CAN_RTR_FLAG)
++                      *len_ptr = 0;
++              else
++                      *len_ptr = cf->len;
++
+               priv->echo_skb[idx] = NULL;
+ 
+               return skb;
+@@ -520,7 +524,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, 
unsigned int idx)
+       if (!skb)
+               return 0;
+ 
+-      netif_rx(skb);
++      skb_get(skb);
++      if (netif_rx(skb) == NET_RX_SUCCESS)
++              dev_consume_skb_any(skb);
++      else
++              dev_kfree_skb_any(skb);
+ 
+       return len;
+ }
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 0be8db6ab3195..92fe345e48ab7 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -301,8 +301,7 @@ static const struct flexcan_devtype_data 
fsl_vf610_devtype_data = {
+ 
+ static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+-              FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+-              FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
++              FLEXCAN_QUIRK_BROKEN_PERR_STATE | 
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ };
+ 
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+diff --git a/drivers/net/can/peak_canfd/peak_canfd.c 
b/drivers/net/can/peak_canfd/peak_canfd.c
+index 5696d7e807513..4bc5d522c74bb 100644
+--- a/drivers/net/can/peak_canfd/peak_canfd.c
++++ b/drivers/net/can/peak_canfd/peak_canfd.c
+@@ -256,8 +256,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv 
*priv,
+               cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
+ 
+       /* if this frame is an echo, */
+-      if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
+-          !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
++      if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
+               unsigned long flags;
+ 
+               spin_lock_irqsave(&priv->echo_lock, flags);
+@@ -271,7 +270,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv 
*priv,
+               netif_wake_queue(priv->ndev);
+ 
+               spin_unlock_irqrestore(&priv->echo_lock, flags);
+-              return 0;
++
++              /* if this frame is only an echo, stop here. Otherwise,
++               * continue to push this application self-received frame into
++               * its own rx queue.
++               */
++              if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
++                      return 0;
+       }
+ 
+       /* otherwise, it should be pushed into rx fifo */
+diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
+index 5f7e97d54733c..5cf4171df1f42 100644
+--- a/drivers/net/can/rx-offload.c
++++ b/drivers/net/can/rx-offload.c
+@@ -281,7 +281,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload 
*offload,
+ 
+       if (skb_queue_len(&offload->skb_queue) >
+           offload->skb_queue_len_max) {
+-              kfree_skb(skb);
++              dev_kfree_skb_any(skb);
+               return -ENOBUFS;
+       }
+ 
+@@ -326,7 +326,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload 
*offload,
+ {
+       if (skb_queue_len(&offload->skb_queue) >
+           offload->skb_queue_len_max) {
+-              kfree_skb(skb);
++              dev_kfree_skb_any(skb);
+               return -ENOBUFS;
+       }
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index afc8d978124ef..db156a11e6db5 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -138,14 +138,55 @@ void peak_usb_get_ts_time(struct peak_time_ref 
*time_ref, u32 ts, ktime_t *time)
+       /* protect from getting time before setting now */
+       if (ktime_to_ns(time_ref->tv_host)) {
+               u64 delta_us;
++              s64 delta_ts = 0;
++
++              /* General case: dev_ts_1 < dev_ts_2 < ts, with:
++               *
++               * - dev_ts_1 = previous sync timestamp
++               * - dev_ts_2 = last sync timestamp
++               * - ts = event timestamp
++               * - ts_period = known sync period (theoretical)
++               *             ~ dev_ts2 - dev_ts1
++               * *but*:
++               *
++               * - time counters wrap (see adapter->ts_used_bits)
++               * - sometimes, dev_ts_1 < ts < dev_ts2
++               *
++               * "normal" case (sync time counters increase):
++               * must take into account case when ts wraps (tsw)
++               *
++               *      < ts_period > <          >
++               *     |             |            |
++               *  ---+--------+----+-------0-+--+-->
++               *     ts_dev_1 |    ts_dev_2  |
++               *              ts             tsw
++               */
++              if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
++                      /* case when event time (tsw) wraps */
++                      if (ts < time_ref->ts_dev_1)
++                              delta_ts = 1 << time_ref->adapter->ts_used_bits;
++
++              /* Otherwise, sync time counter (ts_dev_2) has wrapped:
++               * handle case when event time (tsn) hasn't.
++               *
++               *      < ts_period > <          >
++               *     |             |            |
++               *  ---+--------+--0-+---------+--+-->
++               *     ts_dev_1 |    ts_dev_2  |
++               *              tsn            ts
++               */
++              } else if (time_ref->ts_dev_1 < ts) {
++                      delta_ts = -(1 << time_ref->adapter->ts_used_bits);
++              }
+ 
+-              delta_us = ts - time_ref->ts_dev_2;
+-              if (ts < time_ref->ts_dev_2)
+-                      delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
++              /* add delay between last sync and event timestamps */
++              delta_ts += (signed int)(ts - time_ref->ts_dev_2);
+ 
+-              delta_us += time_ref->ts_total;
++              /* add time from beginning to last sync */
++              delta_ts += time_ref->ts_total;
+ 
+-              delta_us *= time_ref->adapter->us_per_ts_scale;
++              /* convert ticks number into microseconds */
++              delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
+               delta_us >>= time_ref->adapter->us_per_ts_shift;
+ 
+               *time = ktime_add_us(time_ref->tv_host_0, delta_us);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 41988358f63c8..19600d35aac55 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -476,12 +476,18 @@ static int pcan_usb_fd_decode_canmsg(struct 
pcan_usb_fd_if *usb_if,
+                                    struct pucan_msg *rx_msg)
+ {
+       struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
+-      struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
+-      struct net_device *netdev = dev->netdev;
++      struct peak_usb_device *dev;
++      struct net_device *netdev;
+       struct canfd_frame *cfd;
+       struct sk_buff *skb;
+       const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ 
++      if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
++              return -ENOMEM;
++
++      dev = usb_if->dev[pucan_msg_get_channel(rm)];
++      netdev = dev->netdev;
++
+       if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+               /* CANFD frame case */
+               skb = alloc_canfd_skb(netdev, &cfd);
+@@ -527,15 +533,21 @@ static int pcan_usb_fd_decode_status(struct 
pcan_usb_fd_if *usb_if,
+                                    struct pucan_msg *rx_msg)
+ {
+       struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
+-      struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+-      struct pcan_usb_fd_device *pdev =
+-                      container_of(dev, struct pcan_usb_fd_device, dev);
++      struct pcan_usb_fd_device *pdev;
+       enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+       enum can_state rx_state, tx_state;
+-      struct net_device *netdev = dev->netdev;
++      struct peak_usb_device *dev;
++      struct net_device *netdev;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+ 
++      if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
++              return -ENOMEM;
++
++      dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
++      pdev = container_of(dev, struct pcan_usb_fd_device, dev);
++      netdev = dev->netdev;
++
+       /* nothing should be sent while in BUS_OFF state */
+       if (dev->can.state == CAN_STATE_BUS_OFF)
+               return 0;
+@@ -587,9 +599,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if 
*usb_if,
+                                   struct pucan_msg *rx_msg)
+ {
+       struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
+-      struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+-      struct pcan_usb_fd_device *pdev =
+-                      container_of(dev, struct pcan_usb_fd_device, dev);
++      struct pcan_usb_fd_device *pdev;
++      struct peak_usb_device *dev;
++
++      if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
++              return -EINVAL;
++
++      dev = usb_if->dev[pucan_ermsg_get_channel(er)];
++      pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ 
+       /* keep a trace of tx and rx error counters for later use */
+       pdev->bec.txerr = er->tx_err_cnt;
+@@ -603,11 +620,17 @@ static int pcan_usb_fd_decode_overrun(struct 
pcan_usb_fd_if *usb_if,
+                                     struct pucan_msg *rx_msg)
+ {
+       struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
+-      struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+-      struct net_device *netdev = dev->netdev;
++      struct peak_usb_device *dev;
++      struct net_device *netdev;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+ 
++      if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
++              return -EINVAL;
++
++      dev = usb_if->dev[pufd_omsg_get_channel(ov)];
++      netdev = dev->netdev;
++
+       /* allocate an skb to store the error frame */
+       skb = alloc_can_err_skb(netdev, &cf);
+       if (!skb)
+@@ -724,6 +747,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device 
*dev,
+       u16 tx_msg_size, tx_msg_flags;
+       u8 can_dlc;
+ 
++      if (cfd->len > CANFD_MAX_DLEN)
++              return -EINVAL;
++
+       tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
+       tx_msg->size = cpu_to_le16(tx_msg_size);
+       tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c 
b/drivers/net/ethernet/microchip/lan743x_main.c
+index 208341541087e..085fdceb3821b 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -672,14 +672,12 @@ clean_up:
+ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+                           u32 select, u32 addr, u32 length, u32 *buf)
+ {
+-      int ret = -EIO;
+       u32 dp_sel;
+       int i;
+ 
+-      mutex_lock(&adapter->dp_lock);
+       if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+                                    1, 40, 100, 100))
+-              goto unlock;
++              return -EIO;
+       dp_sel = lan743x_csr_read(adapter, DP_SEL);
+       dp_sel &= ~DP_SEL_MASK_;
+       dp_sel |= select;
+@@ -691,13 +689,10 @@ static int lan743x_dp_write(struct lan743x_adapter 
*adapter,
+               lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
+               if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+                                            1, 40, 100, 100))
+-                      goto unlock;
++                      return -EIO;
+       }
+-      ret = 0;
+ 
+-unlock:
+-      mutex_unlock(&adapter->dp_lock);
+-      return ret;
++      return 0;
+ }
+ 
+ static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
+@@ -2679,7 +2674,6 @@ static int lan743x_hardware_init(struct lan743x_adapter 
*adapter,
+ 
+       adapter->intr.irq = adapter->pdev->irq;
+       lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
+-      mutex_init(&adapter->dp_lock);
+ 
+       ret = lan743x_gpio_init(adapter);
+       if (ret)
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h 
b/drivers/net/ethernet/microchip/lan743x_main.h
+index 2d6eea18973e8..77273be2d1ee0 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -702,9 +702,6 @@ struct lan743x_adapter {
+       struct lan743x_csr      csr;
+       struct lan743x_intr     intr;
+ 
+-      /* lock, used to prevent concurrent access to data port */
+-      struct mutex            dp_lock;
+-
+       struct lan743x_gpio     gpio;
+       struct lan743x_ptp      ptp;
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169.c 
b/drivers/net/ethernet/realtek/r8169.c
+index 1555c0dae490b..c7ce167c67a08 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6274,7 +6274,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private 
*tp,
+               opts[1] |= transport_offset << TCPHO_SHIFT;
+       } else {
+               if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+-                      return !eth_skb_pad(skb);
++                      /* eth_skb_pad would free the skb on error */
++                      return !__skb_put_padto(skb, ETH_ZLEN, false);
+       }
+ 
+       return true;
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index b55eeb8f8fa3a..93899a7be9c57 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -336,8 +336,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct 
net_device *dev)
+       return ret;
+ }
+ 
+-static int vrf_finish_direct(struct net *net, struct sock *sk,
+-                           struct sk_buff *skb)
++static void vrf_finish_direct(struct sk_buff *skb)
+ {
+       struct net_device *vrf_dev = skb->dev;
+ 
+@@ -356,7 +355,8 @@ static int vrf_finish_direct(struct net *net, struct sock 
*sk,
+               skb_pull(skb, ETH_HLEN);
+       }
+ 
+-      return 1;
++      /* reset skb device */
++      nf_reset(skb);
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -435,15 +435,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct 
net_device *vrf_dev,
+       return skb;
+ }
+ 
++static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
++                                   struct sk_buff *skb)
++{
++      vrf_finish_direct(skb);
++
++      return vrf_ip6_local_out(net, sk, skb);
++}
++
+ static int vrf_output6_direct(struct net *net, struct sock *sk,
+                             struct sk_buff *skb)
+ {
++      int err = 1;
++
+       skb->protocol = htons(ETH_P_IPV6);
+ 
+-      return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+-                          net, sk, skb, NULL, skb->dev,
+-                          vrf_finish_direct,
+-                          !(IPCB(skb)->flags & IPSKB_REROUTED));
++      if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++              err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
++                            NULL, skb->dev, vrf_output6_direct_finish);
++
++      if (likely(err == 1))
++              vrf_finish_direct(skb);
++
++      return err;
++}
++
++static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
++                                   struct sk_buff *skb)
++{
++      int err;
++
++      err = vrf_output6_direct(net, sk, skb);
++      if (likely(err == 1))
++              err = vrf_ip6_local_out(net, sk, skb);
++
++      return err;
+ }
+ 
+ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+@@ -456,18 +482,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct 
net_device *vrf_dev,
+       skb->dev = vrf_dev;
+ 
+       err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+-                    skb, NULL, vrf_dev, vrf_output6_direct);
++                    skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
+ 
+       if (likely(err == 1))
+               err = vrf_output6_direct(net, sk, skb);
+ 
+-      /* reset skb device */
+       if (likely(err == 1))
+-              nf_reset(skb);
+-      else
+-              skb = NULL;
++              return skb;
+ 
+-      return skb;
++      return NULL;
+ }
+ 
+ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+@@ -649,15 +672,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct 
net_device *vrf_dev,
+       return skb;
+ }
+ 
++static int vrf_output_direct_finish(struct net *net, struct sock *sk,
++                                  struct sk_buff *skb)
++{
++      vrf_finish_direct(skb);
++
++      return vrf_ip_local_out(net, sk, skb);
++}
++
+ static int vrf_output_direct(struct net *net, struct sock *sk,
+                            struct sk_buff *skb)
+ {
++      int err = 1;
++
+       skb->protocol = htons(ETH_P_IP);
+ 
+-      return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+-                          net, sk, skb, NULL, skb->dev,
+-                          vrf_finish_direct,
+-                          !(IPCB(skb)->flags & IPSKB_REROUTED));
++      if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++              err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
++                            NULL, skb->dev, vrf_output_direct_finish);
++
++      if (likely(err == 1))
++              vrf_finish_direct(skb);
++
++      return err;
++}
++
++static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
++                                  struct sk_buff *skb)
++{
++      int err;
++
++      err = vrf_output_direct(net, sk, skb);
++      if (likely(err == 1))
++              err = vrf_ip_local_out(net, sk, skb);
++
++      return err;
+ }
+ 
+ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+@@ -670,18 +719,15 @@ static struct sk_buff *vrf_ip_out_direct(struct 
net_device *vrf_dev,
+       skb->dev = vrf_dev;
+ 
+       err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+-                    skb, NULL, vrf_dev, vrf_output_direct);
++                    skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
+ 
+       if (likely(err == 1))
+               err = vrf_output_direct(net, sk, skb);
+ 
+-      /* reset skb device */
+       if (likely(err == 1))
+-              nf_reset(skb);
+-      else
+-              skb = NULL;
++              return skb;
+ 
+-      return skb;
++      return NULL;
+ }
+ 
+ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
+index f6b000ddcd151..b7bfc0caa5dc8 100644
+--- a/drivers/net/wan/cosa.c
++++ b/drivers/net/wan/cosa.c
+@@ -902,6 +902,7 @@ static ssize_t cosa_write(struct file *file,
+                       chan->tx_status = 1;
+                       spin_unlock_irqrestore(&cosa->lock, flags);
+                       up(&chan->wsem);
++                      kfree(kbuf);
+                       return -ERESTARTSYS;
+               }
+       }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 
b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index f19393e584dc9..d567fbe79cffa 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -973,7 +973,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+       struct ath_htc_rx_status *rxstatus;
+       struct ath_rx_status rx_stats;
+       bool decrypt_error = false;
+-      __be16 rs_datalen;
++      u16 rs_datalen;
+       bool is_phyerr;
+ 
+       if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index c42aebba35ab8..30806dd357350 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -975,11 +975,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
+  */
+ bool of_dma_is_coherent(struct device_node *np)
+ {
+-      struct device_node *node = of_node_get(np);
++      struct device_node *node;
+ 
+       if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+               return true;
+ 
++      node = of_node_get(np);
++
+       while (node) {
+               if (of_property_read_bool(node, "dma-coherent")) {
+                       of_node_put(node);
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c 
b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index aefe3c33dffd8..8dec302dc067a 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -458,13 +458,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, 
unsigned int function,
+ static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
+ {
+       /*
+-       * The signal type is GPIO if the signal name has "GPIO" as a prefix.
++       * The signal type is GPIO if the signal name has "GPI" as a prefix.
+        * strncmp (rather than strcmp) is used to implement the prefix
+        * requirement.
+        *
+-       * expr->signal might look like "GPIOT3" in the GPIO case.
++       * expr->signal might look like "GPIOB1" in the GPIO case.
++       * expr->signal might look like "GPIT0" in the GPI case.
+        */
+-      return strncmp(expr->signal, "GPIO", 4) == 0;
++      return strncmp(expr->signal, "GPI", 3) == 0;
+ }
+ 
+ static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c 
b/drivers/pinctrl/intel/pinctrl-intel.c
+index 89ff2795a8b55..5e0adb00b4307 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -621,6 +621,10 @@ static int intel_config_set_pull(struct intel_pinctrl 
*pctrl, unsigned pin,
+ 
+               value |= PADCFG1_TERM_UP;
+ 
++              /* Set default strength value in case none is given */
++              if (arg == 1)
++                      arg = 5000;
++
+               switch (arg) {
+               case 20000:
+                       value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+@@ -643,6 +647,10 @@ static int intel_config_set_pull(struct intel_pinctrl 
*pctrl, unsigned pin,
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+ 
++              /* Set default strength value in case none is given */
++              if (arg == 1)
++                      arg = 5000;
++
+               switch (arg) {
+               case 20000:
+                       value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index b1ffdd3f6d076..d6255049e5196 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -157,7 +157,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, 
unsigned offset,
+                       pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+                       pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+               } else if (debounce < 250000) {
+-                      time = debounce / 15600;
++                      time = debounce / 15625;
+                       pin_reg |= time & DB_TMR_OUT_MASK;
+                       pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+                       pin_reg |= BIT(DB_TMR_LARGE_OFF);
+@@ -167,14 +167,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, 
unsigned offset,
+                       pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+                       pin_reg |= BIT(DB_TMR_LARGE_OFF);
+               } else {
+-                      pin_reg &= ~DB_CNTRl_MASK;
++                      pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+                       ret = -EINVAL;
+               }
+       } else {
+               pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+               pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+               pin_reg &= ~DB_TMR_OUT_MASK;
+-              pin_reg &= ~DB_CNTRl_MASK;
++              pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+       }
+       writel(pin_reg, gpio_dev->base + offset * 4);
+       raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index c290c89421314..ad5235ca8e4ee 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3405,6 +3405,8 @@ static int _regulator_get_voltage(struct regulator_dev 
*rdev)
+               ret = rdev->desc->fixed_uV;
+       } else if (rdev->supply) {
+               ret = _regulator_get_voltage(rdev->supply->rdev);
++      } else if (rdev->supply_name) {
++              return -EPROBE_DEFER;
+       } else {
+               return -EINVAL;
+       }
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c 
b/drivers/scsi/device_handler/scsi_dh_alua.c
+index c95c782b93a53..60c48dc5d9453 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -672,8 +672,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct 
alua_port_group *pg)
+                                       rcu_read_lock();
+                                       list_for_each_entry_rcu(h,
+                                               &tmp_pg->dh_list, node) {
+-                                              /* h->sdev should always be 
valid */
+-                                              BUG_ON(!h->sdev);
++                                              if (!h->sdev)
++                                                      continue;
+                                               h->sdev->access_state = desc[0];
+                                       }
+                                       rcu_read_unlock();
+@@ -719,7 +719,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct 
alua_port_group *pg)
+                       pg->expiry = 0;
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(h, &pg->dh_list, node) {
+-                              BUG_ON(!h->sdev);
++                              if (!h->sdev)
++                                      continue;
+                               h->sdev->access_state =
+                                       (pg->state & SCSI_ACCESS_STATE_MASK);
+                               if (pg->pref)
+@@ -1160,7 +1161,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
+       spin_lock(&h->pg_lock);
+       pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
+       rcu_assign_pointer(h->pg, NULL);
+-      h->sdev = NULL;
+       spin_unlock(&h->pg_lock);
+       if (pg) {
+               spin_lock_irq(&pg->lock);
+@@ -1169,6 +1169,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
+               kref_put(&pg->kref, release_port_group);
+       }
+       sdev->handler_data = NULL;
++      synchronize_rcu();
+       kfree(h);
+ }
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 11de2198bb87d..0fe21cbdf0ca7 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8781,7 +8781,7 @@ reinit_after_soft_reset:
+       /* hook into SCSI subsystem */
+       rc = hpsa_scsi_add_host(h);
+       if (rc)
+-              goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
++              goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, 
lu, aer/h */
+ 
+       /* Monitor the controller for firmware lockups */
+       h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+@@ -8796,6 +8796,8 @@ reinit_after_soft_reset:
+                               HPSA_EVENT_MONITOR_INTERVAL);
+       return 0;
+ 
++clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
++      kfree(h->lastlogicals);
+ clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+       hpsa_free_performant_mode(h);
+       h->access.set_intr_mask(h, HPSA_INTR_OFF);
+diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
+index 7448744cc5159..12a5be95457f8 100644
+--- a/drivers/staging/erofs/inode.c
++++ b/drivers/staging/erofs/inode.c
+@@ -53,11 +53,9 @@ static int read_inode(struct inode *inode, void *data)
+               i_gid_write(inode, le32_to_cpu(v2->i_gid));
+               set_nlink(inode, le32_to_cpu(v2->i_nlink));
+ 
+-              /* ns timestamp */
+-              inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+-                      le64_to_cpu(v2->i_ctime);
+-              inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+-                      le32_to_cpu(v2->i_ctime_nsec);
++              /* extended inode has its own timestamp */
++              inode->i_ctime.tv_sec = le64_to_cpu(v2->i_ctime);
++              inode->i_ctime.tv_nsec = le32_to_cpu(v2->i_ctime_nsec);
+ 
+               inode->i_size = le64_to_cpu(v2->i_size);
+       } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
+@@ -83,11 +81,9 @@ static int read_inode(struct inode *inode, void *data)
+               i_gid_write(inode, le16_to_cpu(v1->i_gid));
+               set_nlink(inode, le16_to_cpu(v1->i_nlink));
+ 
+-              /* use build time to derive all file time */
+-              inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+-                      sbi->build_time;
+-              inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+-                      sbi->build_time_nsec;
++              /* use build time for compact inodes */
++              inode->i_ctime.tv_sec = sbi->build_time;
++              inode->i_ctime.tv_nsec = sbi->build_time_nsec;
+ 
+               inode->i_size = le32_to_cpu(v1->i_size);
+       } else {
+@@ -97,6 +93,11 @@ static int read_inode(struct inode *inode, void *data)
+               return -EIO;
+       }
+ 
++      inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
++      inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
++      inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
++      inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
++
+       /* measure inode.i_blocks as the generic filesystem */
+       inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+       return 0;
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index d436a1534fc2b..384623c49cfee 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -408,12 +408,23 @@ static int ring_request_msix(struct tb_ring *ring, bool 
no_suspend)
+ 
+       ring->vector = ret;
+ 
+-      ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+-      if (ring->irq < 0)
+-              return ring->irq;
++      ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
++      if (ret < 0)
++              goto err_ida_remove;
++
++      ring->irq = ret;
+ 
+       irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+-      return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
++      ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
++      if (ret)
++              goto err_ida_remove;
++
++      return 0;
++
++err_ida_remove:
++      ida_simple_remove(&nhi->msix_ida, ring->vector);
++
++      return ret;
+ }
+ 
+ static void ring_release_msix(struct tb_ring *ring)
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index befe754906979..4eb51a123a6fd 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -774,6 +774,7 @@ static void enumerate_services(struct tb_xdomain *xd)
+ 
+               id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+               if (id < 0) {
++                      kfree(svc->key);
+                       kfree(svc);
+                       break;
+               }
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 3926be6591471..0e3e16c51d3a9 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev)
+       return retval;
+ }
+ 
+-static void uio_free_minor(struct uio_device *idev)
++static void uio_free_minor(unsigned long minor)
+ {
+       mutex_lock(&minor_lock);
+-      idr_remove(&uio_idr, idev->minor);
++      idr_remove(&uio_idr, minor);
+       mutex_unlock(&minor_lock);
+ }
+ 
+@@ -988,7 +988,7 @@ err_request_irq:
+ err_uio_dev_add_attributes:
+       device_del(&idev->dev);
+ err_device_create:
+-      uio_free_minor(idev);
++      uio_free_minor(idev->minor);
+       put_device(&idev->dev);
+       return ret;
+ }
+@@ -1002,11 +1002,13 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
+ void uio_unregister_device(struct uio_info *info)
+ {
+       struct uio_device *idev;
++      unsigned long minor;
+ 
+       if (!info || !info->uio_dev)
+               return;
+ 
+       idev = info->uio_dev;
++      minor = idev->minor;
+ 
+       mutex_lock(&idev->info_lock);
+       uio_dev_del_attributes(idev);
+@@ -1019,7 +1021,7 @@ void uio_unregister_device(struct uio_info *info)
+ 
+       device_unregister(&idev->dev);
+ 
+-      uio_free_minor(idev);
++      uio_free_minor(minor);
+ 
+       return;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 08751d1a765ff..e0d8da4e3967b 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1738,6 +1738,15 @@ static const struct usb_device_id acm_ids[] = {
+       { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
++      { USB_DEVICE(0x045b, 0x023c),   /* Renesas USB Download mode */
++      .driver_info = DISABLE_ECHO,    /* Don't echo banner */
++      },
++      { USB_DEVICE(0x045b, 0x0248),   /* Renesas USB Download mode */
++      .driver_info = DISABLE_ECHO,    /* Don't echo banner */
++      },
++      { USB_DEVICE(0x045b, 0x024D),   /* Renesas USB Download mode */
++      .driver_info = DISABLE_ECHO,    /* Don't echo banner */
++      },
+       { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; 
[email protected] */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f0d2f0a4e9908..6ab5c48f5d873 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2386,6 +2386,11 @@ static int 
dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+               ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+                               status);
+ 
++      req->request.actual = req->request.length - req->remaining;
++
++      if (!dwc3_gadget_ep_request_completed(req))
++              goto out;
++
+       if (req->needs_extra_trb) {
+               unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ 
+@@ -2401,13 +2406,6 @@ static int 
dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+               req->needs_extra_trb = false;
+       }
+ 
+-      req->request.actual = req->request.length - req->remaining;
+-
+-      if (!dwc3_gadget_ep_request_completed(req)) {
+-              __dwc3_gadget_kick_transfer(dep);
+-              goto out;
+-      }
+-
+       dwc3_gadget_giveback(dep, req, status);
+ 
+ out:
+@@ -2430,6 +2428,24 @@ static void 
dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
+       }
+ }
+ 
++static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
++{
++      struct dwc3_request     *req;
++
++      if (!list_empty(&dep->pending_list))
++              return true;
++
++      /*
++       * We only need to check the first entry of the started list. We can
++       * assume the completed requests are removed from the started list.
++       */
++      req = next_request(&dep->started_list);
++      if (!req)
++              return false;
++
++      return !dwc3_gadget_ep_request_completed(req);
++}
++
+ static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
+               const struct dwc3_event_depevt *event)
+ {
+@@ -2459,6 +2475,8 @@ static void 
dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
+ 
+       if (stop)
+               dwc3_stop_active_transfer(dep, true, true);
++      else if (dwc3_gadget_ep_should_continue(dep))
++              __dwc3_gadget_kick_transfer(dep);
+ 
+       /*
+        * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+diff --git a/drivers/usb/gadget/udc/goku_udc.c 
b/drivers/usb/gadget/udc/goku_udc.c
+index c3721225b61ed..b706ad3034bc1 100644
+--- a/drivers/usb/gadget/udc/goku_udc.c
++++ b/drivers/usb/gadget/udc/goku_udc.c
+@@ -1757,6 +1757,7 @@ static int goku_probe(struct pci_dev *pdev, const struct 
pci_device_id *id)
+               goto err;
+       }
+ 
++      pci_set_drvdata(pdev, dev);
+       spin_lock_init(&dev->lock);
+       dev->pdev = pdev;
+       dev->gadget.ops = &goku_ops;
+@@ -1790,7 +1791,6 @@ static int goku_probe(struct pci_dev *pdev, const struct 
pci_device_id *id)
+       }
+       dev->regs = (struct goku_udc_regs __iomem *) base;
+ 
+-      pci_set_drvdata(pdev, dev);
+       INFO(dev, "%s\n", driver_desc);
+       INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
+       INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
+index 3c4abb5a1c3fc..73aba464b66ab 100644
+--- a/drivers/usb/host/xhci-histb.c
++++ b/drivers/usb/host/xhci-histb.c
+@@ -241,7 +241,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
+       /* Initialize dma_mask and coherent_dma_mask to 32-bits */
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+-              return ret;
++              goto disable_pm;
+ 
+       hcd = usb_create_hcd(driver, dev, dev_name(dev));
+       if (!hcd) {
+diff --git a/drivers/vfio/platform/vfio_platform_common.c 
b/drivers/vfio/platform/vfio_platform_common.c
+index c0cd824be2b76..460760d0becfe 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -273,7 +273,7 @@ static int vfio_platform_open(void *device_data)
+ 
+               ret = pm_runtime_get_sync(vdev->device);
+               if (ret < 0)
+-                      goto err_pm;
++                      goto err_rst;
+ 
+               ret = vfio_platform_call_reset(vdev, &extra_dbg);
+               if (ret && vdev->reset_required) {
+@@ -290,7 +290,6 @@ static int vfio_platform_open(void *device_data)
+ 
+ err_rst:
+       pm_runtime_put(vdev->device);
+-err_pm:
+       vfio_platform_irq_cleanup(vdev);
+ err_irq:
+       vfio_platform_regions_cleanup(vdev);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 36c0490156ac5..4d1d2657d70cd 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -54,6 +54,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
+       ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+       if (ret) {
+ no_valid_dev_replace_entry_found:
++              /*
++               * We don't have a replace item or it's corrupted.  If there is
++               * a replace target, fail the mount.
++               */
++              if (btrfs_find_device(fs_info->fs_devices,
++                                    BTRFS_DEV_REPLACE_DEVID, NULL, NULL, 
false)) {
++                      btrfs_err(fs_info,
++                      "found replace target device without a valid replace 
item");
++                      ret = -EUCLEAN;
++                      goto out;
++              }
+               ret = 0;
+               dev_replace->replace_state =
+                       BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
+@@ -107,8 +118,19 @@ no_valid_dev_replace_entry_found:
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+-              dev_replace->srcdev = NULL;
+-              dev_replace->tgtdev = NULL;
++              /*
++               * We don't have an active replace item but if there is a
++               * replace target, fail the mount.
++               */
++              if (btrfs_find_device(fs_info->fs_devices,
++                                    BTRFS_DEV_REPLACE_DEVID, NULL, NULL, 
false)) {
++                      btrfs_err(fs_info,
++                      "replace devid present without an active replace item");
++                      ret = -EUCLEAN;
++              } else {
++                      dev_replace->srcdev = NULL;
++                      dev_replace->tgtdev = NULL;
++              }
+               break;
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 301111922a1a2..dabf153843e90 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3913,6 +3913,10 @@ retry:
+                       if (!ret) {
+                               free_extent_buffer(eb);
+                               continue;
++                      } else if (ret < 0) {
++                              done = 1;
++                              free_extent_buffer(eb);
++                              break;
+                       }
+ 
+                       ret = write_one_eb(eb, fs_info, wbc, &epd);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 01a90fa03c24f..f3658d6ea6571 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1239,6 +1239,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
+       u64 page_start;
+       u64 page_end;
+       u64 page_cnt;
++      u64 start = (u64)start_index << PAGE_SHIFT;
+       int ret;
+       int i;
+       int i_done;
+@@ -1255,8 +1256,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
+       page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
+ 
+       ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
+-                      start_index << PAGE_SHIFT,
+-                      page_cnt << PAGE_SHIFT);
++                      start, page_cnt << PAGE_SHIFT);
+       if (ret)
+               return ret;
+       i_done = 0;
+@@ -1346,8 +1346,7 @@ again:
+               btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               btrfs_delalloc_release_space(inode, data_reserved,
+-                              start_index << PAGE_SHIFT,
+-                              (page_cnt - i_done) << PAGE_SHIFT, true);
++                              start, (page_cnt - i_done) << PAGE_SHIFT, true);
+       }
+ 
+ 
+@@ -1374,8 +1373,7 @@ out:
+               put_page(pages[i]);
+       }
+       btrfs_delalloc_release_space(inode, data_reserved,
+-                      start_index << PAGE_SHIFT,
+-                      page_cnt << PAGE_SHIFT, true);
++                      start, page_cnt << PAGE_SHIFT, true);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
+       extent_changeset_free(data_reserved);
+       return ret;
+@@ -4217,6 +4215,8 @@ process_slot:
+                       ret = -EINTR;
+                       goto out;
+               }
++
++              cond_resched();
+       }
+       ret = 0;
+ 
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 5dec52bd2897b..b26739d0e991b 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -854,6 +854,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 
bytenr, u64 num_bytes,
+ "dropping a ref for a root that doesn't have a ref on the block");
+                       dump_block_entry(fs_info, be);
+                       dump_ref_action(fs_info, ra);
++                      kfree(ref);
+                       kfree(ra);
+                       goto out_unlock;
+               }
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 05daa2b816c31..7e000d061813c 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -974,22 +974,13 @@ again:
+                       continue;
+               }
+ 
+-              if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
+-                      /*
+-                       * In the first step, keep the device which has
+-                       * the correct fsid and the devid that is used
+-                       * for the dev_replace procedure.
+-                       * In the second step, the dev_replace state is
+-                       * read from the device tree and it is known
+-                       * whether the procedure is really active or
+-                       * not, which means whether this device is
+-                       * used or whether it should be removed.
+-                       */
+-                      if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+-                                                &device->dev_state)) {
+-                              continue;
+-                      }
+-              }
++              /*
++               * We have already validated the presence of 
BTRFS_DEV_REPLACE_DEVID,
++               * in btrfs_init_dev_replace() so just continue.
++               */
++              if (device->devid == BTRFS_DEV_REPLACE_DEVID)
++                      continue;
++
+               if (device->bdev) {
+                       blkdev_put(device->bdev, device->mode);
+                       device->bdev = NULL;
+@@ -998,9 +989,6 @@ again:
+               if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+                       list_del_init(&device->dev_alloc_list);
+                       clear_bit(BTRFS_DEV_STATE_WRITEABLE, 
&device->dev_state);
+-                      if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+-                                    &device->dev_state))
+-                              fs_devices->rw_devices--;
+               }
+               list_del_init(&device->dev_list);
+               fs_devices->num_devices--;
+@@ -2459,9 +2447,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, 
const char *device_path
+       btrfs_set_super_num_devices(fs_info->super_copy,
+                                   orig_super_num_devices + 1);
+ 
+-      /* add sysfs device entry */
+-      btrfs_sysfs_add_device_link(fs_devices, device);
+-
+       /*
+        * we've got more storage, clear any full flags on the space
+        * infos
+@@ -2469,6 +2454,10 @@ int btrfs_init_new_device(struct btrfs_fs_info 
*fs_info, const char *device_path
+       btrfs_clear_space_info_full(fs_info);
+ 
+       mutex_unlock(&fs_info->chunk_mutex);
++
++      /* Add sysfs device entry */
++      btrfs_sysfs_add_device_link(fs_devices, device);
++
+       mutex_unlock(&fs_devices->device_list_mutex);
+ 
+       if (seeding_dev) {
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index a2b2355e7f019..9986817532b10 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -501,7 +501,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, 
int srclen,
+               else if (map_chars == SFM_MAP_UNI_RSVD) {
+                       bool end_of_string;
+ 
+-                      if (i == srclen - 1)
++                      /**
++                       * Remap spaces and periods found at the end of every
++                       * component of the path. The special cases of '.' and
++                       * '..' do not need to be dealt with explicitly because
++                       * they are addressed in namei.c:link_path_walk().
++                       **/
++                      if ((i == srclen - 1) || (source[i+1] == '\\'))
+                               end_of_string = true;
+                       else
+                               end_of_string = false;
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 4572cb0579518..c952461876595 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1921,6 +1921,7 @@ int ext4_inline_data_truncate(struct inode *inode, int 
*has_inline)
+ 
+       ext4_write_lock_xattr(inode, &no_expand);
+       if (!ext4_has_inline_data(inode)) {
++              ext4_write_unlock_xattr(inode, &no_expand);
+               *has_inline = 0;
+               ext4_journal_stop(handle);
+               return 0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6338ca95d8b3a..ee96f504ed782 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1748,8 +1748,8 @@ static const struct mount_opts {
+       {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
+                      EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
+                                                       MOPT_CLEAR | MOPT_Q},
+-      {Opt_usrjquota, 0, MOPT_Q},
+-      {Opt_grpjquota, 0, MOPT_Q},
++      {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
++      {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
+       {Opt_offusrjquota, 0, MOPT_Q},
+       {Opt_offgrpjquota, 0, MOPT_Q},
+       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index c94c4ac1ae78b..de9b561b1c385 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -739,9 +739,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
+               }
+ 
+               gfs2_free_clones(rgd);
++              return_all_reservations(rgd);
+               kfree(rgd->rd_bits);
+               rgd->rd_bits = NULL;
+-              return_all_reservations(rgd);
+               kmem_cache_free(gfs2_rgrpd_cachep, rgd);
+       }
+ }
+@@ -1387,6 +1387,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+ 
++      if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
++              return -EROFS;
++
+       if (!blk_queue_discard(q))
+               return -EOPNOTSUPP;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index a971862b186e3..22cd68bd8c9b0 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -934,6 +934,7 @@ restart:
+       gfs2_jindex_free(sdp);
+       /*  Take apart glock structures and buffer lists  */
+       gfs2_gl_hash_clear(sdp);
++      truncate_inode_pages_final(&sdp->sd_aspace);
+       gfs2_delete_debugfs_file(sdp);
+       /*  Unmount the locking protocol  */
+       gfs2_lm_unmount(sdp);
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 2658d91c1f7b6..09bc2cf5f61cd 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1747,6 +1747,7 @@ static void ocfs2_inode_init_once(void *data)
+ 
+       oi->ip_blkno = 0ULL;
+       oi->ip_clusters = 0;
++      oi->ip_next_orphan = NULL;
+ 
+       ocfs2_resv_init_once(&oi->ip_la_data_resv);
+ 
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 1eb7933dac83e..b3a9043b0c9ee 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -2213,6 +2213,7 @@ xfs_defer_agfl_block(
+       new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
+       new->xefi_blockcount = 1;
+       new->xefi_oinfo = *oinfo;
++      new->xefi_skip_discard = false;
+ 
+       trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
+ 
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index 488dc8860fd7c..50242ba3cdb72 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -52,9 +52,9 @@ struct xfs_extent_free_item
+ {
+       xfs_fsblock_t           xefi_startblock;/* starting fs block number */
+       xfs_extlen_t            xefi_blockcount;/* number of blocks in extent */
++      bool                    xefi_skip_discard;
+       struct list_head        xefi_list;
+       struct xfs_owner_info   xefi_oinfo;     /* extent owner */
+-      bool                    xefi_skip_discard;
+ };
+ 
+ #define       XFS_BMAP_MAX_NMAP       4
+diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
+index 245af452840ef..ab3e72e702f00 100644
+--- a/fs/xfs/libxfs/xfs_rmap.c
++++ b/fs/xfs/libxfs/xfs_rmap.c
+@@ -1387,7 +1387,7 @@ xfs_rmap_convert_shared(
+        * record for our insertion point. This will also give us the record for
+        * start block contiguity tests.
+        */
+-      error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
++      error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
+                       &PREV, &i);
+       if (error)
+               goto done;
+diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
+index f79cf040d7450..77528f413286b 100644
+--- a/fs/xfs/libxfs/xfs_rmap_btree.c
++++ b/fs/xfs/libxfs/xfs_rmap_btree.c
+@@ -247,8 +247,8 @@ xfs_rmapbt_key_diff(
+       else if (y > x)
+               return -1;
+ 
+-      x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+-      y = rec->rm_offset;
++      x = be64_to_cpu(kp->rm_offset);
++      y = xfs_rmap_irec_offset_pack(rec);
+       if (x > y)
+               return 1;
+       else if (y > x)
+@@ -279,8 +279,8 @@ xfs_rmapbt_diff_two_keys(
+       else if (y > x)
+               return -1;
+ 
+-      x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
+-      y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
++      x = be64_to_cpu(kp1->rm_offset);
++      y = be64_to_cpu(kp2->rm_offset);
+       if (x > y)
+               return 1;
+       else if (y > x)
+@@ -393,8 +393,8 @@ xfs_rmapbt_keys_inorder(
+               return 1;
+       else if (a > b)
+               return 0;
+-      a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+-      b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
++      a = be64_to_cpu(k1->rmap.rm_offset);
++      b = be64_to_cpu(k2->rmap.rm_offset);
+       if (a <= b)
+               return 1;
+       return 0;
+@@ -423,8 +423,8 @@ xfs_rmapbt_recs_inorder(
+               return 1;
+       else if (a > b)
+               return 0;
+-      a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+-      b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
++      a = be64_to_cpu(r1->rmap.rm_offset);
++      b = be64_to_cpu(r2->rmap.rm_offset);
+       if (a <= b)
+               return 1;
+       return 0;
+diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
+index f84a58e523bc8..b05d65fd360b3 100644
+--- a/fs/xfs/scrub/bmap.c
++++ b/fs/xfs/scrub/bmap.c
+@@ -120,6 +120,8 @@ xchk_bmap_get_rmap(
+ 
+       if (info->whichfork == XFS_ATTR_FORK)
+               rflags |= XFS_RMAP_ATTR_FORK;
++      if (irec->br_state == XFS_EXT_UNWRITTEN)
++              rflags |= XFS_RMAP_UNWRITTEN;
+ 
+       /*
+        * CoW staging extents are owned (on disk) by the refcountbt, so
+diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
+index e386c9b0b4ab7..8d45d60832db9 100644
+--- a/fs/xfs/scrub/inode.c
++++ b/fs/xfs/scrub/inode.c
+@@ -131,8 +131,7 @@ xchk_inode_flags(
+               goto bad;
+ 
+       /* rt flags require rt device */
+-      if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
+-          !mp->m_rtdev_targp)
++      if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
+               goto bad;
+ 
+       /* new rt bitmap flag only valid for rbmino */
+diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
+index e8c82b026083e..76e4f16a9fab2 100644
+--- a/fs/xfs/scrub/refcount.c
++++ b/fs/xfs/scrub/refcount.c
+@@ -180,7 +180,6 @@ xchk_refcountbt_process_rmap_fragments(
+        */
+       INIT_LIST_HEAD(&worklist);
+       rbno = NULLAGBLOCK;
+-      nr = 1;
+ 
+       /* Make sure the fragments actually /are/ in agbno order. */
+       bno = 0;
+@@ -194,15 +193,14 @@ xchk_refcountbt_process_rmap_fragments(
+        * Find all the rmaps that start at or before the refc extent,
+        * and put them on the worklist.
+        */
++      nr = 0;
+       list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+-              if (frag->rm.rm_startblock > refchk->bno)
+-                      goto done;
++              if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
++                      break;
+               bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+               if (bno < rbno)
+                       rbno = bno;
+               list_move_tail(&frag->list, &worklist);
+-              if (nr == target_nr)
+-                      break;
+               nr++;
+       }
+ 
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index e427ad097e2ee..948ac1290121b 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -895,6 +895,16 @@ xfs_setattr_size(
+               error = iomap_zero_range(inode, oldsize, newsize - oldsize,
+                               &did_zeroing, &xfs_iomap_ops);
+       } else {
++              /*
++               * iomap won't detect a dirty page over an unwritten block (or a
++               * cow block over a hole) and subsequently skips zeroing the
++               * newly post-EOF portion of the page. Flush the new EOF to
++               * convert the block before the pagecache truncate.
++               */
++              error = filemap_write_and_wait_range(inode->i_mapping, newsize,
++                                                   newsize);
++              if (error)
++                      return error;
+               error = iomap_truncate_page(inode, newsize, &did_zeroing,
+                               &xfs_iomap_ops);
+       }
+diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
+index f44c3599527d0..1c9bced3e8601 100644
+--- a/fs/xfs/xfs_pnfs.c
++++ b/fs/xfs/xfs_pnfs.c
+@@ -141,7 +141,7 @@ xfs_fs_map_blocks(
+               goto out_unlock;
+       error = invalidate_inode_pages2(inode->i_mapping);
+       if (WARN_ON_ONCE(error))
+-              return error;
++              goto out_unlock;
+ 
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
+diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
+index b3379a97245c1..a34694e675c9a 100644
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, 
struct sock *sk)
+  */
+ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+ {
+-      if (skb_shared(skb)) {
+-              struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++      struct sk_buff *nskb;
+ 
+-              if (likely(nskb)) {
+-                      can_skb_set_owner(nskb, skb->sk);
+-                      consume_skb(skb);
+-                      return nskb;
+-              } else {
+-                      kfree_skb(skb);
+-                      return NULL;
+-              }
++      nskb = skb_clone(skb, GFP_ATOMIC);
++      if (unlikely(!nskb)) {
++              kfree_skb(skb);
++              return NULL;
+       }
+ 
+-      /* we can assume to have an unshared skb with proper owner */
+-      return skb;
++      can_skb_set_owner(nskb, skb->sk);
++      consume_skb(skb);
++      return nskb;
+ }
+ 
+ #endif /* !_CAN_SKB_H */
+diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
+index 95ab5cc644226..45ff1330b3393 100644
+--- a/include/linux/netfilter_ipv4.h
++++ b/include/linux/netfilter_ipv4.h
+@@ -16,7 +16,7 @@ struct ip_rt_info {
+       u_int32_t mark;
+ };
+ 
+-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned 
addr_type);
++int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, 
unsigned addr_type);
+ 
+ struct nf_queue_entry;
+ 
+diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
+index c0dc4dd78887a..47a2de582f574 100644
+--- a/include/linux/netfilter_ipv6.h
++++ b/include/linux/netfilter_ipv6.h
+@@ -36,7 +36,7 @@ struct nf_ipv6_ops {
+ };
+ 
+ #ifdef CONFIG_NETFILTER
+-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
++int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff 
*skb);
+ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+                       unsigned int dataoff, u_int8_t protocol);
+ 
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index aa16e6468f91e..cc1e71334e53c 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
+ void prandom_seed(u32 seed);
+ void prandom_reseed_late(void);
+ 
++#if BITS_PER_LONG == 64
++/*
++ * The core SipHash round function.  Each line can be executed in
++ * parallel given enough CPU resources.
++ */
++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
++      v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
++      v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
++      v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
++      v3 ^= v0,                      v1 ^= v2, v2 = rol64(v2, 32)  \
++)
++
++#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
++#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
++
++#elif BITS_PER_LONG == 32
++/*
++ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
++ * This is weaker, but 32-bit machines are not used for high-traffic
++ * applications, so there is less output for an attacker to analyze.
++ */
++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
++      v0 += v1, v1 = rol32(v1,  5),  v2 += v3, v3 = rol32(v3,  8), \
++      v1 ^= v0, v0 = rol32(v0, 16),  v3 ^= v2,                     \
++      v0 += v3, v3 = rol32(v3,  7),  v2 += v1, v1 = rol32(v1, 13), \
++      v3 ^= v0,                      v1 ^= v2, v2 = rol32(v2, 16)  \
++)
++#define PRND_K0 0x6c796765
++#define PRND_K1 0x74656462
++
++#else
++#error Unsupported BITS_PER_LONG
++#endif
++
+ struct rnd_state {
+       __u32 s1, s2, s3, s4;
+ };
+ 
+-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
+-
+ u32 prandom_u32_state(struct rnd_state *state);
+ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+diff --git a/include/linux/time64.h b/include/linux/time64.h
+index 4a45aea0f96e9..8dbdf6cae3e8b 100644
+--- a/include/linux/time64.h
++++ b/include/linux/time64.h
+@@ -138,6 +138,10 @@ static inline bool timespec64_valid_settod(const struct 
timespec64 *ts)
+  */
+ static inline s64 timespec64_to_ns(const struct timespec64 *ts)
+ {
++      /* Prevent multiplication overflow */
++      if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
++              return KTIME_MAX;
++
+       return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+ }
+ 
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 2a8c41f12d450..6f7d4e977c5cc 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -239,6 +239,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long 
nslabs, int verbose)
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
++      no_iotlb_memory = false;
+ 
+       if (verbose)
+               swiotlb_print_info();
+@@ -270,9 +271,11 @@ swiotlb_init(int verbose)
+       if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+               return;
+ 
+-      if (io_tlb_start)
++      if (io_tlb_start) {
+               memblock_free_early(io_tlb_start,
+                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
++              io_tlb_start = 0;
++      }
+       pr_warn("Cannot allocate buffer");
+       no_iotlb_memory = true;
+ }
+@@ -376,6 +379,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
++      no_iotlb_memory = false;
+ 
+       swiotlb_print_info();
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8b94eb6437c18..b8b74a4a524c1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5475,11 +5475,11 @@ static void perf_pmu_output_stop(struct perf_event 
*event);
+ static void perf_mmap_close(struct vm_area_struct *vma)
+ {
+       struct perf_event *event = vma->vm_file->private_data;
+-
+       struct ring_buffer *rb = ring_buffer_get(event);
+       struct user_struct *mmap_user = rb->mmap_user;
+       int mmap_locked = rb->mmap_locked;
+       unsigned long size = perf_data_size(rb);
++      bool detach_rest = false;
+ 
+       if (event->pmu->event_unmapped)
+               event->pmu->event_unmapped(event, vma->vm_mm);
+@@ -5510,7 +5510,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+               mutex_unlock(&event->mmap_mutex);
+       }
+ 
+-      atomic_dec(&rb->mmap_count);
++      if (atomic_dec_and_test(&rb->mmap_count))
++              detach_rest = true;
+ 
+       if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+               goto out_put;
+@@ -5519,7 +5520,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+       mutex_unlock(&event->mmap_mutex);
+ 
+       /* If there's still other mmap()s of this buffer, we're done. */
+-      if (atomic_read(&rb->mmap_count))
++      if (!detach_rest)
+               goto out_put;
+ 
+       /*
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 6dc725a7e7bc9..8fc0ddc38cb69 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -209,7 +209,7 @@ static inline int get_recursion_context(int *recursion)
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+-      else if (in_softirq())
++      else if (in_serving_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index eeaafd4064c95..65133ebddfada 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -517,7 +517,10 @@ static void exit_mm(void)
+               up_read(&mm->mmap_sem);
+ 
+               self.task = current;
+-              self.next = xchg(&core_state->dumper.next, &self);
++              if (self.task->flags & PF_SIGNALED)
++                      self.next = xchg(&core_state->dumper.next, &self);
++              else
++                      self.task = NULL;
+               /*
+                * Implies mb(), the result of xchg() must be visible
+                * to core_state->dumper.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 52f641c00a65b..334dc4cae780e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -856,8 +856,9 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+        */
+       if (pi_state->owner) {
+               struct task_struct *owner;
++              unsigned long flags;
+ 
+-              raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++              raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+               owner = pi_state->owner;
+               if (owner) {
+                       raw_spin_lock(&owner->pi_lock);
+@@ -865,7 +866,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+                       raw_spin_unlock(&owner->pi_lock);
+               }
+               rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+-              raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++              raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, 
flags);
+       }
+ 
+       if (current->pi_state_cache) {
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index 5f3e2baefca92..d532bf0c5a672 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -80,6 +80,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
+ # Generic IRQ IPI support
+ config GENERIC_IRQ_IPI
+       bool
++      select IRQ_DOMAIN_HIERARCHY
+ 
+ # Generic MSI interrupt support
+ config GENERIC_MSI_IRQ
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 8fb44dec9ad75..45bea54f9aee0 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -539,22 +539,22 @@ static int __init reboot_setup(char *str)
+                       break;
+ 
+               case 's':
+-              {
+-                      int rc;
+-
+-                      if (isdigit(*(str+1))) {
+-                              rc = kstrtoint(str+1, 0, &reboot_cpu);
+-                              if (rc)
+-                                      return rc;
+-                      } else if (str[1] == 'm' && str[2] == 'p' &&
+-                                 isdigit(*(str+3))) {
+-                              rc = kstrtoint(str+3, 0, &reboot_cpu);
+-                              if (rc)
+-                                      return rc;
+-                      } else
++                      if (isdigit(*(str+1)))
++                              reboot_cpu = simple_strtoul(str+1, NULL, 0);
++                      else if (str[1] == 'm' && str[2] == 'p' &&
++                                                      isdigit(*(str+3)))
++                              reboot_cpu = simple_strtoul(str+3, NULL, 0);
++                      else
+                               reboot_mode = REBOOT_SOFT;
++                      if (reboot_cpu >= num_possible_cpus()) {
++                              pr_err("Ignoring the CPU number in reboot= 
option. "
++                                     "CPU %d exceeds possible cpu number 
%d\n",
++                                     reboot_cpu, num_possible_cpus());
++                              reboot_cpu = 0;
++                              break;
++                      }
+                       break;
+-              }
++
+               case 'g':
+                       reboot_mode = REBOOT_GPIO;
+                       break;
+diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
+index 9a65713c83093..2e2b335ef1018 100644
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -154,10 +154,6 @@ static void set_cpu_itimer(struct task_struct *tsk, 
unsigned int clock_id,
+       u64 oval, nval, ointerval, ninterval;
+       struct cpu_itimer *it = &tsk->signal->it[clock_id];
+ 
+-      /*
+-       * Use the to_ktime conversion because that clamps the maximum
+-       * value to KTIME_MAX and avoid multiplication overflows.
+-       */
+       nval = ktime_to_ns(timeval_to_ktime(value->it_value));
+       ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));
+ 
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index a02e0f6b287ce..0a3cc37e4b838 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -15,6 +15,7 @@
+ #include <linux/err.h>
+ #include <linux/hrtimer.h>
+ #include <linux/interrupt.h>
++#include <linux/nmi.h>
+ #include <linux/percpu.h>
+ #include <linux/profile.h>
+ #include <linux/sched.h>
+@@ -520,6 +521,7 @@ void tick_unfreeze(void)
+               trace_suspend_resume(TPS("timekeeping_freeze"),
+                                    smp_processor_id(), false);
+       } else {
++              touch_softlockup_watchdog();
+               tick_resume_local();
+       }
+ 
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 61e41ea3a96ec..a6e88d9bb931c 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1655,13 +1655,6 @@ void update_process_times(int user_tick)
+       scheduler_tick();
+       if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+               run_posix_cpu_timers(p);
+-
+-      /* The current CPU might make use of net randoms without receiving IRQs
+-       * to renew them often enough. Let's update the net_rand_state from a
+-       * non-constant value that's not affine to the number of calls to make
+-       * sure it's updated when there's some activity (we don't care in idle).
+-       */
+-      this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
+ }
+ 
+ /**
+diff --git a/lib/random32.c b/lib/random32.c
+index b6f3325e38e43..9085b1172015e 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -40,16 +40,6 @@
+ #include <linux/sched.h>
+ #include <asm/unaligned.h>
+ 
+-#ifdef CONFIG_RANDOM32_SELFTEST
+-static void __init prandom_state_selftest(void);
+-#else
+-static inline void prandom_state_selftest(void)
+-{
+-}
+-#endif
+-
+-DEFINE_PER_CPU(struct rnd_state, net_rand_state)  __latent_entropy;
+-
+ /**
+  *    prandom_u32_state - seeded pseudo-random number generator.
+  *    @state: pointer to state structure holding seeded state.
+@@ -69,25 +59,6 @@ u32 prandom_u32_state(struct rnd_state *state)
+ }
+ EXPORT_SYMBOL(prandom_u32_state);
+ 
+-/**
+- *    prandom_u32 - pseudo random number generator
+- *
+- *    A 32 bit pseudo-random number is generated using a fast
+- *    algorithm suitable for simulation. This algorithm is NOT
+- *    considered safe for cryptographic use.
+- */
+-u32 prandom_u32(void)
+-{
+-      struct rnd_state *state = &get_cpu_var(net_rand_state);
+-      u32 res;
+-
+-      res = prandom_u32_state(state);
+-      put_cpu_var(net_rand_state);
+-
+-      return res;
+-}
+-EXPORT_SYMBOL(prandom_u32);
+-
+ /**
+  *    prandom_bytes_state - get the requested number of pseudo-random bytes
+  *
+@@ -119,20 +90,6 @@ void prandom_bytes_state(struct rnd_state *state, void 
*buf, size_t bytes)
+ }
+ EXPORT_SYMBOL(prandom_bytes_state);
+ 
+-/**
+- *    prandom_bytes - get the requested number of pseudo-random bytes
+- *    @buf: where to copy the pseudo-random bytes to
+- *    @bytes: the requested number of bytes
+- */
+-void prandom_bytes(void *buf, size_t bytes)
+-{
+-      struct rnd_state *state = &get_cpu_var(net_rand_state);
+-
+-      prandom_bytes_state(state, buf, bytes);
+-      put_cpu_var(net_rand_state);
+-}
+-EXPORT_SYMBOL(prandom_bytes);
+-
+ static void prandom_warmup(struct rnd_state *state)
+ {
+       /* Calling RNG ten times to satisfy recurrence condition */
+@@ -148,96 +105,6 @@ static void prandom_warmup(struct rnd_state *state)
+       prandom_u32_state(state);
+ }
+ 
+-static u32 __extract_hwseed(void)
+-{
+-      unsigned int val = 0;
+-
+-      (void)(arch_get_random_seed_int(&val) ||
+-             arch_get_random_int(&val));
+-
+-      return val;
+-}
+-
+-static void prandom_seed_early(struct rnd_state *state, u32 seed,
+-                             bool mix_with_hwseed)
+-{
+-#define LCG(x)         ((x) * 69069U) /* super-duper LCG */
+-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+-      state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
+-      state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
+-      state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
+-      state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+-}
+-
+-/**
+- *    prandom_seed - add entropy to pseudo random number generator
+- *    @seed: seed value
+- *
+- *    Add some additional seeding to the prandom pool.
+- */
+-void prandom_seed(u32 entropy)
+-{
+-      int i;
+-      /*
+-       * No locking on the CPUs, but then somewhat random results are, well,
+-       * expected.
+-       */
+-      for_each_possible_cpu(i) {
+-              struct rnd_state *state = &per_cpu(net_rand_state, i);
+-
+-              state->s1 = __seed(state->s1 ^ entropy, 2U);
+-              prandom_warmup(state);
+-      }
+-}
+-EXPORT_SYMBOL(prandom_seed);
+-
+-/*
+- *    Generate some initially weak seeding values to allow
+- *    to start the prandom_u32() engine.
+- */
+-static int __init prandom_init(void)
+-{
+-      int i;
+-
+-      prandom_state_selftest();
+-
+-      for_each_possible_cpu(i) {
+-              struct rnd_state *state = &per_cpu(net_rand_state, i);
+-              u32 weak_seed = (i + jiffies) ^ random_get_entropy();
+-
+-              prandom_seed_early(state, weak_seed, true);
+-              prandom_warmup(state);
+-      }
+-
+-      return 0;
+-}
+-core_initcall(prandom_init);
+-
+-static void __prandom_timer(struct timer_list *unused);
+-
+-static DEFINE_TIMER(seed_timer, __prandom_timer);
+-
+-static void __prandom_timer(struct timer_list *unused)
+-{
+-      u32 entropy;
+-      unsigned long expires;
+-
+-      get_random_bytes(&entropy, sizeof(entropy));
+-      prandom_seed(entropy);
+-
+-      /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+-      expires = 40 + prandom_u32_max(40);
+-      seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
+-
+-      add_timer(&seed_timer);
+-}
+-
+-static void __init __prandom_start_seed_timer(void)
+-{
+-      seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
+-      add_timer(&seed_timer);
+-}
+-
+ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+ {
+       int i;
+@@ -257,51 +124,6 @@ void prandom_seed_full_state(struct rnd_state __percpu 
*pcpu_state)
+ }
+ EXPORT_SYMBOL(prandom_seed_full_state);
+ 
+-/*
+- *    Generate better values after random number generator
+- *    is fully initialized.
+- */
+-static void __prandom_reseed(bool late)
+-{
+-      unsigned long flags;
+-      static bool latch = false;
+-      static DEFINE_SPINLOCK(lock);
+-
+-      /* Asking for random bytes might result in bytes getting
+-       * moved into the nonblocking pool and thus marking it
+-       * as initialized. In this case we would double back into
+-       * this function and attempt to do a late reseed.
+-       * Ignore the pointless attempt to reseed again if we're
+-       * already waiting for bytes when the nonblocking pool
+-       * got initialized.
+-       */
+-
+-      /* only allow initial seeding (late == false) once */
+-      if (!spin_trylock_irqsave(&lock, flags))
+-              return;
+-
+-      if (latch && !late)
+-              goto out;
+-
+-      latch = true;
+-      prandom_seed_full_state(&net_rand_state);
+-out:
+-      spin_unlock_irqrestore(&lock, flags);
+-}
+-
+-void prandom_reseed_late(void)
+-{
+-      __prandom_reseed(true);
+-}
+-
+-static int __init prandom_reseed(void)
+-{
+-      __prandom_reseed(false);
+-      __prandom_start_seed_timer();
+-      return 0;
+-}
+-late_initcall(prandom_reseed);
+-
+ #ifdef CONFIG_RANDOM32_SELFTEST
+ static struct prandom_test1 {
+       u32 seed;
+@@ -421,7 +243,28 @@ static struct prandom_test2 {
+       {  407983964U, 921U,  728767059U },
+ };
+ 
+-static void __init prandom_state_selftest(void)
++static u32 __extract_hwseed(void)
++{
++      unsigned int val = 0;
++
++      (void)(arch_get_random_seed_int(&val) ||
++             arch_get_random_int(&val));
++
++      return val;
++}
++
++static void prandom_seed_early(struct rnd_state *state, u32 seed,
++                             bool mix_with_hwseed)
++{
++#define LCG(x)         ((x) * 69069U) /* super-duper LCG */
++#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
++      state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
++      state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
++      state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
++      state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
++}
++
++static int __init prandom_state_selftest(void)
+ {
+       int i, j, errors = 0, runs = 0;
+       bool error = false;
+@@ -461,5 +304,266 @@ static void __init prandom_state_selftest(void)
+               pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
+       else
+               pr_info("prandom: %d self tests passed\n", runs);
++      return 0;
+ }
++core_initcall(prandom_state_selftest);
+ #endif
++
++/*
++ * The prandom_u32() implementation is now completely separate from the
++ * prandom_state() functions, which are retained (for now) for compatibility.
++ *
++ * Because of (ab)use in the networking code for choosing random TCP/UDP port
++ * numbers, which open DoS possibilities if guessable, we want something
++ * stronger than a standard PRNG.  But the performance requirements of
++ * the network code do not allow robust crypto for this application.
++ *
++ * So this is a homebrew Junior Spaceman implementation, based on the
++ * lowest-latency trustworthy crypto primitive available, SipHash.
++ * (The authors of SipHash have not been consulted about this abuse of
++ * their work.)
++ *
++ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
++ * one word of output.  This abbreviated version uses 2 rounds per word
++ * of output.
++ */
++
++struct siprand_state {
++      unsigned long v0;
++      unsigned long v1;
++      unsigned long v2;
++      unsigned long v3;
++};
++
++static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
++
++/*
++ * This is the core CPRNG function.  As "pseudorandom", this is not used
++ * for truly valuable things, just intended to be a PITA to guess.
++ * For maximum speed, we do just two SipHash rounds per word.  This is
++ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
++ * so hopefully it's reasonably secure.
++ *
++ * There are two changes from the official SipHash finalization:
++ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
++ *   they are there only to make the output rounds distinct from the input
++ *   rounds, and this application has no input rounds.
++ * - Rather than returning v0^v1^v2^v3, return v1+v3.
++ *   If you look at the SipHash round, the last operation on v3 is
++ *   "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
++ *   Likewise "v1 ^= v2".  (The rotate of v2 makes a difference, but
++ *   it still cancels out half of the bits in v2 for no benefit.)
++ *   Second, since the last combining operation was xor, continue the
++ *   pattern of alternating xor/add for a tiny bit of extra non-linearity.
++ */
++static inline u32 siprand_u32(struct siprand_state *s)
++{
++      unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
++
++      PRND_SIPROUND(v0, v1, v2, v3);
++      PRND_SIPROUND(v0, v1, v2, v3);
++      s->v0 = v0;  s->v1 = v1;  s->v2 = v2;  s->v3 = v3;
++      return v1 + v3;
++}
++
++
++/**
++ *    prandom_u32 - pseudo random number generator
++ *
++ *    A 32 bit pseudo-random number is generated using a fast
++ *    algorithm suitable for simulation. This algorithm is NOT
++ *    considered safe for cryptographic use.
++ */
++u32 prandom_u32(void)
++{
++      struct siprand_state *state = get_cpu_ptr(&net_rand_state);
++      u32 res = siprand_u32(state);
++
++      put_cpu_ptr(&net_rand_state);
++      return res;
++}
++EXPORT_SYMBOL(prandom_u32);
++
++/**
++ *    prandom_bytes - get the requested number of pseudo-random bytes
++ *    @buf: where to copy the pseudo-random bytes to
++ *    @bytes: the requested number of bytes
++ */
++void prandom_bytes(void *buf, size_t bytes)
++{
++      struct siprand_state *state = get_cpu_ptr(&net_rand_state);
++      u8 *ptr = buf;
++
++      while (bytes >= sizeof(u32)) {
++              put_unaligned(siprand_u32(state), (u32 *)ptr);
++              ptr += sizeof(u32);
++              bytes -= sizeof(u32);
++      }
++
++      if (bytes > 0) {
++              u32 rem = siprand_u32(state);
++
++              do {
++                      *ptr++ = (u8)rem;
++                      rem >>= BITS_PER_BYTE;
++              } while (--bytes > 0);
++      }
++      put_cpu_ptr(&net_rand_state);
++}
++EXPORT_SYMBOL(prandom_bytes);
++
++/**
++ *    prandom_seed - add entropy to pseudo random number generator
++ *    @entropy: entropy value
++ *
++ *    Add some additional seed material to the prandom pool.
++ *    The "entropy" is actually our IP address (the only caller is
++ *    the network code), not for unpredictability, but to ensure that
++ *    different machines are initialized differently.
++ */
++void prandom_seed(u32 entropy)
++{
++      int i;
++
++      add_device_randomness(&entropy, sizeof(entropy));
++
++      for_each_possible_cpu(i) {
++              struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
++              unsigned long v0 = state->v0, v1 = state->v1;
++              unsigned long v2 = state->v2, v3 = state->v3;
++
++              do {
++                      v3 ^= entropy;
++                      PRND_SIPROUND(v0, v1, v2, v3);
++                      PRND_SIPROUND(v0, v1, v2, v3);
++                      v0 ^= entropy;
++              } while (unlikely(!v0 || !v1 || !v2 || !v3));
++
++              WRITE_ONCE(state->v0, v0);
++              WRITE_ONCE(state->v1, v1);
++              WRITE_ONCE(state->v2, v2);
++              WRITE_ONCE(state->v3, v3);
++      }
++}
++EXPORT_SYMBOL(prandom_seed);
++
++/*
++ *    Generate some initially weak seeding values to allow
++ *    the prandom_u32() engine to be started.
++ */
++static int __init prandom_init_early(void)
++{
++      int i;
++      unsigned long v0, v1, v2, v3;
++
++      if (!arch_get_random_long(&v0))
++              v0 = jiffies;
++      if (!arch_get_random_long(&v1))
++              v1 = random_get_entropy();
++      v2 = v0 ^ PRND_K0;
++      v3 = v1 ^ PRND_K1;
++
++      for_each_possible_cpu(i) {
++              struct siprand_state *state;
++
++              v3 ^= i;
++              PRND_SIPROUND(v0, v1, v2, v3);
++              PRND_SIPROUND(v0, v1, v2, v3);
++              v0 ^= i;
++
++              state = per_cpu_ptr(&net_rand_state, i);
++              state->v0 = v0;  state->v1 = v1;
++              state->v2 = v2;  state->v3 = v3;
++      }
++
++      return 0;
++}
++core_initcall(prandom_init_early);
++
++
++/* Stronger reseeding when available, and periodically thereafter. */
++static void prandom_reseed(struct timer_list *unused);
++
++static DEFINE_TIMER(seed_timer, prandom_reseed);
++
++static void prandom_reseed(struct timer_list *unused)
++{
++      unsigned long expires;
++      int i;
++
++      /*
++       * Reinitialize each CPU's PRNG with 128 bits of key.
++       * No locking on the CPUs, but then somewhat random results are,
++       * well, expected.
++       */
++      for_each_possible_cpu(i) {
++              struct siprand_state *state;
++              unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
++              unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
++#if BITS_PER_LONG == 32
++              int j;
++
++              /*
++               * On 32-bit machines, hash in two extra words to
++               * approximate 128-bit key length.  Not that the hash
++               * has that much security, but this prevents a trivial
++               * 64-bit brute force.
++               */
++              for (j = 0; j < 2; j++) {
++                      unsigned long m = get_random_long();
++
++                      v3 ^= m;
++                      PRND_SIPROUND(v0, v1, v2, v3);
++                      PRND_SIPROUND(v0, v1, v2, v3);
++                      v0 ^= m;
++              }
++#endif
++              /*
++               * Probably impossible in practice, but there is a
++               * theoretical risk that a race between this reseeding
++               * and the target CPU writing its state back could
++               * create the all-zero SipHash fixed point.
++               *
++               * To ensure that never happens, ensure the state
++               * we write contains no zero words.
++               */
++              state = per_cpu_ptr(&net_rand_state, i);
++              WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
++              WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
++              WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
++              WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
++      }
++
++      /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
++      expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
++      mod_timer(&seed_timer, expires);
++}
++
++/*
++ * The random ready callback can be called from almost any interrupt.
++ * To avoid worrying about whether it's safe to delay that interrupt
++ * long enough to seed all CPUs, just schedule an immediate timer event.
++ */
++static void prandom_timer_start(struct random_ready_callback *unused)
++{
++      mod_timer(&seed_timer, jiffies);
++}
++
++/*
++ * Start periodic full reseeding as soon as strong
++ * random numbers are available.
++ */
++static int __init prandom_init_late(void)
++{
++      static struct random_ready_callback random_ready = {
++              .func = prandom_timer_start
++      };
++      int ret = add_random_ready_callback(&random_ready);
++
++      if (ret == -EALREADY) {
++              prandom_timer_start(&random_ready);
++              ret = 0;
++      }
++      return ret;
++}
++late_initcall(prandom_init_late);
+diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
+index 8d2e5dc9a827d..3d670d5aea344 100644
+--- a/net/ipv4/netfilter.c
++++ b/net/ipv4/netfilter.c
+@@ -17,17 +17,19 @@
+ #include <net/netfilter/nf_queue.h>
+ 
+ /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
+-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int 
addr_type)
++int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, 
unsigned int addr_type)
+ {
+       const struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt;
+       struct flowi4 fl4 = {};
+       __be32 saddr = iph->saddr;
+-      const struct sock *sk = skb_to_full_sk(skb);
+-      __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
++      __u8 flags;
+       struct net_device *dev = skb_dst(skb)->dev;
+       unsigned int hh_len;
+ 
++      sk = sk_to_full_sk(sk);
++      flags = sk ? inet_sk_flowi_flags(sk) : 0;
++
+       if (addr_type == RTN_UNSPEC)
+               addr_type = inet_addr_type_dev_table(net, dev, saddr);
+       if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+@@ -91,8 +93,8 @@ int nf_ip_reroute(struct sk_buff *skb, const struct 
nf_queue_entry *entry)
+                     skb->mark == rt_info->mark &&
+                     iph->daddr == rt_info->daddr &&
+                     iph->saddr == rt_info->saddr))
+-                      return ip_route_me_harder(entry->state.net, skb,
+-                                                RTN_UNSPEC);
++                      return ip_route_me_harder(entry->state.net, 
entry->state.sk,
++                                                skb, RTN_UNSPEC);
+       }
+       return 0;
+ }
+diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c 
b/net/ipv4/netfilter/ipt_SYNPROXY.c
+index 690b17ef6a44a..d64b1ef43c106 100644
+--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
++++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
+@@ -54,7 +54,7 @@ synproxy_send_tcp(struct net *net,
+ 
+       skb_dst_set_noref(nskb, skb_dst(skb));
+       nskb->protocol = htons(ETH_P_IP);
+-      if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
++      if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
+               goto free_nskb;
+ 
+       if (nfct) {
+diff --git a/net/ipv4/netfilter/iptable_mangle.c 
b/net/ipv4/netfilter/iptable_mangle.c
+index dea138ca89254..0829f46ddfddf 100644
+--- a/net/ipv4/netfilter/iptable_mangle.c
++++ b/net/ipv4/netfilter/iptable_mangle.c
+@@ -65,7 +65,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct 
nf_hook_state *state)
+                   iph->daddr != daddr ||
+                   skb->mark != mark ||
+                   iph->tos != tos) {
+-                      err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++                      err = ip_route_me_harder(state->net, state->sk, skb, 
RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
+diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c 
b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+index 6115bf1ff6f0a..6a27766b7d0ff 100644
+--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+@@ -329,7 +329,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
+ 
+               if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+                   ct->tuplehash[!dir].tuple.src.u3.ip) {
+-                      err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++                      err = ip_route_me_harder(state->net, state->sk, skb, 
RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c 
b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 5cd06ba3535df..4996db1f64a15 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -129,7 +129,7 @@ void nf_send_reset(struct net *net, struct sk_buff 
*oldskb, int hook)
+                                  ip4_dst_hoplimit(skb_dst(nskb)));
+       nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+ 
+-      if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
++      if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
+               goto free_nskb;
+ 
+       niph = ip_hdr(nskb);
+diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c 
b/net/ipv4/netfilter/nft_chain_route_ipv4.c
+index 7d82934c46f42..61003768e52bf 100644
+--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
++++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
+@@ -50,7 +50,7 @@ static unsigned int nf_route_table_hook(void *priv,
+                   iph->daddr != daddr ||
+                   skb->mark != mark ||
+                   iph->tos != tos) {
+-                      err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++                      err = ip_route_me_harder(state->net, state->sk, skb, 
RTN_UNSPEC);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index f66b2e6d97a79..1a06850ef3cc5 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -296,7 +296,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct 
sk_buff *skb)
+       __u32 cookie = ntohl(th->ack_seq) - 1;
+       struct sock *ret = sk;
+       struct request_sock *req;
+-      int mss;
++      int full_space, mss;
+       struct rtable *rt;
+       __u8 rcv_wscale;
+       struct flowi4 fl4;
+@@ -391,8 +391,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct 
sk_buff *skb)
+ 
+       /* Try to redo what tcp_v4_send_synack did. */
+       req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, 
RTAX_WINDOW);
++      /* limit the window selection if the user enforce a smaller rx buffer */
++      full_space = tcp_full_space(sk);
++      if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++          (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++              req->rsk_window_clamp = full_space;
+ 
+-      tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++      tcp_select_initial_window(sk, full_space, req->mss,
+                                 &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+                                 ireq->wscale_ok, &rcv_wscale,
+                                 dst_metric(&rt->dst, RTAX_INITRWND));
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 6d0b1f3e927bd..5679fa3f696ad 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -17,10 +17,10 @@
+ #include <net/xfrm.h>
+ #include <net/netfilter/nf_queue.h>
+ 
+-int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
++int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct 
sk_buff *skb)
+ {
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+-      struct sock *sk = sk_to_full_sk(skb->sk);
++      struct sock *sk = sk_to_full_sk(sk_partial);
+       unsigned int hh_len;
+       struct dst_entry *dst;
+       int strict = (ipv6_addr_type(&iph->daddr) &
+@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
+               if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
+                   !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
+                   skb->mark != rt_info->mark)
+-                      return ip6_route_me_harder(entry->state.net, skb);
++                      return ip6_route_me_harder(entry->state.net, 
entry->state.sk, skb);
+       }
+       return 0;
+ }
+diff --git a/net/ipv6/netfilter/ip6table_mangle.c 
b/net/ipv6/netfilter/ip6table_mangle.c
+index b0524b18c4fb3..acba3757ff605 100644
+--- a/net/ipv6/netfilter/ip6table_mangle.c
++++ b/net/ipv6/netfilter/ip6table_mangle.c
+@@ -60,7 +60,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct 
nf_hook_state *state)
+            skb->mark != mark ||
+            ipv6_hdr(skb)->hop_limit != hop_limit ||
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+-              err = ip6_route_me_harder(state->net, skb);
++              err = ip6_route_me_harder(state->net, state->sk, skb);
+               if (err < 0)
+                       ret = NF_DROP_ERR(err);
+       }
+diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c 
b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+index ca6d38698b1ad..2b6a3b27f6704 100644
+--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+@@ -352,7 +352,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
+ 
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+                                     &ct->tuplehash[!dir].tuple.src.u3)) {
+-                      err = ip6_route_me_harder(state->net, skb);
++                      err = ip6_route_me_harder(state->net, state->sk, skb);
+                       if (err < 0)
+                               ret = NF_DROP_ERR(err);
+               }
+diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c 
b/net/ipv6/netfilter/nft_chain_route_ipv6.c
+index da3f1f8cb325c..afe79cb46e630 100644
+--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
++++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
+@@ -52,7 +52,7 @@ static unsigned int nf_route_table_hook(void *priv,
+            skb->mark != mark ||
+            ipv6_hdr(skb)->hop_limit != hop_limit ||
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+-              err = ip6_route_me_harder(state->net, skb);
++              err = ip6_route_me_harder(state->net, state->sk, skb);
+               if (err < 0)
+                       ret = NF_DROP_ERR(err);
+       }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index bfed7508ba19a..98c108baf35e2 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1087,7 +1087,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+       if (tdev && !netif_is_l3_master(tdev)) {
+               int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 
+-              dev->hard_header_len = tdev->hard_header_len + sizeof(struct 
iphdr);
+               dev->mtu = tdev->mtu - t_hlen;
+               if (dev->mtu < IPV6_MIN_MTU)
+                       dev->mtu = IPV6_MIN_MTU;
+@@ -1377,7 +1376,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
+       dev->priv_destructor    = ipip6_dev_free;
+ 
+       dev->type               = ARPHRD_SIT;
+-      dev->hard_header_len    = LL_MAX_HEADER + t_hlen;
+       dev->mtu                = ETH_DATA_LEN - t_hlen;
+       dev->min_mtu            = IPV6_MIN_MTU;
+       dev->max_mtu            = IP6_MAX_MTU - t_hlen;
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index a377be8a9fb44..ec61b67a92be0 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -141,7 +141,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct 
sk_buff *skb)
+       __u32 cookie = ntohl(th->ack_seq) - 1;
+       struct sock *ret = sk;
+       struct request_sock *req;
+-      int mss;
++      int full_space, mss;
+       struct dst_entry *dst;
+       __u8 rcv_wscale;
+       u32 tsoff = 0;
+@@ -246,7 +246,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct 
sk_buff *skb)
+       }
+ 
+       req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, 
RTAX_WINDOW);
+-      tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++      /* limit the window selection if the user enforce a smaller rx buffer */
++      full_space = tcp_full_space(sk);
++      if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++          (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++              req->rsk_window_clamp = full_space;
++
++      tcp_select_initial_window(sk, full_space, req->mss,
+                                 &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+                                 ireq->wscale_ok, &rcv_wscale,
+                                 dst_metric(dst, RTAX_INITRWND));
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 23a1002ed86dd..ad3e515f91f0f 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1571,7 +1571,8 @@ static int iucv_sock_shutdown(struct socket *sock, int 
how)
+               break;
+       }
+ 
+-      if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
++      if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
++          sk->sk_state == IUCV_CONNECTED) {
+               if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+                       txmsg.class = 0;
+                       txmsg.tag = 0;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 3160ffd93a153..98d048630ad2f 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1908,19 +1908,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data 
*sdata,
+ 
+ /* device xmit handlers */
+ 
++enum ieee80211_encrypt {
++      ENCRYPT_NO,
++      ENCRYPT_MGMT,
++      ENCRYPT_DATA,
++};
++
+ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
+                               struct sk_buff *skb,
+-                              int head_need, bool may_encrypt)
++                              int head_need,
++                              enum ieee80211_encrypt encrypt)
+ {
+       struct ieee80211_local *local = sdata->local;
+-      struct ieee80211_hdr *hdr;
+       bool enc_tailroom;
+       int tail_need = 0;
+ 
+-      hdr = (struct ieee80211_hdr *) skb->data;
+-      enc_tailroom = may_encrypt &&
+-                     (sdata->crypto_tx_tailroom_needed_cnt ||
+-                      ieee80211_is_mgmt(hdr->frame_control));
++      enc_tailroom = encrypt == ENCRYPT_MGMT ||
++                     (encrypt == ENCRYPT_DATA &&
++                      sdata->crypto_tx_tailroom_needed_cnt);
+ 
+       if (enc_tailroom) {
+               tail_need = IEEE80211_ENCRYPT_TAILROOM;
+@@ -1952,23 +1957,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data 
*sdata,
+ {
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+-      struct ieee80211_hdr *hdr;
++      struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       int headroom;
+-      bool may_encrypt;
++      enum ieee80211_encrypt encrypt;
+ 
+-      may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
++      if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
++              encrypt = ENCRYPT_NO;
++      else if (ieee80211_is_mgmt(hdr->frame_control))
++              encrypt = ENCRYPT_MGMT;
++      else
++              encrypt = ENCRYPT_DATA;
+ 
+       headroom = local->tx_headroom;
+-      if (may_encrypt)
++      if (encrypt != ENCRYPT_NO)
+               headroom += sdata->encrypt_headroom;
+       headroom -= skb_headroom(skb);
+       headroom = max_t(int, 0, headroom);
+ 
+-      if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
++      if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
+               ieee80211_free_txskb(&local->hw, skb);
+               return;
+       }
+ 
++      /* reload after potential resize */
+       hdr = (struct ieee80211_hdr *) skb->data;
+       info->control.vif = &sdata->vif;
+ 
+@@ -2751,7 +2762,7 @@ static struct sk_buff *ieee80211_build_hdr(struct 
ieee80211_sub_if_data *sdata,
+               head_need += sdata->encrypt_headroom;
+               head_need += local->tx_headroom;
+               head_need = max_t(int, 0, head_need);
+-              if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
++              if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
+                       ieee80211_free_txskb(&local->hw, skb);
+                       skb = NULL;
+                       return ERR_PTR(-ENOMEM);
+@@ -3414,7 +3425,7 @@ static bool ieee80211_xmit_fast(struct 
ieee80211_sub_if_data *sdata,
+       if (unlikely(ieee80211_skb_resize(sdata, skb,
+                                         max_t(int, extra_head + hw_headroom -
+                                                    skb_headroom(skb), 0),
+-                                        false))) {
++                                        ENCRYPT_NO))) {
+               kfree_skb(skb);
+               return true;
+       }
+diff --git a/net/netfilter/ipset/ip_set_core.c 
b/net/netfilter/ipset/ip_set_core.c
+index 36ebc40a4313c..0427e66bc4786 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -488,13 +488,14 @@ ip_set_match_extensions(struct ip_set *set, const struct 
ip_set_ext *ext,
+       if (SET_WITH_COUNTER(set)) {
+               struct ip_set_counter *counter = ext_counter(data, set);
+ 
++              ip_set_update_counter(counter, ext, flags);
++
+               if (flags & IPSET_FLAG_MATCH_COUNTERS &&
+                   !(ip_set_match_counter(ip_set_get_packets(counter),
+                               mext->packets, mext->packets_op) &&
+                     ip_set_match_counter(ip_set_get_bytes(counter),
+                               mext->bytes, mext->bytes_op)))
+                       return false;
+-              ip_set_update_counter(counter, ext, flags);
+       }
+       if (SET_WITH_SKBINFO(set))
+               ip_set_get_skbinfo(ext_skbinfo(data, set),
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index d5e4329579e28..acaeeaf814415 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -725,12 +725,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs 
*ipvs, int af,
+               struct dst_entry *dst = skb_dst(skb);
+ 
+               if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+-                  ip6_route_me_harder(ipvs->net, skb) != 0)
++                  ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
+                       return 1;
+       } else
+ #endif
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+-                  ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
++                  ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
+                       return 1;
+ 
+       return 0;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index bd96fd261dba3..4e15913e7519e 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1116,10 +1116,13 @@ static void dev_deactivate_queue(struct net_device 
*dev,
+                                void *_qdisc_default)
+ {
+       struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc);
++      struct Qdisc *qdisc_default = _qdisc_default;
+ 
+       if (qdisc) {
+               if (!(qdisc->flags & TCQ_F_BUILTIN))
+                       set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
++
++              rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+       }
+ }
+ 
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index ec9a7137d2677..1c4733153d749 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -671,12 +671,18 @@ static int tipc_topsrv_start(struct net *net)
+ 
+       ret = tipc_topsrv_work_start(srv);
+       if (ret < 0)
+-              return ret;
++              goto err_start;
+ 
+       ret = tipc_topsrv_create_listener(srv);
+       if (ret < 0)
+-              tipc_topsrv_work_stop(srv);
++              goto err_create;
+ 
++      return 0;
++
++err_create:
++      tipc_topsrv_work_stop(srv);
++err_start:
++      kfree(srv);
+       return ret;
+ }
+ 
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 935aebf150107..c7825b951f725 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3374,7 +3374,7 @@ static void print_rd_rules(const struct 
ieee80211_regdomain *rd)
+               power_rule = &reg_rule->power_rule;
+ 
+               if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+-                      snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
++                      snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
+                                freq_range->max_bandwidth_khz,
+                                reg_get_max_bandwidth(rd, reg_rule));
+               else
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index bd1cbbfe5924b..372f4194db5a0 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -824,7 +824,7 @@ static int x25_connect(struct socket *sock, struct 
sockaddr *uaddr,
+       sock->state = SS_CONNECTED;
+       rc = 0;
+ out_put_neigh:
+-      if (rc) {
++      if (rc && x25->neighbour) {
+               read_lock_bh(&x25_list_lock);
+               x25_neigh_put(x25->neighbour);
+               x25->neighbour = NULL;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index a649d7c2f48ca..84dea0ad16661 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1825,6 +1825,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 
high)
+       int err = -ENOENT;
+       __be32 minspi = htonl(low);
+       __be32 maxspi = htonl(high);
++      __be32 newspi = 0;
+       u32 mark = x->mark.v & x->mark.m;
+ 
+       spin_lock_bh(&x->lock);
+@@ -1843,21 +1844,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 
high)
+                       xfrm_state_put(x0);
+                       goto unlock;
+               }
+-              x->id.spi = minspi;
++              newspi = minspi;
+       } else {
+               u32 spi = 0;
+               for (h = 0; h < high-low+1; h++) {
+                       spi = low + prandom_u32()%(high-low+1);
+                       x0 = xfrm_state_lookup(net, mark, &x->id.daddr, 
htonl(spi), x->id.proto, x->props.family);
+                       if (x0 == NULL) {
+-                              x->id.spi = htonl(spi);
++                              newspi = htonl(spi);
+                               break;
+                       }
+                       xfrm_state_put(x0);
+               }
+       }
+-      if (x->id.spi) {
++      if (newspi) {
+               spin_lock_bh(&net->xfrm.xfrm_state_lock);
++              x->id.spi = newspi;
+               h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, 
x->props.family);
+               hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
+index 0a4b89d482977..cb05ae28ce009 100644
+--- a/security/selinux/ibpkey.c
++++ b/security/selinux/ibpkey.c
+@@ -161,8 +161,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 
pkey_num, u32 *sid)
+        * is valid, it just won't be added to the cache.
+        */
+       new = kzalloc(sizeof(*new), GFP_ATOMIC);
+-      if (!new)
++      if (!new) {
++              ret = -ENOMEM;
+               goto out;
++      }
+ 
+       new->psec.subnet_prefix = subnet_prefix;
+       new->psec.pkey = pkey_num;
+diff --git a/sound/hda/ext/hdac_ext_controller.c 
b/sound/hda/ext/hdac_ext_controller.c
+index 84b44cdae28a1..b96abebcfd1a4 100644
+--- a/sound/hda/ext/hdac_ext_controller.c
++++ b/sound/hda/ext/hdac_ext_controller.c
+@@ -156,6 +156,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct 
hdac_bus *bus,
+               return NULL;
+       if (bus->idx != bus_idx)
+               return NULL;
++      if (addr < 0 || addr > 31)
++              return NULL;
+ 
+       list_for_each_entry(hlink, &bus->hlink_list, list) {
+               for (i = 0; i < HDA_MAX_CODECS; i++) {
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c 
b/tools/perf/util/scripting-engines/trace-event-python.c
+index 9569cc06e0a73..2814251df06b4 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -1493,7 +1493,6 @@ static void _free_command_line(wchar_t **command_line, 
int num)
+ static int python_start_script(const char *script, int argc, const char 
**argv)
+ {
+       struct tables *tables = &tables_global;
+-      PyMODINIT_FUNC (*initfunc)(void);
+ #if PY_MAJOR_VERSION < 3
+       const char **command_line;
+ #else
+@@ -1508,20 +1507,18 @@ static int python_start_script(const char *script, int 
argc, const char **argv)
+       FILE *fp;
+ 
+ #if PY_MAJOR_VERSION < 3
+-      initfunc = initperf_trace_context;
+       command_line = malloc((argc + 1) * sizeof(const char *));
+       command_line[0] = script;
+       for (i = 1; i < argc + 1; i++)
+               command_line[i] = argv[i - 1];
++      PyImport_AppendInittab(name, initperf_trace_context);
+ #else
+-      initfunc = PyInit_perf_trace_context;
+       command_line = malloc((argc + 1) * sizeof(wchar_t *));
+       command_line[0] = Py_DecodeLocale(script, NULL);
+       for (i = 1; i < argc + 1; i++)
+               command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
++      PyImport_AppendInittab(name, PyInit_perf_trace_context);
+ #endif
+-
+-      PyImport_AppendInittab(name, initfunc);
+       Py_Initialize();
+ 
+ #if PY_MAJOR_VERSION < 3
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index f016d1b330e54..6a2037b52098b 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -488,6 +488,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
+       event->mmap2.maj   = bswap_32(event->mmap2.maj);
+       event->mmap2.min   = bswap_32(event->mmap2.min);
+       event->mmap2.ino   = bswap_64(event->mmap2.ino);
++      event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
+ 
+       if (sample_id_all) {
+               void *data = &event->mmap2.filename;
+diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c 
b/tools/testing/selftests/proc/proc-loadavg-001.c
+index fcff7047000da..8edaafc2b92fd 100644
+--- a/tools/testing/selftests/proc/proc-loadavg-001.c
++++ b/tools/testing/selftests/proc/proc-loadavg-001.c
+@@ -14,7 +14,6 @@
+  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+  */
+ /* Test that /proc/loadavg correctly reports last pid in pid namespace. */
+-#define _GNU_SOURCE
+ #include <errno.h>
+ #include <sched.h>
+ #include <sys/types.h>
+diff --git a/tools/testing/selftests/proc/proc-self-syscall.c 
b/tools/testing/selftests/proc/proc-self-syscall.c
+index 5ab5f4810e43a..7b9018fad092a 100644
+--- a/tools/testing/selftests/proc/proc-self-syscall.c
++++ b/tools/testing/selftests/proc/proc-self-syscall.c
+@@ -13,7 +13,6 @@
+  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+  */
+-#define _GNU_SOURCE
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <sys/types.h>
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c 
b/tools/testing/selftests/proc/proc-uptime-002.c
+index 30e2b78490898..e7ceabed7f51f 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -15,7 +15,6 @@
+  */
+ // Test that values in /proc/uptime increment monotonically
+ // while shifting across CPUs.
+-#define _GNU_SOURCE
+ #undef NDEBUG
+ #include <assert.h>
+ #include <unistd.h>

Reply via email to