commit:     b9d80258c889404645a24e42c8b16c4be14eff1c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 11 12:35:18 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug 11 12:35:18 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b9d80258

Linux patch 5.4.210

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1209_linux-5.4.210.patch | 950 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 954 insertions(+)

diff --git a/0000_README b/0000_README
index b02651d3..af9d9a39 100644
--- a/0000_README
+++ b/0000_README
@@ -879,6 +879,10 @@ Patch:  1208_linux-5.4.209.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.209
 
+Patch:  1209_linux-5.4.210.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.210
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1209_linux-5.4.210.patch b/1209_linux-5.4.210.patch
new file mode 100644
index 00000000..518f8030
--- /dev/null
+++ b/1209_linux-5.4.210.patch
@@ -0,0 +1,950 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 6bd97cd50d625..7e061ed449aaa 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -422,6 +422,14 @@ The possible values in this file are:
+   'RSB filling'   Protection of RSB on context switch enabled
+   =============   ===========================================
+ 
++  - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
++
++  ===========================  =======================================================
++  'PBRSB-eIBRS: SW sequence'   CPU is affected and protection of RSB on VMEXIT enabled
++  'PBRSB-eIBRS: Vulnerable'    CPU is vulnerable
++  'PBRSB-eIBRS: Not affected'  CPU is not affected by PBRSB
++  ===========================  =======================================================
++
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+ report vulnerability.
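
An illustrative aside, not part of the patch: the strings documented above are surfaced through sysfs, so a minimal user-space C sketch can read the spectre_v2 status line, including the new PBRSB-eIBRS field. The sysfs path is the standard vulnerabilities directory; error handling is deliberately minimal.

    #include <stdio.h>

    /* Sketch: print the Spectre v2 status line, which on patched
     * kernels carries the PBRSB-eIBRS field documented above. */
    int main(void)
    {
        char buf[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            fputs(buf, stdout);
        fclose(f);
        return 0;
    }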
+diff --git a/Makefile b/Makefile
+index 7093e3b03b9f7..74abb7e389f33 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 209
++SUBLEVEL = 210
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 8c28a2365a92b..a3e32bc938562 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -286,6 +286,7 @@
+ #define X86_FEATURE_CQM_MBM_LOCAL     (11*32+ 3) /* LLC Local MBM monitoring */
+ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+ #define X86_FEATURE_FENCE_SWAPGS_KERNEL       (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
++#define X86_FEATURE_RSB_VMEXIT_LITE   (11*32+ 6) /* "" Fill RSB on VM exit when EIBRS is enabled */
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16               (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+@@ -406,5 +407,6 @@
+ #define X86_BUG_ITLB_MULTIHIT         X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+ #define X86_BUG_SRBDS                 X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #define X86_BUG_MMIO_STALE_DATA               X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
++#define X86_BUG_EIBRS_PBRSB           X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+ 
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index c56042916a7c3..cef4eba03ff36 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -129,6 +129,10 @@
+                                                * bit available to control VERW
+                                                * behavior.
+                                                */
++#define ARCH_CAP_PBRSB_NO             BIT(24) /*
++                                               * Not susceptible to Post-Barrier
++                                               * Return Stack Buffer Predictions.
++                                               */
+ 
+ #define MSR_IA32_FLUSH_CMD            0x0000010b
+ #define L1D_FLUSH                     BIT(0)  /*
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index ece2b2c6d020d..1e5df3ccdd5cb 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -61,7 +61,16 @@
+ 774:                                          \
+       dec     reg;                            \
+       jnz     771b;                           \
+-      add     $(BITS_PER_LONG/8) * nr, sp;
++      add     $(BITS_PER_LONG/8) * nr, sp;    \
++      /* barrier for jnz misprediction */     \
++      lfence;
++
++#define __ISSUE_UNBALANCED_RET_GUARD(sp)      \
++      call    881f;                           \
++      int3;                                   \
++881:                                          \
++      add     $(BITS_PER_LONG/8), sp;         \
++      lfence;
+ 
+ #ifdef __ASSEMBLY__
+ 
+@@ -130,6 +139,14 @@
+ #else
+       call    *\reg
+ #endif
++.endm
++
++.macro ISSUE_UNBALANCED_RET_GUARD ftr:req
++      ANNOTATE_NOSPEC_ALTERNATIVE
++      ALTERNATIVE "jmp .Lskip_pbrsb_\@",                              \
++              __stringify(__ISSUE_UNBALANCED_RET_GUARD(%_ASM_SP))     \
++              \ftr
++.Lskip_pbrsb_\@:
+ .endm
+ 
+  /*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 09d02b1f6f71f..57efa90f3fbd0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1043,6 +1043,49 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+       return SPECTRE_V2_RETPOLINE;
+ }
+ 
++static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
++{
++      /*
++       * Similar to context switches, there are two types of RSB attacks
++       * after VM exit:
++       *
++       * 1) RSB underflow
++       *
++       * 2) Poisoned RSB entry
++       *
++       * When retpoline is enabled, both are mitigated by filling/clearing
++       * the RSB.
++       *
++       * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
++       * prediction isolation protections, RSB still needs to be cleared
++       * because of #2.  Note that SMEP provides no protection here, unlike
++       * user-space-poisoned RSB entries.
++       *
++       * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
++       * bug is present then a LITE version of RSB protection is required,
++       * just a single call needs to retire before a RET is executed.
++       */
++      switch (mode) {
++      case SPECTRE_V2_NONE:
++      /* These modes already fill RSB at vmexit */
++      case SPECTRE_V2_LFENCE:
++      case SPECTRE_V2_RETPOLINE:
++      case SPECTRE_V2_EIBRS_RETPOLINE:
++              return;
++
++      case SPECTRE_V2_EIBRS_LFENCE:
++      case SPECTRE_V2_EIBRS:
++              if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
++                      setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
++                      pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
++              }
++              return;
++      }
++
++      pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
++      dump_stack();
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -1135,6 +1178,8 @@ static void __init spectre_v2_select_mitigation(void)
+       setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+       pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+ 
++      spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
++
+       /*
+        * Retpoline means the kernel is safe because it has no indirect
+        * branches. Enhanced IBRS protects firmware too, so, enable restricted
+@@ -1879,6 +1924,19 @@ static char *ibpb_state(void)
+       return "";
+ }
+ 
++static char *pbrsb_eibrs_state(void)
++{
++      if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
++              if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
++                  boot_cpu_has(X86_FEATURE_RETPOLINE))
++                      return ", PBRSB-eIBRS: SW sequence";
++              else
++                      return ", PBRSB-eIBRS: Vulnerable";
++      } else {
++              return ", PBRSB-eIBRS: Not affected";
++      }
++}
++
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+@@ -1891,12 +1949,13 @@ static ssize_t spectre_v2_show_state(char *buf)
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+ 
+-      return sprintf(buf, "%s%s%s%s%s%s\n",
++      return sprintf(buf, "%s%s%s%s%s%s%s\n",
+                      spectre_v2_strings[spectre_v2_enabled],
+                      ibpb_state(),
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                      stibp_state(),
+                      boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++                     pbrsb_eibrs_state(),
+                      spectre_v2_module_string());
+ }
+ 
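
To make the new vmexit decision easier to follow in isolation, here is a hedged, free-standing C sketch of the same switch; the enum and helper names are stand-ins for the kernel's, reduced to plain booleans.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's spectre_v2_mitigation modes. */
    enum mode { NONE, LFENCE, RETPOLINE, EIBRS, EIBRS_LFENCE, EIBRS_RETPOLINE };

    /* Mirrors spectre_v2_determine_rsb_fill_type_at_vmexit(): the
     * retpoline-flavored modes already fill the RSB at vmexit, so only
     * the plain eIBRS modes need the LITE sequence, and only on
     * affected CPUs. */
    static bool needs_vmexit_lite(enum mode m, bool has_pbrsb_bug)
    {
        switch (m) {
        case NONE:
        case LFENCE:
        case RETPOLINE:
        case EIBRS_RETPOLINE:
            return false; /* RSB already filled at vmexit */
        case EIBRS:
        case EIBRS_LFENCE:
            return has_pbrsb_bug;
        }
        return false;
    }

    int main(void)
    {
        printf("eIBRS + PBRSB bug -> lite fill: %d\n",
               needs_vmexit_lite(EIBRS, true));
        return 0;
    }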
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 305f30e45f3d3..b926b7244d42d 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1025,6 +1025,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_SWAPGS             BIT(6)
+ #define NO_ITLB_MULTIHIT      BIT(7)
+ #define NO_SPECTRE_V2         BIT(8)
++#define NO_EIBRS_PBRSB                BIT(9)
+ 
+ #define VULNWL(_vendor, _family, _model, _whitelist)  \
+       { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -1065,7 +1066,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ 
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
++      VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+ 
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+@@ -1075,7 +1076,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+        * good enough for our purposes.
+        */
+ 
+-      VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT),
++      VULNWL_INTEL(ATOM_TREMONT,              NO_EIBRS_PBRSB),
++      VULNWL_INTEL(ATOM_TREMONT_L,            NO_EIBRS_PBRSB),
++      VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+ 
+       /* AMD Family 0xf - 0x12 */
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+@@ -1236,6 +1239,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+           !arch_cap_mmio_immune(ia32_cap))
+               setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ 
++      if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
++          !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
++          !(ia32_cap & ARCH_CAP_PBRSB_NO))
++              setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+ 
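
The new bug bit boils down to three conditions. Sketched free-standing (BIT() and the helper are re-declared here purely for illustration; only ARCH_CAP_PBRSB_NO matches the header change above):

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT(n)             (1ULL << (n))
    #define ARCH_CAP_PBRSB_NO  BIT(24) /* same bit as in msr-index.h above */

    /* A CPU gets X86_BUG_EIBRS_PBRSB only if it has enhanced IBRS, is
     * not on the NO_EIBRS_PBRSB whitelist, and does not report the
     * PBRSB_NO bit in IA32_ARCH_CAPABILITIES. */
    static bool has_eibrs_pbrsb(bool ibrs_enhanced, bool whitelisted,
                                uint64_t ia32_cap)
    {
        return ibrs_enhanced && !whitelisted &&
               !(ia32_cap & ARCH_CAP_PBRSB_NO);
    }

    int main(void)
    {
        return has_eibrs_pbrsb(true, false, 0) ? 0 : 1;
    }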
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index ca4252f81bf81..946d9205c3b6d 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -92,6 +92,7 @@ ENTRY(vmx_vmexit)
+       pop %_ASM_AX
+ .Lvmexit_skip_rsb:
+ #endif
++      ISSUE_UNBALANCED_RET_GUARD X86_FEATURE_RSB_VMEXIT_LITE
+       ret
+ ENDPROC(vmx_vmexit)
+ 
+diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
+index 76b7539a37a93..a06f35528c9a7 100644
+--- a/drivers/acpi/apei/bert.c
++++ b/drivers/acpi/apei/bert.c
+@@ -29,16 +29,26 @@
+ 
+ #undef pr_fmt
+ #define pr_fmt(fmt) "BERT: " fmt
++
++#define ACPI_BERT_PRINT_MAX_RECORDS 5
+ #define ACPI_BERT_PRINT_MAX_LEN 1024
+ 
+ static int bert_disable;
+ 
++/*
++ * Print "all" the error records in the BERT table, but avoid huge spam to
++ * the console if the BIOS included oversize records, or too many records.
++ * Skipping some records here does not lose anything because the full
++ * data is available to user tools in:
++ *    /sys/firmware/acpi/tables/data/BERT
++ */
+ static void __init bert_print_all(struct acpi_bert_region *region,
+                                 unsigned int region_len)
+ {
+       struct acpi_hest_generic_status *estatus =
+               (struct acpi_hest_generic_status *)region;
+       int remain = region_len;
++      int printed = 0, skipped = 0;
+       u32 estatus_len;
+ 
+       while (remain >= sizeof(struct acpi_bert_region)) {
+@@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region,
+               if (remain < estatus_len) {
+                       pr_err(FW_BUG "Truncated status block (length: %u).\n",
+                              estatus_len);
+-                      return;
++                      break;
+               }
+ 
+               /* No more error records. */
+               if (!estatus->block_status)
+-                      return;
++                      break;
+ 
+               if (cper_estatus_check(estatus)) {
+                       pr_err(FW_BUG "Invalid error record.\n");
+-                      return;
++                      break;
+               }
+ 
+-              pr_info_once("Error records from previous boot:\n");
+-              if (region_len < ACPI_BERT_PRINT_MAX_LEN)
++              if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
++                  printed < ACPI_BERT_PRINT_MAX_RECORDS) {
++                      pr_info_once("Error records from previous boot:\n");
+                       cper_estatus_print(KERN_INFO HW_ERR, estatus);
+-              else
+-                      pr_info_once("Max print length exceeded, table data is available at:\n"
+-                                   "/sys/firmware/acpi/tables/data/BERT");
++                      printed++;
++              } else {
++                      skipped++;
++              }
+ 
+               /*
+                * Because the boot error source is "one-time polled" type,
+@@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
+               estatus = (void *)estatus + estatus_len;
+               remain -= estatus_len;
+       }
++
++      if (skipped)
++              pr_info(HW_ERR "Skipped %d error records\n", skipped);
+ }
+ 
+ static int __init setup_bert_disable(char *str)
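
The reworked loop trades unconditional printing for counters. Its shape, as a self-contained sketch with an invented fixed-size record type (the real code walks variable-length estatus blocks):

    #include <stdio.h>

    #define MAX_RECORDS 5
    #define MAX_LEN     1024

    struct record { int len; }; /* stand-in for an estatus block */

    /* Print at most MAX_RECORDS records that fit under MAX_LEN bytes
     * and count the rest as skipped; in the real driver the skipped
     * data remains available under /sys/firmware/acpi/tables/data/BERT. */
    static void print_all(const struct record *recs, int n)
    {
        int printed = 0, skipped = 0;

        for (int i = 0; i < n; i++) {
            if (recs[i].len < MAX_LEN && printed < MAX_RECORDS) {
                printf("record %d (%d bytes)\n", i, recs[i].len);
                printed++;
            } else {
                skipped++;
            }
        }
        if (skipped)
            printf("Skipped %d error records\n", skipped);
    }

    int main(void)
    {
        struct record recs[] = { {100}, {2048}, {300} };
        print_all(recs, 3);
        return 0;
    }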
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index de4142723ff48..3b972ca536896 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -387,7 +387,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+       .callback = video_detect_force_native,
+       .ident = "Clevo NL5xRU",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+               DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+               },
+       },
+@@ -395,59 +394,75 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+       .callback = video_detect_force_native,
+       .ident = "Clevo NL5xRU",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+-              DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++              DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+               },
+       },
+       {
+       .callback = video_detect_force_native,
+       .ident = "Clevo NL5xRU",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+-              DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++              DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+               },
+       },
+       {
+       .callback = video_detect_force_native,
+-      .ident = "Clevo NL5xRU",
++      .ident = "Clevo NL5xNU",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+-              DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
++              DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+               },
+       },
++      /*
++       * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
++       * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
++       * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
++       * above.
++       */
+       {
+       .callback = video_detect_force_native,
+-      .ident = "Clevo NL5xRU",
++      .ident = "TongFang PF5PU1G",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+-              DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
++              DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
+               },
+       },
+       {
+       .callback = video_detect_force_native,
+-      .ident = "Clevo NL5xNU",
++      .ident = "TongFang PF4NU1F",
++      .matches = {
++              DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
++              },
++      },
++      {
++      .callback = video_detect_force_native,
++      .ident = "TongFang PF4NU1F",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+-              DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++              DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
+               },
+       },
+       {
+       .callback = video_detect_force_native,
+-      .ident = "Clevo NL5xNU",
++      .ident = "TongFang PF5NU1G",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+-              DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++              DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
+               },
+       },
+       {
+       .callback = video_detect_force_native,
+-      .ident = "Clevo NL5xNU",
++      .ident = "TongFang PF5NU1G",
+       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+-              DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++              DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++              DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
++              },
++      },
++      {
++      .callback = video_detect_force_native,
++      .ident = "TongFang PF5LUXG",
++      .matches = {
++              DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+               },
+       },
+-
+       /*
+        * Desktops which falsely report a backlight and which our heuristics
+        * for this do not catch.
+diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
+index e49d1f287a175..c37d5fce86f79 100644
+--- a/drivers/macintosh/adb.c
++++ b/drivers/macintosh/adb.c
+@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
+ 
+       switch(req->data[1]) {
+       case ADB_QUERY_GETDEVINFO:
+-              if (req->nbytes < 3)
++              if (req->nbytes < 3 || req->data[2] >= 16)
+                       break;
+               mutex_lock(&adb_handler_mutex);
+               req->reply[0] = adb_handler[req->data[2]].original_address;
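
The one-line fix above adds an index bound next to the existing length check. The pattern in miniature (the table size of 16 matches the ADB handler table; everything else is illustrative):

    #include <stdio.h>

    #define NB_HANDLERS 16

    static int table[NB_HANDLERS];

    /* Checking only the request length, as the old code did, still let
     * data[2] index past the 16-entry table; the fix rejects any
     * out-of-range index up front. */
    static int query(const unsigned char *data, int nbytes)
    {
        if (nbytes < 3 || data[2] >= NB_HANDLERS)
            return -1;
        return table[data[2]];
    }

    int main(void)
    {
        unsigned char bad[] = { 0, 1, 200 };
        printf("%d\n", query(bad, 3)); /* -1: index out of range */
        return 0;
    }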
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 639dc8d45e603..d56837c04a81a 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -460,19 +460,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+ 
+-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+-                    struct v4l2_buffer *buf)
++static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
++                                     struct v4l2_buffer *buf)
+ {
+-      struct vb2_queue *vq;
+-      int ret = 0;
+-      unsigned int i;
+-
+-      vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+-      ret = vb2_querybuf(vq, buf);
+-
+       /* Adjust MMAP memory offsets for the CAPTURE queue */
+       if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+               if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
++                      unsigned int i;
++
+                       for (i = 0; i < buf->length; ++i)
+                               buf->m.planes[i].m.mem_offset
+                                       += DST_QUEUE_OFF_BASE;
+@@ -480,8 +475,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                       buf->m.offset += DST_QUEUE_OFF_BASE;
+               }
+       }
++}
+ 
+-      return ret;
++int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
++                    struct v4l2_buffer *buf)
++{
++      struct vb2_queue *vq;
++      int ret;
++
++      vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
++      ret = vb2_querybuf(vq, buf);
++      if (ret)
++              return ret;
++
++      /* Adjust MMAP memory offsets for the CAPTURE queue */
++      v4l2_m2m_adjust_mem_offset(vq, buf);
++
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+ 
+@@ -500,10 +510,16 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+               return -EPERM;
+       }
+       ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+-      if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
++      if (ret)
++              return ret;
++
++      /* Adjust MMAP memory offsets for the CAPTURE queue */
++      v4l2_m2m_adjust_mem_offset(vq, buf);
++
++      if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+               v4l2_m2m_try_schedule(m2m_ctx);
+ 
+-      return ret;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+ 
+@@ -511,9 +527,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                  struct v4l2_buffer *buf)
+ {
+       struct vb2_queue *vq;
++      int ret;
+ 
+       vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+-      return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
++      ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
++      if (ret)
++              return ret;
++
++      /* Adjust MMAP memory offsets for the CAPTURE queue */
++      v4l2_m2m_adjust_mem_offset(vq, buf);
++
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+ 
+@@ -522,9 +546,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ {
+       struct video_device *vdev = video_devdata(file);
+       struct vb2_queue *vq;
++      int ret;
+ 
+       vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+-      return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
++      ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
++      if (ret)
++              return ret;
++
++      /* Adjust MMAP memory offsets for the CAPTURE queue */
++      v4l2_m2m_adjust_mem_offset(vq, buf);
++
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
+ 
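
All four ioctl paths above now follow one rule: run the vb2 operation, return on error, and only then rewrite the MMAP offsets. A hedged sketch of that ordering with placeholder types (only DST_QUEUE_OFF_BASE's role comes from the driver):

    #include <stdio.h>

    #define DST_QUEUE_OFF_BASE (1u << 30) /* placeholder value */

    struct buffer { unsigned long offset; };

    static void adjust_mem_offset(struct buffer *buf)
    {
        buf->offset += DST_QUEUE_OFF_BASE;
    }

    /* The point is the control flow: adjust only after the underlying
     * operation succeeded, so a failed call can never hand userspace a
     * buffer whose offset was already shifted. */
    static int do_op(struct buffer *buf, int (*op)(struct buffer *))
    {
        int ret = op(buf);

        if (ret)
            return ret;
        adjust_mem_offset(buf);
        return 0;
    }

    static int ok(struct buffer *b) { (void)b; return 0; }

    int main(void)
    {
        struct buffer b = { 16 };
        do_op(&b, ok);
        printf("%lu\n", b.offset);
        return 0;
    }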
+diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
+index 68d0c181ec7bb..1f38da5da6e45 100644
+--- a/drivers/thermal/of-thermal.c
++++ b/drivers/thermal/of-thermal.c
+@@ -91,7 +91,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
+ {
+       struct __thermal_zone *data = tz->devdata;
+ 
+-      if (!data->ops->get_temp)
++      if (!data->ops || !data->ops->get_temp)
+               return -EINVAL;
+ 
+       return data->ops->get_temp(data->sensor_data, temp);
+@@ -188,6 +188,9 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
+ {
+       struct __thermal_zone *data = tz->devdata;
+ 
++      if (!data->ops || !data->ops->set_emul_temp)
++              return -EINVAL;
++
+       return data->ops->set_emul_temp(data->sensor_data, temp);
+ }
+ 
+@@ -196,7 +199,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
+ {
+       struct __thermal_zone *data = tz->devdata;
+ 
+-      if (!data->ops->get_trend)
++      if (!data->ops || !data->ops->get_trend)
+               return -EINVAL;
+ 
+       return data->ops->get_trend(data->sensor_data, trip, trend);
+@@ -336,7 +339,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
+       if (trip >= data->ntrips || trip < 0)
+               return -EDOM;
+ 
+-      if (data->ops->set_trip_temp) {
++      if (data->ops && data->ops->set_trip_temp) {
+               int ret;
+ 
+               ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
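
Each callback above now guards the ops table itself as well as the individual hook. The idiom, reduced to a compilable sketch (struct names are illustrative):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct sensor_ops {
        int (*get_temp)(void *data, int *temp);
    };

    struct zone {
        const struct sensor_ops *ops;
        void *sensor_data;
    };

    /* A zone registered before its sensor attaches has ops == NULL, so
     * testing only ops->get_temp would itself dereference NULL; both
     * pointers must be checked. */
    static int zone_get_temp(struct zone *z, int *temp)
    {
        if (!z->ops || !z->ops->get_temp)
            return -EINVAL;
        return z->ops->get_temp(z->sensor_data, temp);
    }

    int main(void)
    {
        struct zone z = { NULL, NULL };
        int t;

        printf("%d\n", zone_get_temp(&z, &t)); /* -EINVAL, no crash */
        return 0;
    }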
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 34262d83dce11..f705d3752fe0d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5083,6 +5083,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+               coerce_reg_to_size(dst_reg, 4);
+       }
+ 
++      __update_reg_bounds(dst_reg);
+       __reg_deduce_bounds(dst_reg);
+       __reg_bound_offset(dst_reg);
+       return 0;
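
The added __update_reg_bounds() call re-derives the numeric bounds from the known-bits (tnum) tracking before the existing deduction steps. The core derivation, sketched in general terms rather than as the verifier's exact code:

    #include <stdint.h>
    #include <stdio.h>

    /* A tnum models a partially known value: bits set in mask are
     * unknown, all other bits are fixed to the matching bits of value. */
    struct tnum { uint64_t value, mask; };

    /* Tightest unsigned bounds consistent with the known bits: the
     * minimum takes every unknown bit as 0, the maximum as 1. */
    static void bounds_from_tnum(struct tnum t, uint64_t *umin, uint64_t *umax)
    {
        *umin = t.value;
        *umax = t.value | t.mask;
    }

    int main(void)
    {
        struct tnum t = { 0x2, 0x7fc }; /* like the (4n+2) values in the tests */
        uint64_t lo, hi;

        bounds_from_tnum(t, &lo, &hi);
        printf("umin=%llu umax=%llu\n",
               (unsigned long long)lo, (unsigned long long)hi);
        return 0;
    }

This is consistent with the updated expectations in test_align.c further down, where explicit umin/umax values now appear at steps that previously showed only var_off.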
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index 4133c721af6ed..59f924e92c284 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -284,6 +284,7 @@
+ #define X86_FEATURE_CQM_MBM_LOCAL     (11*32+ 3) /* LLC Local MBM monitoring */
+ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+ #define X86_FEATURE_FENCE_SWAPGS_KERNEL       (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
++#define X86_FEATURE_RSB_VMEXIT_LITE   (11*32+ 6) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16               (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 0bfad86ec960a..cb0631098f918 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3068,7 +3068,8 @@ struct bpf_sock {
+       __u32 src_ip4;
+       __u32 src_ip6[4];
+       __u32 src_port;         /* host byte order */
+-      __u32 dst_port;         /* network byte order */
++      __be16 dst_port;        /* network byte order */
++      __u16 :16;              /* zero padding */
+       __u32 dst_ip4;
+       __u32 dst_ip6[4];
+       __u32 state;
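
The uapi fix above narrows dst_port to its true 16 bits and spells out the padding. A consumer-side sketch of reading such a field (byte-order helpers from <arpa/inet.h>; the struct here is a two-field excerpt, not the full bpf_sock):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Excerpt mirroring the fixed layout: a 16-bit big-endian port plus
     * explicit zero padding, replacing a 32-bit field that only claimed
     * to be in network byte order. */
    struct sock_excerpt {
        uint16_t dst_port; /* network byte order */
        uint16_t pad;      /* zero padding */
    };

    int main(void)
    {
        struct sock_excerpt s = { htons(443), 0 };

        printf("dst_port = %u\n", ntohs(s.dst_port));
        return 0;
    }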
+diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
+index 0262f7b374f9c..4b9a26caa2c2e 100644
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
+                        * is still (4n), fixed offset is not changed.
+                        * Also, we create a new reg->id.
+                        */
+-                      {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
++                      {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+                        * which is 20.  Then the variable offset is (4n), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+-                      {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
++                      {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++                      {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+               },
+       },
+       {
+@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
+                       /* Adding 14 makes R6 be (4n+2) */
+                       {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       /* Packet pointer has (4n+2) offset */
+-                      {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+-                      {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
++                      {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+                        * which is 2.  Then the variable offset is (4n+2), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       /* Newly read value in R6 was shifted left by 2, so has
+                        * known alignment of 4.
+                        */
+@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
+                       /* Added (4n) to packet pointer's (4n+2) var_off, giving
+                        * another (4n+2).
+                        */
+-                      {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+-                      {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++                      {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
++                      {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+                        * which is 2.  Then the variable offset is (4n+2), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++                      {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+               },
+       },
+       {
+@@ -469,16 +469,16 @@ static struct bpf_align_test tests[] = {
+               .matches = {
+                       {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+                       /* (ptr - ptr) << 2 == unknown, (4n) */
+-                      {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
++                      {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+                       /* (4n) + 14 == (4n+2).  We blow our bounds, because
+                        * the add could overflow.
+                        */
+-                      {7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
++                      {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       /* Checked s>=0 */
+-                      {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++                      {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       /* packet pointer + nonnegative (4n+2) */
+-                      {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+-                      {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++                      {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
++                      {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
+                        * We checked the bounds, but it might have been able
+                        * to overflow if the packet pointer started in the
+@@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
+                        * So we did not get a 'range' on R6, and the access
+                        * attempt will fail.
+                        */
+-                      {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++                      {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+               }
+       },
+       {
+@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
+                       /* New unknown value in R7 is (4n) */
+                       {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Subtracting it from R6 blows our unsigned bounds */
+-                      {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
++                      {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       /* Checked s>= 0 */
+                       {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       /* At the time the word size load is performed from R5,
+@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
++
+               },
+       },
+       {
+@@ -579,18 +580,18 @@ static struct bpf_align_test tests[] = {
+                       /* Adding 14 makes R6 be (4n+2) */
+                       {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+                       /* Subtracting from packet pointer overflows ubounds */
+-                      {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
++                      {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+                       /* New unknown value in R7 is (4n), >= 76 */
+                       {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+                       /* Adding it to packet pointer gives nice bounds again */
+-                      {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
++                      {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+                        * which is 2.  Then the variable offset is (4n+2), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
++                      {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+               },
+       },
+ };
+diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
+index 92c02e4a1b626..313b345eddcc3 100644
+--- a/tools/testing/selftests/bpf/verifier/bounds.c
++++ b/tools/testing/selftests/bpf/verifier/bounds.c
+@@ -411,16 +411,14 @@
+       BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+       /* r1 = 0xffff'fffe (NOT 0!) */
+       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+-      /* computes OOB pointer */
++      /* error on computing OOB pointer */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+-      /* OOB access */
+-      BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+       /* exit */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 3 },
+-      .errstr = "R0 invalid mem access",
++      .errstr = "math between map_value pointer and 4294967294 is not 
allowed",
+       .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
+index 9ed192e14f5fe..b2ce50bb935b8 100644
+--- a/tools/testing/selftests/bpf/verifier/sock.c
++++ b/tools/testing/selftests/bpf/verifier/sock.c
+@@ -121,7 +121,25 @@
+       .result = ACCEPT,
+ },
+ {
+-      "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
++      "sk_fullsock(skb->sk): sk->dst_port [word load] (backward 
compatibility)",
++      .insns = {
++      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++      .result = ACCEPT,
++},
++{
++      "sk_fullsock(skb->sk): sk->dst_port [half load]",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+@@ -139,7 +157,64 @@
+       .result = ACCEPT,
+ },
+ {
+-      "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
++      "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
++      .insns = {
++      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++      .result = REJECT,
++      .errstr = "invalid sock access",
++},
++{
++      "sk_fullsock(skb->sk): sk->dst_port [byte load]",
++      .insns = {
++      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
++      BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++      .result = ACCEPT,
++},
++{
++      "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
++      .insns = {
++      BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++      .result = REJECT,
++      .errstr = "invalid sock access",
++},
++{
++      "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
+       .insns = {
+       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+@@ -149,7 +224,7 @@
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+-      BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
++      BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+index 6cd91970fbad3..3b2a426070c44 100644
+--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
++++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
+ 
+ void ucall(uint64_t cmd, int nargs, ...)
+ {
+-      struct ucall uc = {
+-              .cmd = cmd,
+-      };
++      struct ucall uc = {};
+       va_list va;
+       int i;
+ 
++      WRITE_ONCE(uc.cmd, cmd);
+       nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+ 
+       va_start(va, nargs);
+       for (i = 0; i < nargs; ++i)
+-              uc.args[i] = va_arg(va, uint64_t);
++              WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
+       va_end(va);
+ 
+-      *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
++      WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
+ }
+ 
+ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
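
The conversion to WRITE_ONCE() matters because the guest's stores must reach the host exactly once and in program order. A minimal user-space rendering of the idiom (the kernel's macro is more elaborate, but a volatile store is its core):

    #include <stdint.h>
    #include <stdio.h>

    /* Volatile store: the compiler may not elide or tear it, and may
     * not reorder it against other volatile accesses. */
    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

    struct ucall { uint64_t cmd; uint64_t args[6]; };

    int main(void)
    {
        struct ucall uc = {0};

        WRITE_ONCE(uc.cmd, 1);
        WRITE_ONCE(uc.args[0], 42);
        printf("%llu %llu\n", (unsigned long long)uc.cmd,
               (unsigned long long)uc.args[0]);
        return 0;
    }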
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 287444e52ccf8..4b445dddb7985 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3329,8 +3329,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+               kvm_put_kvm(kvm);
+               mutex_lock(&kvm->lock);
+               list_del(&dev->vm_node);
++              if (ops->release)
++                      ops->release(dev);
+               mutex_unlock(&kvm->lock);
+-              ops->destroy(dev);
++              if (ops->destroy)
++                      ops->destroy(dev);
+               return ret;
+       }
+ 
