Here is a diff to backport two crash fixes for aarch64.

One is a crash I was only hitting on aarch64 when building
runtimes such as compiler-rt and openmp. The other is the

LLVM ERROR: Invalid size request on a scalable vector.

error when building SVE code in some ports such as svt-av1.
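
For the SVE side, here is a minimal sketch of the kind of code involved
(the file name, function and flags below are my own illustration, not
taken from svt-av1): a predicated 32-bit SVE load/store only needs 4-byte
alignment, but with +strict-align the unpatched backend forces it to
16-byte alignment and fails to lower. With the backport such code should
lower to plain ld1w/st1w again.

/* sve_strictalign.c -- illustrative only.
 * Build with something along the lines of:
 *   cc -O2 -march=armv8-a+sve -mstrict-align -c sve_strictalign.c
 * Predicated SVE loads/stores (ld1w/st1w here) only require element-size
 * (4-byte) alignment, which is what the backported
 * allowsMisalignedMemoryAccesses() change accepts under +strict-align. */
#include <arm_sve.h>

void
scale_f32(float *dst, const float *src, long n, float k)
{
        for (long i = 0; i < n; i += svcntw()) {
                svbool_t pg = svwhilelt_b32(i, n);      /* predicate covering the tail */
                svfloat32_t v = svld1_f32(pg, src + i); /* ld1w, element-aligned load */
                svst1_f32(pg, dst + i, svmul_n_f32_x(pg, v, k)); /* st1w store */
        }
}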


Index: 16/Makefile
===================================================================
RCS file: /cvs/ports/devel/llvm/16/Makefile,v
retrieving revision 1.38
diff -u -p -u -p -r1.38 Makefile
--- 16/Makefile 28 Dec 2024 22:29:24 -0000      1.38
+++ 16/Makefile 25 Apr 2025 03:18:59 -0000
@@ -2,7 +2,7 @@ LLVM_MAJOR =    16
 LLVM_VERSION = ${LLVM_MAJOR}.0.6
 LLVM_PKGSPEC = >=16,<17
 
-REVISION-main =                32
+REVISION-main =                33
 REVISION-lldb =                11
 REVISION-python =      4
 
Index: 16/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
===================================================================
RCS file: 16/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
diff -N 16/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 16/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp    25 Apr 2025 03:18:59 -0000
@@ -0,0 +1,16 @@
+- Fix crash lowering stack guard on OpenBSD/aarch64.
+  c180e249d0013474d502cd779ec65b33cf7e9468
+
+Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
+--- llvm/lib/CodeGen/TargetLoweringBase.cpp.orig
++++ llvm/lib/CodeGen/TargetLoweringBase.cpp
+@@ -1988,6 +1988,9 @@ void TargetLoweringBase::insertSSPDeclarations(Module 
+ // Currently only support "standard" __stack_chk_guard.
+ // TODO: add LOAD_STACK_GUARD support.
+ Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
++  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
++    return M.getNamedValue("__guard_local");
++  }
+   return M.getNamedValue("__stack_chk_guard");
+ }
+ 
Index: 16/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
===================================================================
RCS file: /cvs/ports/devel/llvm/16/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp,v
retrieving revision 1.1.1.1
diff -u -p -u -p -r1.1.1.1 patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
--- 16/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    3 Sep 2023 16:00:04 -0000       1.1.1.1
+++ 16/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    25 Apr 2025 03:18:59 -0000
@@ -1,7 +1,30 @@
+- [AArch64][SVE] Don't require 16-byte aligned SVE loads/stores with +strict-align
+  3b17d041dd775e033cca499f2a25548c8c22bb86
+
 Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
 --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp.orig
 +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
-@@ -22256,7 +22256,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
+@@ -2228,6 +2228,19 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(cons
+ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
+     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+     unsigned *Fast) const {
++
++  // Allow SVE loads/stores where the alignment >= the size of the element type,
++  // even with +strict-align. Predicated SVE loads/stores (e.g. ld1/st1), used
++  // for stores that come from IR, only require element-size alignment (even if
++  // unaligned accesses are disabled). Without this, these will be forced to
++  // have 16-byte alignment with +strict-align (and fail to lower as we don't
++  // yet support TLI.expandUnalignedLoad() and TLI.expandUnalignedStore()).
++  if (VT.isScalableVector()) {
++    unsigned ElementSizeBits = VT.getScalarSizeInBits();
++    if (ElementSizeBits % 8 == 0 && Alignment >= Align(ElementSizeBits / 8))
++      return true;
++  }
++
+   if (Subtarget->requiresStrictAlign())
+     return false;
+ 
+@@ -22256,7 +22269,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
  }
  
  bool AArch64TargetLowering::useLoadStackGuardNode() const {
Index: 18/Makefile
===================================================================
RCS file: /cvs/ports/devel/llvm/18/Makefile,v
retrieving revision 1.16
diff -u -p -u -p -r1.16 Makefile
--- 18/Makefile 28 Dec 2024 22:29:24 -0000      1.16
+++ 18/Makefile 25 Apr 2025 03:18:59 -0000
@@ -2,7 +2,7 @@ LLVM_MAJOR =    18
 LLVM_VERSION = ${LLVM_MAJOR}.1.8
 LLVM_PKGSPEC = >=18,<19
 
-REVISION-main =        3
+REVISION-main =        4
 REVISION-python = 0
 REVISION-lldb =        4
 
Index: 18/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
===================================================================
RCS file: 18/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
diff -N 18/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 18/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp    25 Apr 2025 03:18:59 -0000
@@ -0,0 +1,16 @@
+- Fix crash lowering stack guard on OpenBSD/aarch64.
+  c180e249d0013474d502cd779ec65b33cf7e9468
+
+Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
+--- llvm/lib/CodeGen/TargetLoweringBase.cpp.orig
++++ llvm/lib/CodeGen/TargetLoweringBase.cpp
+@@ -2035,6 +2035,9 @@ void TargetLoweringBase::insertSSPDeclarations(Module 
+ // Currently only support "standard" __stack_chk_guard.
+ // TODO: add LOAD_STACK_GUARD support.
+ Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
++  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
++    return M.getNamedValue("__guard_local");
++  }
+   return M.getNamedValue("__stack_chk_guard");
+ }
+ 
Index: 18/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
===================================================================
RCS file: /cvs/ports/devel/llvm/18/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp,v
retrieving revision 1.3
diff -u -p -u -p -r1.3 patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
--- 18/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    23 May 2024 12:07:35 -0000      1.3
+++ 18/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    25 Apr 2025 03:18:59 -0000
@@ -1,7 +1,30 @@
+- [AArch64][SVE] Don't require 16-byte aligned SVE loads/stores with +strict-align
+  3b17d041dd775e033cca499f2a25548c8c22bb86
+
 Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
 --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp.orig
 +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
-@@ -24960,7 +24960,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
+@@ -2289,6 +2289,19 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(cons
+ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
+     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+     unsigned *Fast) const {
++
++  // Allow SVE loads/stores where the alignment >= the size of the element type,
++  // even with +strict-align. Predicated SVE loads/stores (e.g. ld1/st1), used
++  // for stores that come from IR, only require element-size alignment (even if
++  // unaligned accesses are disabled). Without this, these will be forced to
++  // have 16-byte alignment with +strict-align (and fail to lower as we don't
++  // yet support TLI.expandUnalignedLoad() and TLI.expandUnalignedStore()).
++  if (VT.isScalableVector()) {
++    unsigned ElementSizeBits = VT.getScalarSizeInBits();
++    if (ElementSizeBits % 8 == 0 && Alignment >= Align(ElementSizeBits / 8))
++      return true;
++  }
++
+   if (Subtarget->requiresStrictAlign())
+     return false;
+ 
+@@ -24960,7 +24973,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
  }
  
  bool AArch64TargetLowering::useLoadStackGuardNode() const {
Index: 19/Makefile
===================================================================
RCS file: /cvs/ports/devel/llvm/19/Makefile,v
retrieving revision 1.11
diff -u -p -u -p -r1.11 Makefile
--- 19/Makefile 10 Mar 2025 13:56:45 -0000      1.11
+++ 19/Makefile 25 Apr 2025 03:18:59 -0000
@@ -2,7 +2,7 @@ LLVM_MAJOR =    19
 LLVM_VERSION = ${LLVM_MAJOR}.1.7
 LLVM_PKGSPEC = >=19,<20
 
-REVISION-main =        3
+REVISION-main =        4
 
 SHARED_LIBS += LLVM            0.0 \
                LTO             0.0 \
Index: 19/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
===================================================================
RCS file: 19/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
diff -N 19/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 19/patches/patch-llvm_lib_CodeGen_TargetLoweringBase_cpp    25 Apr 2025 03:18:59 -0000
@@ -0,0 +1,16 @@
+- Fix crash lowering stack guard on OpenBSD/aarch64.
+  c180e249d0013474d502cd779ec65b33cf7e9468
+
+Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
+--- llvm/lib/CodeGen/TargetLoweringBase.cpp.orig
++++ llvm/lib/CodeGen/TargetLoweringBase.cpp
+@@ -1961,6 +1961,9 @@ void TargetLoweringBase::insertSSPDeclarations(Module 
+ // Currently only support "standard" __stack_chk_guard.
+ // TODO: add LOAD_STACK_GUARD support.
+ Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
++  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
++    return M.getNamedValue("__guard_local");
++  }
+   return M.getNamedValue("__stack_chk_guard");
+ }
+ 
Index: 19/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
===================================================================
RCS file: /cvs/ports/devel/llvm/19/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp,v
retrieving revision 1.2
diff -u -p -u -p -r1.2 patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp
--- 19/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    20 Dec 2024 10:12:20 -0000      1.2
+++ 19/patches/patch-llvm_lib_Target_AArch64_AArch64ISelLowering_cpp    25 Apr 2025 03:18:59 -0000
@@ -1,7 +1,30 @@
+- [AArch64][SVE] Don't require 16-byte aligned SVE loads/stores with +strict-align
+  3b17d041dd775e033cca499f2a25548c8c22bb86
+
 Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
 --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp.orig
 +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
-@@ -26406,7 +26406,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
+@@ -2471,6 +2471,19 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(cons
+ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
+     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+     unsigned *Fast) const {
++
++   // Allow SVE loads/stores where the alignment >= the size of the element type,
++   // even with +strict-align. Predicated SVE loads/stores (e.g. ld1/st1), used
++   // for stores that come from IR, only require element-size alignment (even if
++   // unaligned accesses are disabled). Without this, these will be forced to
++   // have 16-byte alignment with +strict-align (and fail to lower as we don't
++   // yet support TLI.expandUnalignedLoad() and TLI.expandUnalignedStore()).
++   if (VT.isScalableVector()) {
++     unsigned ElementSizeBits = VT.getScalarSizeInBits();
++     if (ElementSizeBits % 8 == 0 && Alignment >= Align(ElementSizeBits / 8))
++       return true;
++   }
++
+   if (Subtarget->requiresStrictAlign())
+     return false;
+ 
+@@ -26406,7 +26419,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
  }
  
  bool AArch64TargetLowering::useLoadStackGuardNode() const {
