https://github.com/chaitanyav updated https://github.com/llvm/llvm-project/pull/177290

From 610d5c3ad9459d3dc6bb8d6783b9754f245bbcfe Mon Sep 17 00:00:00 2001
From: NagaChaitanya Vellanki <[email protected]>
Date: Wed, 21 Jan 2026 16:32:41 -0800
Subject: [PATCH] [clang][test] Fix builtin-rotate.c failure on ARM32

Guard __int128 usage with #ifdef __SIZEOF_INT128__, since 32-bit ARM
targets do not provide this type; the 128-bit rotate checks move into a
separate test_int128_rotate function that is compiled only when the
type is available.

Fixes https://lab.llvm.org/buildbot/#/builders/79/builds/2754
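For context: Clang and GCC predefine __SIZEOF_INT128__ only on targets
where __int128 exists, so the guarded code simply drops out of the
translation unit on 32-bit ARM. A minimal standalone sketch of the
pattern (the helper name rotl128 is illustrative and not part of
builtin-rotate.c):

  /* Compiled only when the target provides a 128-bit integer type. */
  #ifdef __SIZEOF_INT128__
  unsigned __int128 rotl128(unsigned __int128 x) {
    /* Rotate left by 32 bits; lowers to @llvm.fshl.i128. */
    return __builtin_stdc_rotate_left(x, 32);
  }
  #endif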
---
 clang/test/CodeGen/builtin-rotate.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/clang/test/CodeGen/builtin-rotate.c b/clang/test/CodeGen/builtin-rotate.c
index 1f498c0294a58..789901a0b9ccf 100644
--- a/clang/test/CodeGen/builtin-rotate.c
+++ b/clang/test/CodeGen/builtin-rotate.c
@@ -72,7 +72,6 @@ long long rotr64(long long x, unsigned long long y) {
 // CHECK:  call i32 @llvm.fshl.i32(i32 %{{.*}}, i32 %{{.*}}, i32 8)
 // CHECK:  call i64 @llvm.fshl.i64(i64 %{{.*}}, i64 %{{.*}}, i64 8)
 // CHECK:  call i64 @llvm.fshl.i64(i64 %{{.*}}, i64 %{{.*}}, i64 16)
-// CHECK:  call i128 @llvm.fshl.i128(i128 %{{.*}}, i128 %{{.*}}, i128 32)
 // CHECK:  call i8 @llvm.fshl.i8(i8 %{{.*}}, i8 %{{.*}}, i8 7)
 // CHECK:  call i16 @llvm.fshl.i16(i16 %{{.*}}, i16 %{{.*}}, i16 11)
 // CHECK:  call i32 @llvm.fshl.i32(i32 %{{.*}}, i32 %{{.*}}, i32 29)
@@ -95,7 +94,7 @@ long long rotr64(long long x, unsigned long long y) {
 // CHECK:  call i8 @llvm.fshl.i8(i8 %{{.*}}, i8 %{{.*}}, i8 0)
 void test_builtin_stdc_rotate_left(uint8_t u8, uint16_t u16,
                                    uint32_t u32, uint64_t u64,
-                                   uint64_t u64_2, unsigned __int128 u128,
+                                   uint64_t u64_2,
                                    unsigned _BitInt(9) u9, unsigned _BitInt(37) u37,
                                    unsigned _BitInt(10) u10, unsigned _BitInt(16) u16_bit,
                                    unsigned _BitInt(24) u24, unsigned _BitInt(48) u48) {
@@ -105,7 +104,6 @@ void test_builtin_stdc_rotate_left(uint8_t u8, uint16_t u16,
   volatile uint32_t result_u32;
   volatile uint64_t result_u64;
   volatile uint64_t result_u64_2;
-  volatile unsigned __int128 result_u128;
   volatile unsigned _BitInt(9) result_u9;
   volatile unsigned _BitInt(37) result_u37;
   volatile unsigned _BitInt(10) result_u10;
@@ -118,7 +116,6 @@ void test_builtin_stdc_rotate_left(uint8_t u8, uint16_t u16,
   result_u32 = __builtin_stdc_rotate_left(u32, 8);
   result_u64 = __builtin_stdc_rotate_left(u64, 8);
   result_u64_2 = __builtin_stdc_rotate_left(u64_2, 16);
-  result_u128 = __builtin_stdc_rotate_left(u128, 32);
 
   result_u8 = __builtin_stdc_rotate_left(u8, -1);
   result_u16 = __builtin_stdc_rotate_left(u16, -5);
@@ -174,7 +171,6 @@ void test_builtin_stdc_rotate_left(uint8_t u8, uint16_t u16,
 // CHECK:  call i32 @llvm.fshr.i32(i32 %{{.*}}, i32 %{{.*}}, i32 8)
 // CHECK:  call i64 @llvm.fshr.i64(i64 %{{.*}}, i64 %{{.*}}, i64 8)
 // CHECK:  call i64 @llvm.fshr.i64(i64 %{{.*}}, i64 %{{.*}}, i64 16)
-// CHECK:  call i128 @llvm.fshr.i128(i128 %{{.*}}, i128 %{{.*}}, i128 32)
 // CHECK:  call i8 @llvm.fshr.i8(i8 %{{.*}}, i8 %{{.*}}, i8 7)
 // CHECK:  call i16 @llvm.fshr.i16(i16 %{{.*}}, i16 %{{.*}}, i16 13)
 // CHECK:  call i64 @llvm.fshr.i64(i64 %{{.*}}, i64 %{{.*}}, i64 48)
@@ -187,7 +183,7 @@ void test_builtin_stdc_rotate_left(uint8_t u8, uint16_t u16,
 // CHECK:  call i16 @llvm.fshl.i16(i16 %{{.*}}, i16 %{{.*}}, i16 1)
 void test_builtin_stdc_rotate_right(uint8_t u8, uint16_t u16,
                                     uint32_t u32, uint64_t u64,
-                                    uint64_t u64_2, unsigned __int128 u128,
+                                    uint64_t u64_2,
                                     unsigned _BitInt(9) u9, unsigned _BitInt(12) u12,
                                     unsigned _BitInt(20) u20, unsigned _BitInt(32) u32_bit) {
 
@@ -196,7 +192,6 @@ void test_builtin_stdc_rotate_right(uint8_t u8, uint16_t u16,
   volatile uint32_t result_u32;
   volatile uint64_t result_u64;
   volatile uint64_t result_u64_2;
-  volatile unsigned __int128 result_u128;
   volatile unsigned _BitInt(9) result_u9;
   volatile unsigned _BitInt(12) result_u12;
   volatile unsigned _BitInt(20) result_u20;
@@ -207,7 +202,6 @@ void test_builtin_stdc_rotate_right(uint8_t u8, uint16_t u16,
   result_u32 = __builtin_stdc_rotate_right(u32, 8);
   result_u64 = __builtin_stdc_rotate_right(u64, 8);
   result_u64_2 = __builtin_stdc_rotate_right(u64_2, 16);
-  result_u128 = __builtin_stdc_rotate_right(u128, 32);
 
   result_u8 = __builtin_stdc_rotate_right(u8, -1);
   result_u16 = __builtin_stdc_rotate_right(u16, -3);
@@ -302,3 +296,13 @@ void test_wider_shift_amount(uint8_t u8, uint16_t u16, uint32_t u32, unsigned _B
   result_u9 = __builtin_stdc_rotate_left((unsigned _BitInt(9))0x1FF, (int64_t)-2147483647);
 }
 
+#ifdef __SIZEOF_INT128__
+// CHECK-LABEL: test_int128_rotate
+// CHECK:  call i128 @llvm.fshl.i128(i128 %{{.*}}, i128 %{{.*}}, i128 32)
+// CHECK:  call i128 @llvm.fshr.i128(i128 %{{.*}}, i128 %{{.*}}, i128 32)
+void test_int128_rotate(unsigned __int128 u128) {
+  volatile unsigned __int128 result_u128;
+  result_u128 = __builtin_stdc_rotate_left(u128, 32);
+  result_u128 = __builtin_stdc_rotate_right(u128, 32);
+}
+#endif
