tlively created this revision.
tlively added a reviewer: aheejin.
Herald added subscribers: wingo, ecnelises, sunfish, jgravelle-google, sbc100, dschuff.
tlively requested review of this revision.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

- Add `const` to the pointer operands of load builtins.
- Make the inputs to all narrowing builtins signed, matching how the
underlying instructions interpret them (only the sign of the result
differs between the signed and unsigned narrowing instructions). A
brief usage sketch follows below.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D101883

Files:
  clang/include/clang/Basic/BuiltinsWebAssembly.def
  clang/test/CodeGen/builtins-wasm.c

Index: clang/test/CodeGen/builtins-wasm.c
===================================================================
--- clang/test/CodeGen/builtins-wasm.c
+++ clang/test/CodeGen/builtins-wasm.c
@@ -284,28 +284,28 @@
   // WEBASSEMBLY-NEXT: ret
 }
 
-i8x16 load8_lane(signed char *p, i8x16 v) {
+i8x16 load8_lane(const signed char *p, i8x16 v) {
   return __builtin_wasm_load8_lane(p, v, 0);
   // WEBASSEMBLY: tail call <16 x i8> @llvm.wasm.load8.lane(
   // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }
 
-i16x8 load16_lane(short *p, i16x8 v) {
+i16x8 load16_lane(const short *p, i16x8 v) {
   return __builtin_wasm_load16_lane(p, v, 0);
   // WEBASSEMBLY: tail call <8 x i16> @llvm.wasm.load16.lane(
   // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }
 
-i32x4 load32_lane(int *p, i32x4 v) {
+i32x4 load32_lane(const int *p, i32x4 v) {
   return __builtin_wasm_load32_lane(p, v, 0);
   // WEBASSEMBLY: tail call <4 x i32> @llvm.wasm.load32.lane(
   // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }
 
-i64x2 load64_lane(long long *p, i64x2 v) {
+i64x2 load64_lane(const long long *p, i64x2 v) {
   return __builtin_wasm_load64_lane(p, v, 0);
   // WEBASSEMBLY: tail call <2 x i64> @llvm.wasm.load64.lane(
   // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0)
@@ -857,7 +857,7 @@
   // WEBASSEMBLY: ret
 }
 
-u8x16 narrow_u_i8x16_i16x8(u16x8 low, u16x8 high) {
+u8x16 narrow_u_i8x16_i16x8(i16x8 low, i16x8 high) {
   return __builtin_wasm_narrow_u_i8x16_i16x8(low, high);
   // WEBASSEMBLY: call <16 x i8> @llvm.wasm.narrow.unsigned.v16i8.v8i16(
   // WEBASSEMBLY-SAME: <8 x i16> %low, <8 x i16> %high)
@@ -871,7 +871,7 @@
   // WEBASSEMBLY: ret
 }
 
-u16x8 narrow_u_i16x8_i32x4(u32x4 low, u32x4 high) {
+u16x8 narrow_u_i16x8_i32x4(i32x4 low, i32x4 high) {
   return __builtin_wasm_narrow_u_i16x8_i32x4(low, high);
   // WEBASSEMBLY: call <8 x i16> @llvm.wasm.narrow.unsigned.v8i16.v4i32(
   // WEBASSEMBLY-SAME: <4 x i32> %low, <4 x i32> %high)
@@ -904,13 +904,13 @@
   // WEBASSEMBLY: ret
 }
 
-i32x4 load32_zero(int *p) {
+i32x4 load32_zero(const int *p) {
   return __builtin_wasm_load32_zero(p);
   // WEBASSEMBLY: call <4 x i32> @llvm.wasm.load32.zero(i32* %p)
   // WEBASSEMBLY: ret
 }
 
-i64x2 load64_zero(long long *p) {
+i64x2 load64_zero(const long long *p) {
   return __builtin_wasm_load64_zero(p);
   // WEBASSEMBLY: call <2 x i64> @llvm.wasm.load64.zero(i64* %p)
   // WEBASSEMBLY: ret
Index: clang/include/clang/Basic/BuiltinsWebAssembly.def
===================================================================
--- clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -185,22 +185,22 @@
 TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i32x4_f32x4, "V4iV4f", "nc", "simd128")
 
 TARGET_BUILTIN(__builtin_wasm_narrow_s_i8x16_i16x8, "V16ScV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8UsV8Us", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8sV8s", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4iV4i", "nc", "simd128")
 
 TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_demote_zero_f64x2_f32x4, "V4fV2d", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_promote_low_f32x4_f64x2, "V2dV4f", "nc", "simd128")
 
-TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*C", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*C", "n", "simd128")
 
-TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*V16ScIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*V8sIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*V4iIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*V2LLiIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*CV16ScIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*CV8sIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*CV4iIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*CV2LLiIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store8_lane, "vSc*V16ScIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store16_lane, "vs*V8sIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store32_lane, "vi*V4iIi", "n", "simd128")