[NVPTX] Update various intrinsic attributes #140119

Open
AlexMaclean wants to merge 1 commit into main

Conversation

AlexMaclean
Member

No description provided.

@llvmbot
Member

llvmbot commented May 15, 2025

@llvm/pr-subscribers-llvm-ir

Author: Alex MacLean (AlexMaclean)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/140119.diff

1 File Affected:

  • (modified) llvm/include/llvm/IR/IntrinsicsNVVM.td (+115-106)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index a95c739f1331d..3e5f8f83210d6 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -831,7 +831,7 @@ let TargetPrefix = "nvvm" in {
 //
 // Sad
 //
-  let IntrProperties = [IntrNoMem, Commutative, IntrSpeculatable] in {
+  let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
     foreach sign = ["", "u"] in {
       def int_nvvm_sad_ # sign # s : NVVMBuiltin,
           DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty]>;
@@ -1150,41 +1150,40 @@ let TargetPrefix = "nvvm" in {
       def int_nvvm_bf2h_rn # ftz : NVVMBuiltin,
           DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_bfloat_ty]>;
     }
-  }
-  let IntrProperties = [IntrNoMem, IntrNoCallback] in {
+
     foreach rnd = ["rn", "rz"] in {
       foreach relu = ["", "_relu"] in {
         def int_nvvm_ff2bf16x2_ # rnd # relu : NVVMBuiltin,
-            Intrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty]>;
+            DefaultAttrsIntrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty]>;
 
         def int_nvvm_ff2f16x2_ # rnd # relu : NVVMBuiltin,
-            Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty]>;
+            DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty]>;
 
         def int_nvvm_f2bf16_ # rnd # relu : NVVMBuiltin,
-            Intrinsic<[llvm_bfloat_ty], [llvm_float_ty]>;
+            DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_float_ty]>;
       }
     }
 
     foreach satfinite = ["", "_satfinite"] in {
       def int_nvvm_f2tf32_rna # satfinite : NVVMBuiltin,
-          Intrinsic<[llvm_i32_ty], [llvm_float_ty]>;
+          DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty]>;
 
       foreach rnd = ["rn", "rz"] in
         foreach relu = ["", "_relu"] in
           def int_nvvm_f2tf32_ # rnd # relu # satfinite : NVVMBuiltin,
-              Intrinsic<[llvm_i32_ty], [llvm_float_ty]>;
+              DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty]>;
     }
 
     foreach type = ["e4m3x2", "e5m2x2"] in {
       foreach relu = ["", "_relu"] in {
         def int_nvvm_ff_to_ # type # _rn # relu : NVVMBuiltin,
-            Intrinsic<[llvm_i16_ty], [llvm_float_ty, llvm_float_ty]>;
+            DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_float_ty, llvm_float_ty]>;
 
         def int_nvvm_f16x2_to_ # type # _rn # relu : NVVMBuiltin,
-            Intrinsic<[llvm_i16_ty], [llvm_v2f16_ty]>;
+            DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_v2f16_ty]>;
 
         def int_nvvm_ # type # _to_f16x2_rn # relu : NVVMBuiltin,
-            Intrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>;
+            DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>;
       }
     }
 
@@ -1222,8 +1221,9 @@ let TargetPrefix = "nvvm" in {
     }
 
     def int_nvvm_ue8m0x2_to_bf16x2 : NVVMBuiltin,
-        Intrinsic<[llvm_v2bf16_ty], [llvm_i16_ty]>;
-  }
+        DefaultAttrsIntrinsic<[llvm_v2bf16_ty], [llvm_i16_ty]>;
+
+  } // IntrProperties = [IntrNoMem, IntrSpeculatable]
 
 // FNS
   def int_nvvm_fns : NVVMBuiltin,
@@ -1440,18 +1440,22 @@ def int_nvvm_internal_addrspace_wrap :
 
 // Move intrinsics, used in nvvm internally
 
-def int_nvvm_move_i16 : Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem]>;
-def int_nvvm_move_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
-def int_nvvm_move_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
-def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
-def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
-def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty], [IntrNoMem, NoCapture<ArgIndex<0>>]>;
+let IntrProperties = [IntrNoMem] in {
+  def int_nvvm_move_i16 : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i16_ty]>;
+  def int_nvvm_move_i32 : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
+  def int_nvvm_move_i64 : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty]>;
+  def int_nvvm_move_float : DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty]>;
+  def int_nvvm_move_double : DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty]>;
+  def int_nvvm_move_ptr : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty]>;
+}
 
 // For getting the handle from a texture or surface variable
-def int_nvvm_texsurf_handle
-  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty], [IntrNoMem]>;
-def int_nvvm_texsurf_handle_internal
-  : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty], [IntrNoMem]>;
+let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
+  def int_nvvm_texsurf_handle
+    : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty]>;
+  def int_nvvm_texsurf_handle_internal
+    : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+}
 
 /// Error / Warn
 def int_nvvm_compiler_error : Intrinsic<[], [llvm_anyptr_ty]>;
@@ -1472,106 +1476,111 @@ foreach i = 0...31 in
     DefaultAttrsIntrinsic<[llvm_i32_ty], [],
               [IntrNoMem, IntrSpeculatable, NoUndef<RetIndex>]>;
 
+//
+// Texture Fetch
+//
+let IntrProperties = [IntrReadMem] in {
+  foreach is_unified = [true, false] in {
+    defvar mode = !if(is_unified, "_unified", "");
+    defvar addr_args = !if(is_unified, [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]);
 
-foreach is_unified = [true, false] in {
-  defvar mode = !if(is_unified, "_unified", "");
-  defvar addr_args = !if(is_unified, [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]);
-
-  // Texture Fetch
-  foreach vec = [V4F32, V4S32, V4U32] in {
-    foreach is_array = [true, false] in {
-      defvar array = !if(is_array, "_array", "");
-      defvar array_args = !if(is_array, [llvm_i32_ty], []<LLVMType>);
-
-      def int_nvvm_tex # mode # _1d # array # _ # vec.Name # _s32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_i32_ty, 1))>;
-      def int_nvvm_tex # mode # _1d # array # _ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 1))>;
-      def int_nvvm_tex # mode # _1d # array # _level_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 2))>;
-      def int_nvvm_tex # mode # _1d # array # _grad_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
-
-      def int_nvvm_tex # mode # _2d # array # _ # vec.Name # _s32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_i32_ty, 2))>;
-      def int_nvvm_tex # mode # _2d # array # _ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 2))>;
-      def int_nvvm_tex # mode # _2d # array # _level_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
-      def int_nvvm_tex # mode # _2d # array # _grad_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 6))>;
+    foreach vec = [V4F32, V4S32, V4U32] in {
+      foreach is_array = [true, false] in {
+        defvar array = !if(is_array, "_array", "");
+        defvar array_args = !if(is_array, [llvm_i32_ty], []<LLVMType>);
 
-      if !not(is_array) then {
-        def int_nvvm_tex # mode # _3d_ # vec.Name # _s32
+        def int_nvvm_tex # mode # _1d # array # _ # vec.Name # _s32
           : Intrinsic<vec.Types,
-                      !listconcat(addr_args, !listsplat(llvm_i32_ty, 3))>;
-        def int_nvvm_tex # mode # _3d_ # vec.Name # _f32
+                      !listconcat(addr_args, array_args, !listsplat(llvm_i32_ty, 1))>;
+        def int_nvvm_tex # mode # _1d # array # _ # vec.Name # _f32
           : Intrinsic<vec.Types,
-                      !listconcat(addr_args, !listsplat(llvm_float_ty, 3))>;
-        def int_nvvm_tex # mode # _3d_level_ # vec.Name # _f32
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 1))>;
+        def int_nvvm_tex # mode # _1d # array # _level_ # vec.Name # _f32
           : Intrinsic<vec.Types,
-                      !listconcat(addr_args, !listsplat(llvm_float_ty, 4))>;
-        def int_nvvm_tex # mode # _3d_grad_ # vec.Name # _f32
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 2))>;
+        def int_nvvm_tex # mode # _1d # array # _grad_ # vec.Name # _f32
           : Intrinsic<vec.Types,
-                      !listconcat(addr_args, !listsplat(llvm_float_ty, 9))>;
-      }
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
 
-      def int_nvvm_tex # mode # _cube # array # _ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
-      def int_nvvm_tex # mode # _cube # array # _level_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 4))>;
+        def int_nvvm_tex # mode # _2d # array # _ # vec.Name # _s32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, array_args, !listsplat(llvm_i32_ty, 2))>;
+        def int_nvvm_tex # mode # _2d # array # _ # vec.Name # _f32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 2))>;
+        def int_nvvm_tex # mode # _2d # array # _level_ # vec.Name # _f32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
+        def int_nvvm_tex # mode # _2d # array # _grad_ # vec.Name # _f32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 6))>;
+
+        if !not(is_array) then {
+          def int_nvvm_tex # mode # _3d_ # vec.Name # _s32
+            : Intrinsic<vec.Types,
+                        !listconcat(addr_args, !listsplat(llvm_i32_ty, 3))>;
+          def int_nvvm_tex # mode # _3d_ # vec.Name # _f32
+            : Intrinsic<vec.Types,
+                        !listconcat(addr_args, !listsplat(llvm_float_ty, 3))>;
+          def int_nvvm_tex # mode # _3d_level_ # vec.Name # _f32
+            : Intrinsic<vec.Types,
+                        !listconcat(addr_args, !listsplat(llvm_float_ty, 4))>;
+          def int_nvvm_tex # mode # _3d_grad_ # vec.Name # _f32
+            : Intrinsic<vec.Types,
+                        !listconcat(addr_args, !listsplat(llvm_float_ty, 9))>;
+        }
 
-      if is_unified then
-        def int_nvvm_tex # mode # _cube # array # _grad_ # vec.Name # _f32
+        def int_nvvm_tex # mode # _cube # array # _ # vec.Name # _f32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 3))>;
+        def int_nvvm_tex # mode # _cube # array # _level_ # vec.Name # _f32
           : Intrinsic<vec.Types,
-                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 9))>;
-    } // is_array
+                      !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 4))>;
 
-    foreach comp = ["r", "g", "b", "a"] in {
-      def int_nvvm_tld4 # mode # _ # comp # _2d_ # vec.Name # _f32
-        : Intrinsic<vec.Types,
-                    !listconcat(addr_args, !listsplat(llvm_float_ty, 2))>;
-    } // comp
-  } // vec
-} // is_unified
+        if is_unified then
+          def int_nvvm_tex # mode # _cube # array # _grad_ # vec.Name # _f32
+            : Intrinsic<vec.Types,
+                        !listconcat(addr_args, array_args, !listsplat(llvm_float_ty, 9))>;
+      } // is_array
+
+      foreach comp = ["r", "g", "b", "a"] in {
+        def int_nvvm_tld4 # mode # _ # comp # _2d_ # vec.Name # _f32
+          : Intrinsic<vec.Types,
+                      !listconcat(addr_args, !listsplat(llvm_float_ty, 2))>;
+      } // comp
+    } // vec
+  } // is_unified
+} // IntrProperties = [IntrReadMem]
 
 //=== Surface Load
-foreach clamp = ["clamp", "trap", "zero"] in {
-  foreach vec = [TV_I8, TV_I16, TV_I32, TV_I64,
-                 TV_V2I8, TV_V2I16, TV_V2I32, TV_V2I64,
-                 TV_V4I8, TV_V4I16, TV_V4I32] in {
+let IntrProperties = [IntrReadMem] in {
+  foreach clamp = ["clamp", "trap", "zero"] in {
+    foreach vec = [TV_I8, TV_I16, TV_I32, TV_I64,
+                  TV_V2I8, TV_V2I16, TV_V2I32, TV_V2I64,
+                  TV_V4I8, TV_V4I16, TV_V4I32] in {
 
-    def int_nvvm_suld_1d_ # vec.Name # _ # clamp
-      : Intrinsic<vec.Types,
-                  [llvm_i64_ty, llvm_i32_ty]>;
+      def int_nvvm_suld_1d_ # vec.Name # _ # clamp
+        : Intrinsic<vec.Types,
+                    [llvm_i64_ty, llvm_i32_ty]>;
 
-    def int_nvvm_suld_1d_array_ # vec.Name # _ # clamp
-      : Intrinsic<vec.Types,
-                  [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty]>;
+      def int_nvvm_suld_1d_array_ # vec.Name # _ # clamp
+        : Intrinsic<vec.Types,
+                    [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty]>;
 
-    def int_nvvm_suld_2d_ # vec.Name # _ # clamp
-      : Intrinsic<vec.Types,
-                  [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty]>;
+      def int_nvvm_suld_2d_ # vec.Name # _ # clamp
+        : Intrinsic<vec.Types,
+                    [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty]>;
 
-    def int_nvvm_suld_2d_array_ # vec.Name # _ # clamp
-      : Intrinsic<vec.Types,
-                  [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
+      def int_nvvm_suld_2d_array_ # vec.Name # _ # clamp
+        : Intrinsic<vec.Types,
+                    [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
 
-    def int_nvvm_suld_3d_ # vec.Name # _ # clamp
-      : Intrinsic<vec.Types,
-                  [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
-  } // vec
-} // clamp
+      def int_nvvm_suld_3d_ # vec.Name # _ # clamp
+        : Intrinsic<vec.Types,
+                    [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
+    } // vec
+  } // clamp
+} // IntrProperties = [IntrReadMem]
 
 //===- Texture Query ------------------------------------------------------===//
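
For context on the mechanics of this patch: most of the change is switching definitions from Intrinsic to DefaultAttrsIntrinsic and hoisting shared properties into enclosing "let IntrProperties = ... in" blocks. As a rough sketch of what that buys, assuming the usual shape of the wrapper in llvm/include/llvm/IR/Intrinsics.td (the exact default-property list can differ between LLVM versions), DefaultAttrsIntrinsic appends a fixed set of conservative default attributes to whatever properties a definition already lists:

  // Approximate shape of the upstream wrapper class (not part of this patch);
  // it forwards to Intrinsic and tacks on the default properties.
  class DefaultAttrsIntrinsic<list<LLVMType> ret_types,
                              list<LLVMType> param_types = [],
                              list<IntrinsicProperty> intr_properties = []>
    : Intrinsic<ret_types, param_types,
                !listconcat(intr_properties,
                            [IntrNoCallback, IntrNoSync, IntrNoFree, IntrWillReturn])>;

Read against that sketch, the sad intrinsics lose their explicit Commutative marker, the conversion intrinsics that previously sat under [IntrNoMem, IntrNoCallback] pick up IntrSpeculatable and get IntrNoCallback from the defaults instead, and the move and texsurf_handle intrinsics gain the default attributes they previously lacked, while the texture-fetch and surface-load intrinsics are grouped under let IntrProperties = [IntrReadMem] but remain plain Intrinsic.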
 

@llvmbot
Member

llvmbot commented May 15, 2025

@llvm/pr-subscribers-backend-nvptx

Author: Alex MacLean (AlexMaclean)
