nvfuser-cu121-torch25 0.2.25.dev20250201__cp310-cp310-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,712 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+ #include <visibility.h>
+
+ #include <ir/base_nodes.h>
+ #include <ir/builder.h>
+ #include <ir/interface_nodes.h>
+ #include <type.h>
+ #include <type_promotion.h>
+
+ /*
+ * The operations defined in this header are intended as user-facing functions.
+ * Generally, users should not directly instantiate temporary TensorViews; they
+ * should instead use the functions below, which will automatically create IR
+ * nodes and return a resulting TensorView of correctly tracked shapes.
+ */
+
+ namespace nvfuser {
+
+ // Insertion of casting op to dtype, returns new resulting val
+ NVF_API Val* castOp(DataType dtype, Val* v1);
+ NVF_API TensorView* castOp(DataType dtype, TensorView* v1);
+ // If v1 is not dtype, insert a cast op, otherwise return v1
+ NVF_API Val* maybeCastOp(DataType dtype, Val* v1);
+ NVF_API TensorView* maybeCastOp(DataType dtype, TensorView* v1);
+
+ NVF_API Val* bitCastOp(DataType dtype, Val* v1);
+ NVF_API TensorView* bitCastOp(DataType dtype, TensorView* v1);
+
+ // Perform unary op type and return the output
+ NVF_API Val* unaryOp(UnaryOpType type, Val* v1);
+ NVF_API TensorView* unaryOp(UnaryOpType type, TensorView* v1);
+ NVF_API Val* unaryIsOp(UnaryOpType type, Val* v1);
+ TensorView* unaryIsOp(UnaryOpType type, TensorView* v1);
+ NVF_API Val* unaryOp(
+ UnaryOpType type,
+ Val* v1,
+ const TypePromotionConfig& config);
+ NVF_API TensorView* unaryOp(
+ UnaryOpType type,
+ TensorView* v1,
+ const TypePromotionConfig& config);
+
+ // Perform binary op type on v1 and v2 and return a type promoted output.
+ // Mod, CeilDiv, and LT are considered Int only output operations for now.
+ NVF_API Val* binaryOp(
+ BinaryOpType type,
+ Val* v1,
+ Val* v2,
+ DataType out_dtype = DataType::Null);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ TensorView* v1,
+ Val* v2,
+ DataType out_dtype = DataType::Null);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ Val* v1,
+ TensorView* v2,
+ DataType out_dtype = DataType::Null);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ TensorView* v1,
+ TensorView* v2,
+ DataType out_dtype = DataType::Null);
+
+ NVF_API Val* binaryOp(
+ BinaryOpType type,
+ Val* v1,
+ Val* v2,
+ const TypePromotionConfig& config);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ TensorView* v1,
+ Val* v2,
+ const TypePromotionConfig& config);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ Val* v1,
+ TensorView* v2,
+ const TypePromotionConfig& config);
+ NVF_API TensorView* binaryOp(
+ BinaryOpType type,
+ TensorView* v1,
+ TensorView* v2,
+ const TypePromotionConfig& config);
+
+ // Return a new TensorView consistent with reducing `tv` on specified `axes`
+ NVF_API TensorView* newForReduction(
+ TensorView* tv,
+ const std::vector<unsigned int>& axes,
+ DataType data_type = DataType::Null);
+
+ // Perform a reduction operation on v1, initial value for reduction is init,
+ // reduces across axes, and reduction operation defined by BinaryOp. Reduction
+ // of size-1 dimension is automatically converted to squeeze.
+ NVF_API TensorView* reductionOp(
+ BinaryOpType reduction_op_type,
+ const std::vector<int64_t>& axes,
+ Val* init,
+ TensorView* v1,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ // Just create a ReductionOp, don't try to simplify it. Don't convert size-1
+ // reduction into squeeze and don't convert size-0 reduction into full.
+ NVF_API TensorView* reductionOpRaw(
+ BinaryOpType reduction_op_type,
+ const std::vector<int64_t>& axes,
+ Val* init,
+ TensorView* v1,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ //! Auxiliary struct holding the result of
+ //! a single Welford op on a TensorView
+ class WelfordResult {
+ public:
+ TensorView* avg;
+ TensorView* var_sum;
+ TensorView* n;
+
+ explicit WelfordResult(
+ TensorView* in_avg,
+ TensorView* in_var_sum,
+ TensorView* in_n,
+ const bool check_definition = true);
+ };
+
+ //! Welford operator on specified axes. This is currently the only scan op with
+ //! multiple outputs that is supported. May consider generalization if more scan
+ //! ops are added.
+ NVF_API WelfordResult Welford(
+ TensorView* tv,
+ const std::vector<int64_t>& axes,
+ TensorView* init_avg = nullptr,
+ TensorView* init_var = nullptr,
+ // Initializes to 0 in function definition, doing this so we don't have to
+ // import IrBuilder just for this one interface.
+ Val* init_N = nullptr);
+
+ //! Create a raw WelfordOp. Don't convert size-1 or size-0 reduction into
+ //! squeeze/full.
+ WelfordResult WelfordRaw(
+ TensorView* tv,
+ const std::vector<int64_t>& axes,
+ TensorView* init_avg = nullptr,
+ TensorView* init_var = nullptr,
+ // Initializes to 0 in function definition, doing this so we don't have to
+ // import IrBuilder just for this one interface.
+ Val* init_N = nullptr);
+
+ // RNG OPERATIONS
+ NVF_API TensorView* rand(
+ const std::vector<Val*>& shape,
+ DataType dtype,
+ Val* philox_seed = nullptr,
+ Val* philox_offset = nullptr,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* rand_like(
+ TensorView*,
+ Val* philox_seed,
+ Val* philox_offset);
+ // Note that overloading these would be convenient, but overloaded functions are
+ // difficult to cast correctly. In the serde method
+ // RecordFunctorFactory::setupFunctionMaps(), the op is cast to, for example
+ // nvfuser::Val* (*)(nvfuser::Val*). In order to avoid errors due to that
+ // static_cast, we just implement the unary and ternary versions of the random
+ // *_like operators as separate functions.
+ NVF_API Val* rand_like(Val*, Val* philox_seed, Val* philox_offset);
+ NVF_API TensorView* rand_like(TensorView* tv);
+ NVF_API Val* rand_like(Val* val);
+
+ NVF_API TensorView* randn(
+ const std::vector<Val*>& shape,
+ DataType dtype,
+ Val* philox_seed = nullptr,
+ Val* philox_offset = nullptr,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* randn_like(
+ TensorView*,
+ Val* philox_seed,
+ Val* philox_offset);
+ NVF_API Val* randn_like(Val*, Val* philox_seed, Val* philox_offset);
+ NVF_API TensorView* randn_like(TensorView* tv);
+ NVF_API Val* randn_like(Val* val);
+
+ NVF_API TensorView* uniform(
+ const std::vector<Val*>& shape,
+ Val* low,
+ Val* high,
+ DataType dtype,
+ Val* philox_seed = nullptr,
+ Val* philox_offset = nullptr,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* normal(
+ const std::vector<Val*>& shape,
+ Val* mean,
+ Val* std,
+ DataType dtype,
+ Val* philox_seed = nullptr,
+ Val* philox_offset = nullptr,
+ bool maybe_symbolic = true);
+
+ // TENSOR FACTORIES
+ NVF_API TensorView* full(
+ const std::vector<Val*>& shape,
+ Val* fill_value,
+ DataType dtype,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* full_like(TensorView* tv, Val* fill_value, DataType dtype);
+ NVF_API TensorView* full_like(TensorView* tv, Val* fill_value);
+ Val* full_like(Val* tv, Val* fill_value);
+ NVF_API TensorView* zeros(
+ const std::vector<Val*>& shape,
+ DataType dtype,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* zeros_like(TensorView*);
+ Val* zeros_like(Val*);
+ NVF_API TensorView* ones(
+ const std::vector<Val*>& shape,
+ DataType dtype,
+ bool maybe_symbolic = true);
+ NVF_API TensorView* ones_like(TensorView*);
+ Val* ones_like(Val*);
+ NVF_API TensorView* iota(
+ Val* length,
+ Val* start = nullptr,
+ Val* step = nullptr,
+ DataType dtype = DataType::Int);
+ //! WARNING: giving invalid combinations of the start, end and step
+ //! arguments can result in undefined behavior. Specifically, the
+ //! signs of `end - start` and step must be the same.
+ NVF_API TensorView* arange(Val* end, DataType dtype = DataType::Int);
+ NVF_API TensorView* arange(
+ Val* start,
+ Val* end,
+ DataType dtype = DataType::Int);
+ NVF_API TensorView* arange(
+ Val* start,
+ Val* end,
+ Val* step,
+ DataType dtype = DataType::Int);
+ NVF_API TensorView* eye(Val* size, DataType dtype);
+ NVF_API TensorView* eye(Val* rows, Val* cols, DataType dtype);
+
+ // UNARY OPERATIONS
+ // abs
+ NVF_API Val* abs(Val*);
+ NVF_API TensorView* abs(TensorView*);
+ // acos
+ NVF_API Val* acos(Val*);
+ NVF_API TensorView* acos(TensorView*);
+ // acosh
+ NVF_API Val* acosh(Val*);
+ NVF_API TensorView* acosh(TensorView*);
+ // asin
+ NVF_API Val* asin(Val*);
+ NVF_API TensorView* asin(TensorView*);
+ // asinh
+ NVF_API Val* asinh(Val*);
+ NVF_API TensorView* asinh(TensorView*);
+ // atan
+ NVF_API Val* atan(Val*);
+ NVF_API TensorView* atan(TensorView*);
+ // atanh
+ NVF_API Val* atanh(Val*);
+ NVF_API TensorView* atanh(TensorView*);
+ // ceil
+ NVF_API Val* ceil(Val*);
+ NVF_API TensorView* ceil(TensorView*);
+ // cos
+ NVF_API Val* cos(Val*);
+ NVF_API TensorView* cos(TensorView*);
+ // cosh
+ NVF_API Val* cosh(Val*);
+ NVF_API TensorView* cosh(TensorView*);
+ // exp
+ NVF_API Val* exp(Val*);
+ NVF_API TensorView* exp(TensorView*);
+ // exp2
+ NVF_API Val* exp2(Val*);
+ NVF_API TensorView* exp2(TensorView*);
+ // expm1
+ NVF_API Val* expm1(Val*);
+ NVF_API TensorView* expm1(TensorView*);
+ // erf
+ NVF_API Val* erf(Val*);
+ NVF_API TensorView* erf(TensorView*);
+ // erfc
+ NVF_API Val* erfc(Val*);
+ NVF_API TensorView* erfc(TensorView*);
+ // erfinv
+ NVF_API Val* erfinv(Val*);
+ NVF_API TensorView* erfinv(TensorView*);
+ // erfcinv
+ NVF_API Val* erfcinv(Val*);
+ NVF_API TensorView* erfcinv(TensorView*);
+ // floor
+ NVF_API Val* floor(Val*);
+ NVF_API TensorView* floor(TensorView*);
+ // frac
+ NVF_API Val* frac(Val*);
+ NVF_API TensorView* frac(TensorView*);
+ // silu
+ NVF_API Val* silu(Val*);
+ NVF_API TensorView* silu(TensorView*);
+ // lgamma
+ NVF_API Val* lgamma(Val*);
+ NVF_API TensorView* lgamma(TensorView*);
+ // log
+ NVF_API Val* log(Val*);
+ NVF_API TensorView* log(TensorView*);
+ // log10
+ NVF_API Val* log10(Val*);
+ NVF_API TensorView* log10(TensorView*);
+ // log1p
+ NVF_API Val* log1p(Val*);
+ NVF_API TensorView* log1p(TensorView*);
+ // log2
+ NVF_API Val* log2(Val*);
+ NVF_API TensorView* log2(TensorView*);
+ // neg
+ NVF_API Val* neg(Val*);
+ NVF_API TensorView* neg(TensorView*);
+ // logical_not
+ NVF_API Val* logical_not(Val*);
+ NVF_API TensorView* logical_not(TensorView*);
+ // bitwise_not
+ NVF_API Val* bitwise_not(Val*);
+ NVF_API TensorView* bitwise_not(TensorView*);
+ // real
+ NVF_API Val* real(Val*);
+ NVF_API TensorView* real(TensorView*);
+ // reciprocal
+ NVF_API Val* reciprocal(Val*);
+ NVF_API TensorView* reciprocal(TensorView*);
+ // relu
+ NVF_API Val* relu(Val*);
+ NVF_API TensorView* relu(TensorView*);
+ // rsqrt
+ NVF_API Val* rsqrt(Val*);
+ NVF_API TensorView* rsqrt(TensorView*);
+ // round
+ NVF_API Val* round(Val*);
+ NVF_API TensorView* round(TensorView*);
+ // sigmoid
+ NVF_API Val* sigmoid(Val*);
+ NVF_API TensorView* sigmoid(TensorView*);
+ // signbit
+ NVF_API Val* signbit(Val*);
+ NVF_API TensorView* signbit(TensorView*);
+ // sin
+ NVF_API Val* sin(Val*);
+ NVF_API TensorView* sin(TensorView*);
+ // sinh
+ NVF_API Val* sinh(Val*);
+ NVF_API TensorView* sinh(TensorView*);
+ // sqrt
+ NVF_API Val* sqrt(Val*);
+ NVF_API TensorView* sqrt(TensorView*);
+ // tan
+ NVF_API Val* tan(Val*);
+ NVF_API TensorView* tan(TensorView*);
+ // tanh
+ NVF_API Val* tanh(Val*);
+ NVF_API TensorView* tanh(TensorView*);
+ // trunc
+ NVF_API Val* trunc(Val*);
+ NVF_API TensorView* trunc(TensorView*);
+ // bitwise_not
+ NVF_API Val* bitwise_not(Val*);
+ NVF_API TensorView* bitwise_not(TensorView*);
+ // imag
+ NVF_API Val* imag(Val*);
+ NVF_API TensorView* imag(TensorView*);
+ // isfinite
+ NVF_API Val* isfinite(Val*);
+ NVF_API TensorView* isfinite(TensorView*);
+ // isinf
+ NVF_API Val* isinf(Val*);
+ NVF_API TensorView* isinf(TensorView*);
+ // isnan
+ NVF_API Val* isnan(Val*);
+ NVF_API TensorView* isnan(TensorView*);
+ // isneginf
+ NVF_API Val* isneginf(Val*);
+ NVF_API TensorView* isneginf(TensorView*);
+ // isposinf
+ NVF_API Val* isposinf(Val*);
+ NVF_API TensorView* isposinf(TensorView*);
+ // isreal
+ NVF_API Val* isreal(Val*);
+ NVF_API TensorView* isreal(TensorView*);
+ // print
+ NVF_API Val* print(Val*);
+ NVF_API TensorView* print(TensorView*);
+
+ // This function gives the symbolic shape of a tensor, for use
+ // with functions like broadcast_in_dim that take a shape vector
+ // used to expand an input tensor
+ NVF_API std::vector<Val*> shape(TensorView* inp);
+ // Get the symbolic size of a specific dimension of a tensor
+ NVF_API Val* size(TensorView* inp, int64_t dim);
+ NVF_API Val* at(const std::vector<Val*>& inp, int64_t index);
+
+ // BINARY OPERATIONS
+ // add
+ NVF_API Val* add(Val* v1, Val* v2);
+ NVF_API TensorView* add(TensorView* v1, Val* v2);
+ NVF_API TensorView* add(Val* v1, TensorView* v2);
+ NVF_API TensorView* add(TensorView* v1, TensorView* v2);
+ // atan2
+ NVF_API Val* atan2(Val* v1, Val* v2);
+ NVF_API TensorView* atan2(TensorView* v1, Val* v2);
+ NVF_API TensorView* atan2(Val* v1, TensorView* v2);
+ NVF_API TensorView* atan2(TensorView* v1, TensorView* v2);
+ // truediv: promote to float for integer division; has the same semantics as
+ // Python's operator /
+ NVF_API Val* truediv(Val* v1, Val* v2);
+ NVF_API TensorView* truediv(TensorView* v1, Val* v2);
+ NVF_API TensorView* truediv(Val* v1, TensorView* v2);
+ NVF_API TensorView* truediv(TensorView* v1, TensorView* v2);
+ // div: don't promote to float; instead, truncate the result. This has the same
+ // semantics as C++'s operator /
+ NVF_API Val* div(Val* v1, Val* v2);
+ NVF_API TensorView* div(TensorView* v1, Val* v2);
+ NVF_API TensorView* div(Val* v1, TensorView* v2);
+ NVF_API TensorView* div(TensorView* v1, TensorView* v2);
+ // fmod
+ NVF_API Val* fmod(Val* v1, Val* v2);
+ NVF_API TensorView* fmod(TensorView* v1, Val* v2);
+ NVF_API TensorView* fmod(Val* v1, TensorView* v2);
+ NVF_API TensorView* fmod(TensorView* v1, TensorView* v2);
+ // mul
+ NVF_API Val* mul(Val* v1, Val* v2);
+ NVF_API TensorView* mul(TensorView* v1, Val* v2);
+ NVF_API TensorView* mul(Val* v1, TensorView* v2);
+ NVF_API TensorView* mul(TensorView* v1, TensorView* v2);
+ // pow
+ NVF_API Val* pow(Val* v1, Val* v2);
+ NVF_API TensorView* pow(TensorView* v1, Val* v2);
+ NVF_API TensorView* pow(Val* v1, TensorView* v2);
+ NVF_API TensorView* pow(TensorView* v1, TensorView* v2);
+ // remainder
+ NVF_API Val* remainder(Val* v1, Val* v2);
+ NVF_API TensorView* remainder(TensorView* v1, Val* v2);
+ NVF_API TensorView* remainder(Val* v1, TensorView* v2);
+ NVF_API TensorView* remainder(TensorView* v1, TensorView* v2);
+ // sub
+ NVF_API Val* sub(Val* v1, Val* v2);
+ NVF_API TensorView* sub(TensorView* v1, Val* v2);
+ NVF_API TensorView* sub(Val* v1, TensorView* v2);
+ NVF_API TensorView* sub(TensorView* v1, TensorView* v2);
+ // maximum
+ NVF_API Val* maximum(Val* v1, Val* v2);
+ NVF_API TensorView* maximum(TensorView* v1, Val* v2);
+ NVF_API TensorView* maximum(Val* v1, TensorView* v2);
+ NVF_API TensorView* maximum(TensorView* v1, TensorView* v2);
+ // minimum
+ NVF_API Val* minimum(Val* v1, Val* v2);
+ NVF_API TensorView* minimum(TensorView* v1, Val* v2);
+ NVF_API TensorView* minimum(Val* v1, TensorView* v2);
+ NVF_API TensorView* minimum(TensorView* v1, TensorView* v2);
+ // nextafter: Only single- or double-precision
+ // floating point types (after promotion) are supported.
+ NVF_API Val* nextafter(Val* v1, Val* v2);
+ NVF_API TensorView* nextafter(TensorView* v1, Val* v2);
+ NVF_API TensorView* nextafter(Val* v1, TensorView* v2);
+ NVF_API TensorView* nextafter(TensorView* v1, TensorView* v2);
+ // Integer binary ops
+ // mod
+ NVF_API Val* mod(Val* v1, Val* v2);
+ NVF_API TensorView* mod(TensorView* v1, Val* v2);
+ NVF_API TensorView* mod(Val* v1, TensorView* v2);
+ NVF_API TensorView* mod(TensorView* v1, TensorView* v2);
+ // ceilDiv
+ NVF_API Val* ceilDiv(Val* v1, Val* v2);
+ TensorView* ceilDiv(TensorView* v1, Val* v2);
+ TensorView* ceilDiv(Val* v1, TensorView* v2);
+ TensorView* ceilDiv(TensorView* v1, TensorView* v2);
+ // Bitwise and logical binary ops
+ // bitwise_and
+ NVF_API Val* bitwise_and(Val* v1, Val* v2);
+ NVF_API TensorView* bitwise_and(TensorView* v1, Val* v2);
+ NVF_API TensorView* bitwise_and(Val* v1, TensorView* v2);
+ NVF_API TensorView* bitwise_and(TensorView* v1, TensorView* v2);
+ // logical_and
+ NVF_API Val* logical_and(Val* v1, Val* v2);
+ NVF_API TensorView* logical_and(TensorView* v1, Val* v2);
+ NVF_API TensorView* logical_and(Val* v1, TensorView* v2);
+ NVF_API TensorView* logical_and(TensorView* v1, TensorView* v2);
+ // bitwise_left_shift
+ NVF_API Val* bitwise_left_shift(Val* v1, Val* v2);
+ NVF_API TensorView* bitwise_left_shift(TensorView* v1, Val* v2);
+ NVF_API TensorView* bitwise_left_shift(Val* v1, TensorView* v2);
+ NVF_API TensorView* bitwise_left_shift(TensorView* v1, TensorView* v2);
+ // bitwise_right_shift
+ NVF_API Val* bitwise_right_shift(Val* v1, Val* v2);
+ NVF_API TensorView* bitwise_right_shift(TensorView* v1, Val* v2);
+ NVF_API TensorView* bitwise_right_shift(Val* v1, TensorView* v2);
+ NVF_API TensorView* bitwise_right_shift(TensorView* v1, TensorView* v2);
+ // logical_right_shift
+ NVF_API TensorView* logical_right_shift(TensorView* x, TensorView* shift);
+ NVF_API TensorView* logical_right_shift(TensorView* x, Val* shift);
+ NVF_API TensorView* logical_right_shift(Val* x, TensorView* shift);
+ NVF_API Val* logical_right_shift(Val* x, Val* shift);
+ // bitwise_or
+ NVF_API Val* bitwise_or(Val* v1, Val* v2);
+ NVF_API TensorView* bitwise_or(TensorView* v1, Val* v2);
+ NVF_API TensorView* bitwise_or(Val* v1, TensorView* v2);
+ NVF_API TensorView* bitwise_or(TensorView* v1, TensorView* v2);
+ // logical_or
+ NVF_API Val* logical_or(Val* v1, Val* v2);
+ NVF_API TensorView* logical_or(TensorView* v1, Val* v2);
+ NVF_API TensorView* logical_or(Val* v1, TensorView* v2);
+ NVF_API TensorView* logical_or(TensorView* v1, TensorView* v2);
+ // bitwise_xor
+ NVF_API Val* bitwise_xor(Val* v1, Val* v2);
+ NVF_API TensorView* bitwise_xor(TensorView* v1, Val* v2);
+ NVF_API TensorView* bitwise_xor(Val* v1, TensorView* v2);
+ NVF_API TensorView* bitwise_xor(TensorView* v1, TensorView* v2);
+ // gcd
+ NVF_API Val* gcd(Val* v1, Val* v2);
+ NVF_API TensorView* gcd(TensorView* v1, Val* v2);
+ NVF_API TensorView* gcd(Val* v1, TensorView* v2);
+ NVF_API TensorView* gcd(TensorView* v1, TensorView* v2);
+ // Logical binary ops
+ // eq
+ NVF_API Val* eq(Val* v1, Val* v2);
+ NVF_API TensorView* eq(TensorView* v1, Val* v2);
+ NVF_API TensorView* eq(Val* v1, TensorView* v2);
+ NVF_API TensorView* eq(TensorView* v1, TensorView* v2);
+ // ge
+ NVF_API Val* ge(Val* v1, Val* v2);
+ NVF_API TensorView* ge(TensorView* v1, Val* v2);
+ NVF_API TensorView* ge(Val* v1, TensorView* v2);
+ NVF_API TensorView* ge(TensorView* v1, TensorView* v2);
+ // gt
+ NVF_API Val* gt(Val* v1, Val* v2);
+ NVF_API TensorView* gt(TensorView* v1, Val* v2);
+ NVF_API TensorView* gt(Val* v1, TensorView* v2);
+ NVF_API TensorView* gt(TensorView* v1, TensorView* v2);
+ // le
+ NVF_API Val* le(Val* v1, Val* v2);
+ NVF_API TensorView* le(TensorView* v1, Val* v2);
+ NVF_API TensorView* le(Val* v1, TensorView* v2);
+ NVF_API TensorView* le(TensorView* v1, TensorView* v2);
+ // lt
+ NVF_API Val* lt(Val* v1, Val* v2);
+ NVF_API TensorView* lt(TensorView* v1, Val* v2);
+ NVF_API TensorView* lt(Val* v1, TensorView* v2);
+ NVF_API TensorView* lt(TensorView* v1, TensorView* v2);
+ // ne
+ NVF_API Val* ne(Val* v1, Val* v2);
+ NVF_API TensorView* ne(TensorView* v1, Val* v2);
+ NVF_API TensorView* ne(Val* v1, TensorView* v2);
+ NVF_API TensorView* ne(TensorView* v1, TensorView* v2);
+
+ // complex
+ Val* complex(Val* v1, Val* v2);
+ TensorView* complex(TensorView* v1, Val* v2);
+ TensorView* complex(Val* v1, TensorView* v2);
+ TensorView* complex(TensorView* v1, TensorView* v2);
+
+ // REDUCTION OPERATIONS
+ NVF_API TensorView* sum(
+ TensorView* v1,
+ const std::vector<int64_t>& reduction_axes,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ NVF_API TensorView* prod(
+ TensorView* v1,
+ const std::vector<int64_t>& reduction_axes,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ NVF_API TensorView* max(
+ TensorView* v1,
+ const std::vector<int64_t>& reduction_axes,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ NVF_API TensorView* min(
+ TensorView* v1,
+ const std::vector<int64_t>& reduction_axes,
+ bool keep_dim = false,
+ DataType dtype = DataType::Null);
+
+ // COMPOUND OPERATIONS
+ // add_alpha
+ NVF_API Val* add_alpha(Val* v1, Val* v2, Val* s);
+ NVF_API TensorView* add_alpha(TensorView* v1, Val* v2, Val* s);
+ NVF_API TensorView* add_alpha(Val* v1, TensorView* v2, Val* s);
+ NVF_API TensorView* add_alpha(TensorView* v1, TensorView* v2, Val* s);
+ // sub_alpha
+ NVF_API Val* sub_alpha(Val* v1, Val* v2, Val* s);
+ NVF_API TensorView* sub_alpha(TensorView* v1, Val* v2, Val* s);
+ NVF_API TensorView* sub_alpha(Val* v1, TensorView* v2, Val* s);
+ NVF_API TensorView* sub_alpha(TensorView* v1, TensorView* v2, Val* s);
+ // lerp
+ NVF_API Val* lerp(Val* start, Val* end, Val* weight);
+ NVF_API TensorView* lerp(TensorView* start, Val* end, Val* weight);
+ NVF_API TensorView* lerp(Val* start, TensorView* end, Val* weight);
+ NVF_API TensorView* lerp(Val* start, Val* end, TensorView* weight);
+ NVF_API TensorView* lerp(TensorView* start, TensorView* end, Val* weight);
+ NVF_API TensorView* lerp(TensorView* start, Val* end, TensorView* weight);
+ NVF_API TensorView* lerp(Val* start, TensorView* end, TensorView* weight);
+ NVF_API TensorView* lerp(
+ TensorView* start,
+ TensorView* end,
+ TensorView* weight);
+
+ // addcmul
+ NVF_API Val* addcmul(Val* v1, Val* v2, Val* v3, Val* s);
+ NVF_API TensorView* addcmul(TensorView* v1, Val* v2, Val* v3, Val* s);
+ NVF_API TensorView* addcmul(Val* v1, TensorView* v2, Val* v3, Val* s);
+ NVF_API TensorView* addcmul(Val* v1, Val* v2, TensorView* v3, Val* s);
+ NVF_API TensorView* addcmul(TensorView* v1, TensorView* v2, Val* v3, Val* s);
+ NVF_API TensorView* addcmul(TensorView* v1, Val* v2, TensorView* v3, Val* s);
+ NVF_API TensorView* addcmul(Val* v1, TensorView* v2, TensorView* v3, Val* s);
+ NVF_API TensorView* addcmul(
+ TensorView* v1,
+ TensorView* v2,
+ TensorView* v3,
+ Val* s);
+
+ // TERNARY OPERATIONS
+ // where
+ NVF_API Val* where(Val* c, Val* v1, Val* v2);
+ NVF_API TensorView* where(TensorView* c, Val* v1, Val* v2);
+ NVF_API TensorView* where(Val* c, TensorView* v1, Val* v2);
+ NVF_API TensorView* where(Val* c, Val* v1, TensorView* v2);
+ NVF_API TensorView* where(TensorView* c, TensorView* v1, Val* v2);
+ NVF_API TensorView* where(TensorView* c, Val* v1, TensorView* v2);
+ NVF_API TensorView* where(Val* c, TensorView* v1, TensorView* v2);
+ NVF_API TensorView* where(TensorView* c, TensorView* v1, TensorView* v2);
+ // threshold
+ NVF_API Val* threshold(Val* in, Val* thresh, Val* value);
+ NVF_API TensorView* threshold(TensorView* in, Val* thresh, Val* value);
+ // clamp
+ NVF_API Val* clamp(Val* in, Val* min_val, Val* max_val);
+ NVF_API TensorView* clamp(TensorView* in, Val* min_val, Val* max_val);
+
+ //! Internal operator for supporting backward graphs
+ //!
+ //! example:
+ //! v1 = T1 [I0(10),I1(20),I2(30),I3(40)]
+ //! v2 = sum_to(v1,{30,1}) ------> v2 = T2[I2,R3 (keep_dim)]
+ //!
+ //! This operator will return v1 directly if the sizes of v1's root domain
+ //! are already the same as shape.
+ //!
+ //! The name of sum_to differs from nvFuser naming;
+ //! this is to align with the operator name of at::sum_to.
+
+ NVF_API TensorView* sum_to(
+ TensorView* v1,
+ const std::vector<Val*>& sum_to_size);
+
+ NVF_API TensorView* sum_to(
+ TensorView* v1,
+ const std::vector<int64_t>& sum_to_size);
+
+ // Append a new IterDomain to the end of a TensorView to allow
+ // iterating on a vector type. The input tensor must have
+ // a vector dtype.
+ TensorView* viewAsScalar(TensorView* inp);
+
+ //! A fused pointwise multiply and sum
+ //! operator that instantiates the following
+ //! fused pattern:
+ //! c = mul(tv_a, tv_b);
+ //! return sum(c, axes)
+ //!
+ //! \param tv_a first multiply operand
+ //! \param tv_b second multiply operand
+ //! \param axes axes to sum over, relative to output loop domain
+ //! \param init sum initial value
+ //! \param axis_mapping_opt mapping from output axes to operand axes
+ //!
+ //! Note & TODO:
+ //! currently this interface only supports lowering to an mma op
+ //! and only supports fp16 inputs.
+ //! Converting back to multiply and reduce will be supported in
+ //! a follow-up.
+ NVF_API TensorView* fusedMultiplySum(
+ TensorView* tv_a,
+ TensorView* tv_b,
+ const std::vector<int64_t>& axes,
+ Val* init = nullptr,
+ const std::optional<MmaOp::AxisMapping>& axis_mapping_opt = std::nullopt);
+
+ // Create a tensor view from the given value. The given value can be a single
+ // scalar, an array of scalars, or a nested array of scalars.
+ NVF_API TensorView* tensor(Val* val);
+
+ template <typename T>
+ NVF_API TensorView* tensor(const std::vector<T>& vals) {
+ return tensor(IrBuilder::arrayExpr(vals));
+ }
+
+ } // namespace nvfuser
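
The header above only declares the user-facing ops. The sketch below is not part of the wheel; it is a minimal usage example showing how these free functions compose inside a fusion definition, assuming that Fusion, FusionGuard, TensorViewBuilder, and IrBuilder::create from the other packaged headers (fusion.h, fusion_guard.h, ir/interface_nodes.h, ir/builder.h) behave as in the upstream nvFuser sources; only castOp, mul, add, and sum come from the ops/arith.h shown above.

// Illustrative sketch only, not shipped in this wheel. Fusion, FusionGuard,
// TensorViewBuilder, and IrBuilder::create are assumed to behave as in the
// upstream nvFuser sources.
#include <fusion.h>
#include <fusion_guard.h>
#include <ir/builder.h>
#include <ir/interface_nodes.h>
#include <ops/arith.h>

using namespace nvfuser;

void buildScaleAndRowSumFusion(Fusion* fusion) {
  FusionGuard fg(fusion); // the ops below record IR into *fusion

  // Two symbolic rank-2 fp16 inputs; concrete shapes are bound at run time.
  TensorView* tv0 = TensorViewBuilder().ndims(2).dtype(DataType::Half).build();
  TensorView* tv1 = TensorViewBuilder().ndims(2).dtype(DataType::Half).build();
  fusion->addInput(tv0);
  fusion->addInput(tv1);

  // Compute in fp32: cast, multiply elementwise, then add a scalar constant.
  TensorView* x = castOp(DataType::Float, tv0);
  TensorView* y = castOp(DataType::Float, tv1);
  TensorView* xy = mul(x, y);
  TensorView* shifted = add(xy, IrBuilder::create<Val>(1.0));

  // Reduce over axis 1 (keep_dim defaults to false).
  TensorView* row_sum = sum(shifted, {1});
  fusion->addOutput(row_sum);
}

Each call (castOp, mul, add, sum) creates the corresponding IR node in the active Fusion and returns a new TensorView with a correctly tracked shape, which is why no temporary TensorViews are instantiated by hand, exactly as the header comment advises.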