nvfuser_cu121_torch25-0.2.25.dev20250201-cp310-cp310-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/device_lower/utils.h
@@ -0,0 +1,382 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+
+ #pragma once
+
+ #include <exceptions.h>
+ #include <visibility.h>
+
+ #include <compute_at_map.h>
+ #include <ir/all_nodes.h>
+ #include <kernel_ir.h>
+ #include <parallel_type_bitmap.h>
+ #include <val_graph.h>
+
+ #include <bitset>
+ #include <map>
+
+ // Provides utilities for dealing with nested ForLoop and IfThenElse scopes
+
+ namespace nvfuser {
+
+ class ThreadPredicateMap;
+
+ using IterDomainMap = std::unordered_map<IterDomain*, IterDomain*>;
+
+ namespace scope_utils {
+
+ //! Create an **empty** ForLoop and copy the metadata.
+ ForLoop* cloneForLoop(ForLoop* for_loop);
+
+ //! Create an **empty** IfThenElse and copy the metadata.
+ kir::IfThenElse* cloneIfThenElse(kir::IfThenElse* ite);
+
+ } // namespace scope_utils
+
+ namespace ir_utils {
+
+ // Sometimes we want to temporarily view a TensorView with another TensorDomain.
+ // This isn't a permanent transformation, but in indexing we want to index
+ // producers with a consumer set of indices, so we need to view the producer
+ // transformed like the consumer while we index. This will set the tv with td
+ // for the life of this context guard.
+ class TVDomainGuard {
+  private:
+   TensorView* tv_;
+   TensorDomain* prev_domain_;
+
+  public:
+   explicit TVDomainGuard(TensorView* tv, TensorDomain* td);
+   TVDomainGuard(const TVDomainGuard&) = delete;
+   NVF_API TVDomainGuard(TVDomainGuard&&);
+
+   //! A utility to access the TensorDomain before the temporary
+   //! view. This is used to retrieve information, like swizzle
+   //! information, that can only be reliably kept at the original domain.
+   const TensorDomain* prevDomain() const {
+     return prev_domain_;
+   }
+
+   NVF_API ~TVDomainGuard();
+ };
+
+ // Create a TVDomainGuard that temporarily views a TensorView with the specified
+ // all-true or all-false contiguity.
+ NVF_API ir_utils::TVDomainGuard overrideContiguityGuard(
+     TensorView* tv,
+     bool contiguity);
+
+ // Create a TVDomainGuard that temporarily sets the allocation domain to the
+ // getLogicalDomain() of a TensorView; the contiguity flags are filled all true
+ // or all false.
+ ir_utils::TVDomainGuard allocateToLogicalDomainGuard(
+     TensorView* tv,
+     bool contiguity);
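+
+ // A minimal usage sketch, assuming a TensorView* named `producer_tv` is in
+ // scope; the guard restores the original domain when it goes out of scope
+ // (RAII):
+ //
+ //   {
+ //     ir_utils::TVDomainGuard guard =
+ //         ir_utils::overrideContiguityGuard(producer_tv, /*contiguity=*/true);
+ //     // ... index producer_tv while it temporarily reports all-true
+ //     // contiguity; guard.prevDomain() still exposes the original domain ...
+ //   } // the original TensorDomain is restored here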
+
+ //! Return inputs of provided IterDomains that are IterDomains. A list
+ //! of input IterDomains can optionally be given. Otherwise,
+ //! IterDomains with no defining expression are returned.
+ std::vector<IterDomain*> iterDomainInputsOf(
+     const std::vector<IterDomain*>& input_ids,
+     const std::vector<IterDomain*>& all_inputs = {});
+
+ // Return inputs of provided IterDomains that are IterDomains, ordered as the
+ // second provided vector.
+ std::vector<IterDomain*> iterDomainInputsOfOrderedAs(
+     const std::vector<IterDomain*>& of,
+     const std::vector<IterDomain*>& order);
+
+ // Returns whether the Val is a TensorView or TensorIndex
+ bool isTV(const Val* const);
+
+ // Returns whether the Expr is a TensorView or TensorIndex Expr.
+ NVF_API bool isTvOp(const Expr*);
+
+ //! Returns the IterDomain that maps to the thread dimension grouped
+ //! to warps. Returns nullopt if the reduction is not to be lowered to
+ //! a warp reduction.
+ std::optional<std::pair<IterDomain*, IterDomain*>> getMaybeWarpReductionDim(
+     const Val* output,
+     const Val* input);
+
+ bool isScalarOp(const Expr*);
+
+ bool isIterDomainOp(const Expr*);
+
+ //! Get a TensorView, potentially via kir::TensorIndex. Returns nullptr if the
+ //! cast fails.
+ TensorView* getTv(Val*);
+ const TensorView* getTv(const Val*);
+
+ //! Get only TensorViews, potentially via kir::TensorIndex.
+ std::vector<TensorView*> getTvs(const std::vector<Val*>& vals);
+
+ std::unordered_map<ParallelType, IterDomain*> getParallelDomains(
+     const Val* val);
+
+ //! Returns true if the expression will be lowered to
+ //! an ldmatrix intrinsic.
+ bool isLdMatrixOp(const Expr* expr);
+
+ bool isStMatrixOp(const Expr* expr);
+
+ //! Returns true if the expression will be lowered to
+ //! a cp.async intrinsic.
+ bool isCpAsyncOp(const Expr* expr);
+
+ //! Returns true if the expression will be lowered to
+ //! a cp.async.bulk (a.k.a. TMA) intrinsic.
+ bool isCpAsyncBulkLoad(const Expr* expr);
+ bool isCpAsyncBulkStore(const Expr* expr);
+ bool isCpAsyncBulk(const Expr* expr);
+
+ //! Short-cut for detecting initialization for a cpAsync op.
+ bool isCpAsyncInit(const Expr* expr);
+
+ //! Short-cut for matching a singleton expr in an if statement,
+ //! which likely becomes a predicated instruction in PTX, e.g.:
+ //!   if(...) {expr;}
+ //! Returns the expr if it matches this pattern.
+ //! Returns nullopt if the pattern doesn't match.
+ std::optional<Expr*> getMaybePredicatedSingleton(Expr* expr);
+
+ //! Short-cut for checking if the expression loads from global memory.
+ bool isGlobalLoad(const Expr* expr);
+
+ //! Short-cut for checking if the given expression initializes buffers
+ //! for a global memory load.
+ bool isGlobalLoadInit(const Expr* expr);
+
+ //! Returns true if the given expression fills the output
+ //! tensor with a single scalar.
+ bool isTensorScalarFillOp(const Expr* expr);
+
+ //! Flattens all the scoped exprs, i.e. ForLoop and IfThenElse,
+ //! and returns all the exprs in all scopes in the original
+ //! linear textual order.
+ NVF_API std::vector<Expr*> flattenScopedExprs(
+     const std::vector<Expr*>& loop_nests);
+
+ //! Returns all swizzle ops between the set of IterDomains
+ //! in `from` and `to`.
+ std::vector<Expr*> getAllSwizzlesBetween(
+     std::vector<IterDomain*> from,
+     std::vector<IterDomain*> to);
+
+ // Replace-value pass on Kernel IR.
+ // Replaces each use of any Val* that appears in the given `replacement_map`.
+ // Keeps the predicate carried by each expr.
+ //
+ // Warning: Blindly replaces all uses based on pointer identity.
+ // Warning: May invalidate indexing if replacing uses of allocated values.
+ std::vector<Expr*> replaceInputsInExpr(
+     const std::vector<Expr*>& exprs,
+     const std::unordered_map<Val*, Val*>& replacement_map);
+
+ } // namespace ir_utils
+
+ namespace lower_utils {
+
+ bool hasBlockSync(const Expr* expr, const ThreadPredicateMap& pred_map);
+
+ // Allocate a global buffer for grid communication calls, i.e. grid reduce,
+ // grid welford reduce, grid broadcast.
+ kir::Allocate* allocGlobalBufferForGridComm(
+     Val* buffer_size,
+     DataType dtype,
+     bool zero_init,
+     bool resets_to_zero = false);
+
+ struct BasicAllocInfo {
+   // The for loop that the initialization of this allocation must be
+   // placed in, nullptr if not within a loop
+   ForLoop* init_for_loop = nullptr;
+
+   // Keep track of the actual allocation loop. This can be different
+   // from init_for_loop only with unswitched shared memory allocations,
+   // which are moved to outer loops to avoid duplicated allocations. This means
+   // that the alloc position may be outside what's expected. Most applications
+   // outside lower_allocation are likely looking for init_for_loop, which is
+   // more directly related to how large an allocation is and how it's used
+   // (see issue #1133).
+   ForLoop* alloc_for_loop = nullptr;
+
+   // The allocation position relative to buffer IDs. It could be outside the
+   // compute-at position if it's shared memory with a compute-at inside an
+   // unswitch.
+   int64_t alloc_pos = 0;
+ };
+
+ // Fill the above allocation struct based on provided information. id_map is
+ // used if we're looking at a producer tensor but loops on a consumer tensor.
+ BasicAllocInfo getAllocInformation(
+     const TensorView* tv,
+     const std::vector<ForLoop*>& loops,
+     const std::unordered_map<IterDomain*, IterDomain*>& id_map = {},
+     bool use_id_map = false);
+
+ //! Returns true if the expression has a variant that takes a predicate
+ //! as an inline argument.
+ bool supportInlinePredicate(Expr* expr);
+
+ //! Test if an expression is a scalar expression.
+ bool isScalarExpr(Expr* expr);
+
+ //! Test if the provided IterDomain has an extent that matches the maximum
+ //! extent stored in the parallel dimension map for the parallel type of the
+ //! provided IterDomain. `in_compute_warp` specifies that we are checking an
+ //! expression in the compute warp; if so, we need to get the parallel type
+ //! extent of the compute warp instead of the global parallel type extent.
+ bool isExtentEqualToMaxParallelTypeExtent(
+     const IterDomain* id,
+     bool in_compute_warp = false);
+
+ //! Get the uint32_t index of a scalar TensorView. This is usually used for
+ //! indexing special items in shared memory, like mbarrier.
+ NVF_API Val* u32IndexScalarSmemTv(TensorView* tv);
+
+ //! Get the uint32_t index of a TensorIndex. This is usually used for
+ //! initializing a pipeline of mbarriers.
+ NVF_API Val* u32IndexScalarSmemTv(kir::TensorIndex* index);
+
+ //! Get the size of a global sync buffer needed to perform a grid reduction for
+ //! each axis in bitmap.
+ Val* getGridSyncBufferSize(const ParallelTypeBitmap& bitmap);
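+
+ // A usage sketch combining the two helpers declared above; sizing the buffer
+ // with getGridSyncBufferSize and zero-initializing it is an assumption made
+ // for illustration:
+ //
+ //   ParallelTypeBitmap grid_dims; // axes participating in the grid reduction
+ //   kir::Allocate* sync_buf = allocGlobalBufferForGridComm(
+ //       getGridSyncBufferSize(grid_dims), DataType::Int, /*zero_init=*/true);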
+
+ //! Returns the fusion outputs that require codegen.
+ //! The fusion outputs to be computed through the expression evaluator are
+ //! filtered out.
+ std::vector<Val*> getFusionOutputsRequiringCodegen(Fusion* fusion);
+
+ //! Get the number of threads in a tensor view. Note that this function
+ //! only cares about the given tensor view itself, not the entire fusion.
+ //! That is, for example, if the tensor view is [TIDx{3}], but the entire
+ //! fusion has blockDim.x = 128, this function will return 3 instead of 128.
+ Val* getNumThreadsInTensorView(TensorView* tv);
+
+ //! Get the unit dimensions of A and B for the given MmaOp.
+ std::array<UnitDim, 2> getMmaLayout(const MmaOp* expr);
+
+ // Returns true if expr is an expression that initializes a reduction
+ // buffer.
+ bool isReductionInitExpr(const Expr* expr);
+
+ // Return true if it is sufficient to predicate the end of the loop
+ // iteration. An aligned vectorized loop is one example where it is
+ // guaranteed to be valid by the validation checks. More generally,
+ // the divisible split set is used to find such loops. The divisible
+ // split set contains splits used in view transformations as well as
+ // those whose output domains are vectorized. View transformations
+ // guarantee that any split involved is divisible, whereas
+ // vectorization only guarantees that the overall root extent is
+ // divisible by the split factor. Thus, if a loop IterDomain is
+ // an output of a split included in the divisible view splits, we can
+ // just predicate the end of the loop iteration. If a loop IterDomain
+ // is an output of a divisible split due to vectorization, it is only
+ // valid when the loop IterDomain is mapped with the vectorized inner
+ // output IterDomain. If it is mapped with an outer IterDomain, since
+ // the split input IterDomain may be an output IterDomain of a
+ // non-divisible split, we still need to predicate each loop iteration
+ // value.
+ bool predicateAtEnd(ForLoop* loop);
+
+ // Given linear_g and domain, prove that linear_g is linear with respect to
+ // domain and return the stride. linear_g is linear with respect to domain if
+ // there exists a strided view of domain such that linear_g is one of the
+ // axes of that strided view. Usually, linear_g is a group in the loop domain of
+ // some tensor, and domain is the allocation domain of some tensor. In this
+ // case, if the index of linear_g is i, then this function proves that the index
+ // is a linear function of i, with the linear coefficient being the return
+ // value. Note that this function does the proof and stride calculation in a
+ // best-effort manner. It cannot cover all linear cases. If the return value is
+ // nullptr, it can be either because linear_g is not linear with respect to
+ // domain, or because linear_g is actually linear with respect to domain, but it
+ // is too hard for this function to find a proof.
+ Val* proveLinearAndGetStride(
+     const ValGraph& id_graph,
+     const ValGroup& linear_g,
+     const ValGroups& domain);
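+
+ // A hand-worked sketch (the concrete extents are assumed for the example):
+ // suppose domain is the allocation domain {I0{4}, I1{8}} with I1 innermost and
+ // contiguous, and linear_g is the ValGroup of I0. Advancing the index of I0 by
+ // one advances the allocation index by 8, so the proof succeeds and the
+ // returned stride is 8. If linear_g instead came from some non-affine
+ // recombination of I0 and I1, the function may return nullptr even if a stride
+ // exists, since the proof is best-effort.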
+
+ // Get the concrete loop domain of a given loop ID
+ IterDomain* getConcreteLoopID(IterDomain* loop_id);
+
+ // Go through all expressions and compute a local ordering of loops. operator<
+ // is implemented based on the concrete_id_dependencies analysis. If there's no
+ // dependency between two IDs then the order doesn't matter; otherwise we can
+ // tell which is innermost by checking for a dependency relationship.
+ //
+ // Dependency relationships in concrete_id_dependencies have a "global" view of
+ // the fusion, so the ordering can be resolved by only looking at the IDs and
+ // the dependency map.
+ //
+ // For example, two expressions may have domains [I0] and [I1], yet we
+ // won't know the ordering unless we see a domain with [I0, I1]. This happened
+ // in the Indexing9 test (also see Indexing17) when merging T5 with
+ // the group containing T10 (cache of T5, which is the post-broadcasted output)
+ // and T6 (the pre-broadcasted output).
+ // T5 had the domain [0, 1, 2, 3, 4] produce at 3
+ // T6 had the domain [0, 3, 4] compute at 3
+ // Merging [0, 1, 2] and [0, 3, 4] resulted in the domain [0, 3, 4, 1, 2]
+ //
+ // If IDs are not in the filter, we don't care about their ordering and ignore
+ // them. This is because we're only focused on loops we will have to merge
+ // across groups. If the domain is not in a produce-at position in the producer
+ // edges, or a compute-at position in the consumer edges, the expressions we
+ // look at may not have a unique ordering.
+ //
+ // The optional kernel_scope_domain parameter is only used in
+ // expression sorting. It isn't in the CA map, but since we only have
+ // a single unique IterDomain, the concrete ID is just itself.
+ struct IterDomainDependencySorter {
+   IterDomainDependencySorter(
+       const std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>&
+           concrete_id_dependencies,
+       IterDomain* kernel_scope_domain = nullptr)
+       : concrete_id_dependencies_(concrete_id_dependencies),
+         kernel_scope_domain_(kernel_scope_domain) {}
+
+   // Return true if id0 should be before id1
+   // Orders such that if x maps to {y}, x comes before y in the final ordering.
+   inline bool operator()(IterDomain* id0, IterDomain* id1) {
+     auto concrete_id_0 =
+         id0 != kernel_scope_domain_ ? getConcreteLoopID(id0) : id0;
+     auto concrete_id_1 =
+         id1 != kernel_scope_domain_ ? getConcreteLoopID(id1) : id1;
+     if (concrete_id_dependencies_.find(concrete_id_0) !=
+         concrete_id_dependencies_.end()) {
+       const auto& dependencies_0 = concrete_id_dependencies_.at(concrete_id_0);
+       // If id0 depends on id1, it means id1 is inside id0, so id0 < id1.
+       if (dependencies_0.count(concrete_id_1)) {
+         return true;
+       }
+     }
+
+     return false;
+   }
+
+   const std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>&
+       concrete_id_dependencies_;
+   const IterDomain* kernel_scope_domain_ = nullptr;
+ };
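+
+ // A minimal sketch of how this comparator could be plugged into a sort;
+ // `loop_ids` and `deps` are hypothetical names, and using std::stable_sort
+ // here is an assumption rather than the actual call site:
+ //
+ //   std::vector<IterDomain*> loop_ids = /* loop IDs to order */;
+ //   std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>> deps =
+ //       /* concrete ID -> IDs nested inside it */;
+ //   std::stable_sort(
+ //       loop_ids.begin(), loop_ids.end(), IterDomainDependencySorter(deps));
+ //   // Afterwards, an ID precedes every ID recorded as one of its
+ //   // dependencies, i.e. outer loops come before the loops nested inside them.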
+
+ // Check if all the inputs of the given MmaOp are guarded by mbarrier
+ bool allMmaInputsGuardedByMBarrier(const MmaOp* mma);
+
+ // Create a list of expressions that will be used to wait for async operations.
+ // For example, if async_type is AsyncOpType::WgMma, then the returned
+ // expressions will be:
+ //   wgmma.commit_group.sync.aligned
+ //   wgmma.wait_group.sync.aligned
+ std::vector<Expr*> getSyncExprs(
+     AsyncOpType async_type,
+     int64_t keep_stages = 0);
+
+ } // namespace lower_utils
+
+ } // namespace nvfuser
nvfuser/include/nvfuser/device_lower/validation.h
@@ -0,0 +1,74 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+
+ #include <ir/all_nodes.h>
+
+ namespace nvfuser {
+
+ class ContigIDs;
+
+ void validateIr(Fusion* fusion);
+
+ //! Validate vectorization and collect information on vectorization
+ //! used in code generation as well as runtime validation.
+ void validateAndCollectVectorizeInfo(Fusion* fusion);
+
+ //! Find the contig allocation domains that a vectorized loop domain
+ //! of a consumer TV depends on. Required for runtime validation.
+ void fillConsumerVectorizedContigAllocationDomains(
+     const TensorView* consumer_tv,
+     const ContigIDs& contig_finder);
+
+ //! Find the contig allocation domains that a vectorized loop domain
+ //! of a producer TV depends on. Required for runtime validation.
+ //! Producer must be transformed as consumer.
+ void fillProducerVectorizedContigAllocationDomains(
+     const TensorView* producer_tv,
+     const TensorView* consumer_tv,
+     const ContigIDs& contig_finder);
+
+ //! Validate data format and GPU arch compatibility of scheduled
+ //! mma operators on the fusion.
+ void validateMma(Fusion* fusion);
+
+ //! Validates swizzle ops to ensure consistent indexing:
+ //!  - Currently swizzle ops are only allowed to the right of the CA axis,
+ //!  - (Except ZShape) All swizzle ops have to be on const-sized IDs,
+ //!  - Xor and Transpose swizzles have to have equal dimensions on the
+ //!    participating IDs.
+ void validateSwizzle(Fusion* fusion);
+
+ //! Validate use of ParallelType::Group. It is currently only allowed
+ //! in ReductionOp and not in WelfordOp. Group has similar constraints
+ //! as Vectorize, e.g., it can only be used with IterDomains with
+ //! static extents. Differences are, e.g., that it has no constraints on
+ //! alignments and predicates. Each individual reduction has its own
+ //! predicate, so it is possible for only part of the grouped reductions to
+ //! be executed.
+ //!
+ //! Also, grouping is only enabled for persistent grid reductions, in
+ //! other words, grid allreduces. Note that no grid reduction without
+ //! broadcast is persistent anymore.
+ //!
+ //! A validated ReductionOp with ParallelType::Group is converted to
+ //! GroupedReductionOp.
+ void validateAndConvertIterDomainGrouping(Fusion* fusion);
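+
+ // A rough scheduling sketch, assuming `tv_sum` is the output TensorView of a
+ // ReductionOp and that the grouped axis has a static extent; the exact
+ // scheduling call shown is an assumption for illustration:
+ //
+ //   tv_sum->axis(0)->parallelize(ParallelType::Group);
+ //   // During lowering, validateAndConvertIterDomainGrouping() checks the
+ //   // constraints above and rewrites the ReductionOp into a
+ //   // GroupedReductionOp covering the grouped iterations.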
+
+ //! Validate that the number of grouped reductions is within the limit
+ void validateGroupedReductions(Fusion* fusion);
+
+ //! Validate that all of the lookup TVs are fusion inputs
+ void validateLookupTV(Fusion* fusion);
+
+ //! Check that there are no reductions over unexpanded broadcasts
+ void validateReductions(Fusion* fusion);
+
+ } // namespace nvfuser