nvfuser_cu121_torch25-0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,26 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <exceptions.h>
+
+ #include <ir/all_nodes.h>
+ #include <kernel_ir.h>
+
+ #include <vector>
+
+ namespace nvfuser {
+
+ //! Detect ReductionOps that have serialGridReductionRequested() == true. When
+ //! found, confirm that no conflicting operations exist, then place sync nodes
+ //! before and after the outermost non-parallelized loop.
+ std::vector<Expr*> insertGridSerializationSyncs(
+ const std::vector<Expr*>& exprs);
+
+ } // namespace nvfuser
@@ -0,0 +1,200 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+
+ #include <instrumentation.h>
+ #include <kernel_ir.h>
+ #include <kernel_ir_dispatch.h>
+ #include <logical_domain_map.h>
+
+ #include <unordered_set>
+ #include <vector>
+
+ namespace nvfuser {
+
+ // TODO: Replace with mutator as IndexLowering is replacing exprs with
+ // versions that are doing indexing
+ class IndexLowering : private OptOutConstDispatch {
+ public:
+ static std::vector<Expr*> getIndexedExprs(std::vector<Expr*> incoming_exprs);
+
+ private:
+ IndexLowering() = default;
+
+ void pushBack(Expr*);
+
+ // Return the most recently inserted expression in the current active
+ // scope or global scope.
+ Expr* back() const;
+
+ // Insert an expression before the current top-level expression.
+ void insertAtTopLevel(Expr* expr);
+
+ void handle(const FullOp*) final;
+ void handle(const IotaOp*) final;
+ void handle(const EyeOp*) final;
+ void handle(const ViewAsScalar*) final;
+ void handle(const UnaryOp*) final;
+ void handle(const BinaryOp*) final;
+ void handle(const TernaryOp*) final;
+ void handle(const ArrayConstruct*) final;
+ void handle(const StructConstruct*) final;
+ void handle(const GetAttr*) final;
+ void handle(const GetItem*) final;
+ void handle(const GetMetaData*) final;
+ void handle(const TensorConstruct*) final;
+ void handle(const SelectOp*) final;
+ void handle(const IndexSelectOp*) final;
+ void handle(const TorchGatherOp*) final;
+ void handle(const ScatterOp*) final;
+ void handle(const RNGOp*) final;
+ void handle(const ReductionOp*) final;
+ void handle(const GroupedReductionOp*) final;
+ void handle(const WelfordOp*) final;
+ void handle(const GroupedWelfordOp*) final;
+ void handle(const LoadStoreOp*) final;
+ void handle(const MmaOp*) final;
+ void handle(const BroadcastOp*) final;
+ void handle(const PadOp*) final;
+ void handle(const SliceOp*) final;
+ void handle(const CatOp*) final;
+
+ void handle(const kir::Asm*) final;
+ void handle(const ForLoop*) final;
+ void handle(const kir::IfThenElse*) final;
+ void handle(const kir::Allocate*) final;
+ void handle(const kir::BlockSync*) final;
+ void handle(const kir::GridSync*) final;
+ void handle(const kir::FenceAsyncProxy*) final;
+ void handle(const kir::WgMmaFence*) final;
+ void handle(const kir::SetMaxNReg*) final;
+ void handle(const kir::Return*) final;
+ void handle(const kir::MBarrierInit*) final;
+ void handle(const kir::MBarrierInvalidate*) final;
+ void handle(const kir::MBarrierArrive*) final;
+ void handle(const kir::MBarrierArriveExpectTx*) final;
+ void handle(const kir::MBarrierWait*) final;
+ void handle(const kir::MBarrierWaitParity*) final;
+ void handle(const kir::AsyncWait*) final;
+ void handle(const kir::AsyncCommit*) final;
+ void handle(const kir::BlockSerializeWait*) final;
+ void handle(const kir::BlockSerializeRelease*) final;
+
+ void generate(const std::vector<Expr*>& exprs);
+
+ // Get the loop in which the currently visiting expr is a rotated expr.
+ const std::unordered_set<ForLoop*>& getRotatedLoop() const {
+ return rotated_loop_;
+ }
+
+ // Lower the index for a producer. The `override_index` is a mapping
+ // `id->index`, where `id` must be an IterDomain in the rFactor domain of the
+ // producer. This can be used to manually set the index for the given rFactor
+ // ID. Currently, this `override_index` is only used by indexing ops like
+ // select/index_select.
+ // The argument `generate_pointer` specifies whether to generate a pointer for
+ // the tensor. For a global tensor, generate T1.data. For a shared memory
+ // tensor, use the `cvta` ptx instruction to convert the shared memory address
+ // to an unsigned int for indexing. Search `toSmem` in the codebase for
+ // additional information. This argument is effective only if the indexed
+ // tensor is a shared memory or global tensor; on other memory types it will
+ // cause an error.
+ Val* lowerSrcIndex(
+ Val* val,
+ Val* dst,
+ const std::unordered_map<IterDomain*, Val*>& override_index = {},
+ bool generate_pointer = false,
+ DataType as_type = DataType::Null) const;
+
+ Val* lowerDstIndex(
+ Val* dst,
+ const std::unordered_map<int, Val*>& override_index = {},
+ bool generate_pointer = false,
+ DataType as_type = DataType::Null) const;
+
+ void handleCpAsyncBulkLoad(const LoadStoreOp* ldst);
+ void handleCpAsyncBulkStore(const LoadStoreOp* ldst);
+
+ void handleBlockReduction(const ReductionOp* rop, Val* out, Val* in);
+ void handleGridReduction(const ReductionOp* rop, Val* out, Val* in);
+ //! Called by handleGridReduction when rop is lowered as a serial grid
+ //! reduction.
+ void handleSerialGridReduction(const ReductionOp* rop, Val* out, Val* in);
+
+ void handleBlockReduction(
+ const GroupedReductionOp* rop,
+ const std::vector<Val*>& outputs,
+ const std::vector<Val*>& inputs);
+ void handleGridReduction(
+ const GroupedReductionOp* rop,
+ const std::vector<Val*>& outputs,
+ const std::vector<Val*>& inputs);
+
+ void handleGridWelford(WelfordOp* new_wop);
+
+ void handleGroupedBlockWelford(
+ const GroupedWelfordOp* wop,
+ const std::vector<WelfordTriplet>& output_vals,
+ const std::vector<WelfordTriplet>& input_vals,
+ const std::vector<WelfordTriplet>& init_vals);
+ void handleGroupedGridWelford(
+ const GroupedWelfordOp* wop,
+ const std::vector<WelfordTriplet>& output_vals,
+ const std::vector<WelfordTriplet>& input_vals,
+ const std::vector<WelfordTriplet>& init_vals);
+
+ // Allocate a unique buffer for grid reductions and broadcast. A
+ // buffer is uniquely allocated for each output tensor of an
+ // expression.
+ kir::Allocate* allocateUniqueBuffer(
+ Val* buffer_size,
+ DataType dtype,
+ bool zero_init,
+ TensorView* out_tv,
+ std::unordered_map<TensorView*, kir::Allocate*>& alloc_map);
+
+ std::vector<kir::Allocate*> allocateWelfordWorkBuffer(
+ const std::vector<WelfordTriplet>& triplets,
+ WelfordTriplet::ValName name,
+ Val* buffer_size);
+
+ // Allocate a fused reduction object uniquely for a given
+ // TensorView. Parameter expr is the expression corresponding to the
+ // fused reduction.
+ void allocateUniqueFusedReduction(Expr* expr, TensorView* out_tv);
+
+ private:
+ std::vector<Expr*> lowered_exprs_;
+
+ // This is a slight workaround: "scope" has a couple of definitions. There is
+ // the Scope inside ForLoop/IfThenElse, which is really just a wrapper around
+ // std::vector<Expr*>, and there is the actual ForLoop/IfThenElse node. We
+ // want to carry both around, because when we push back to a scope it could be
+ // either the body or the else body of the IfThenElse, and we also want to
+ // understand the nesting of IfThenElse/ForLoop nodes.
+ Scope* active_scope_ = nullptr;
+
+ // Track for loops to send to indexing. Similar to what's done in
+ // kir::IrVisitor.
+ std::vector<ForLoop*> for_loops_;
+
+ // Keep track of the loops in which the currently visiting expr is rotated.
+ std::unordered_set<ForLoop*> rotated_loop_;
+
+ // Maps to keep track of allocated buffers and objects that must be
+ // allocated only once
+ std::unordered_map<TensorView*, kir::Allocate*> sync_buffer_map_;
+ std::unordered_map<TensorView*, kir::Allocate*> work_buffer_map_;
+ std::unordered_map<TensorView*, kir::AllocateFusedReduction*>
+ fused_reduction_map_;
+ };
+
+ } // namespace nvfuser
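
The lowering passes added by this wheel share a simple functional shape: each takes the kernel's flat expression list and returns a rewritten list. As a hedged sketch of how the IndexLowering entry point above might be invoked (the real call site and pass ordering live inside GpuLower in libnvfuser_codegen; the helper name below is hypothetical):

    #include <device_lower/pass/index.h>

    #include <utility>
    #include <vector>

    namespace nvfuser {

    // Hypothetical helper: rewrite an already loop-nested expression list so
    // that every expr carries concrete indices. Illustrative only; the actual
    // pipeline is driven by GpuLower.
    std::vector<Expr*> lowerIndices(std::vector<Expr*> loop_nest_exprs) {
      return IndexLowering::getIndexedExprs(std::move(loop_nest_exprs));
    }

    } // namespace nvfuser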
@@ -0,0 +1,16 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <ir/all_nodes.h>
+
+ namespace nvfuser {
+
+ std::vector<Expr*> lowerToInlinePtx(const std::vector<Expr*>& exprs);
+
+ } // namespace nvfuser
@@ -0,0 +1,39 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+
+ #include <ir/all_nodes.h>
+ #include <kernel_ir.h>
+
+ #include <vector>
+
+ namespace nvfuser {
+
+ //! Insert syncs at the end of for-loops to prevent write-after-read (WAR)
+ //! race conditions.
+ //!
+ //! A WAR race condition occurs when the next iteration of the loop overwrites
+ //! a shared memory value before a previous operation has finished reading it.
+ std::vector<Expr*> insertWarThreadSynchronization(
+ const std::vector<Expr*>& exprs);
+
+ //! Insert syncs between writing to shared memory and then reading it.
+ //! The RAW pass is run before indexing, unrolling (loop duplication), memory
+ //! aliasing, and index (grid/block bcast/reduction).
+ std::vector<Expr*> insertRawThreadSynchronization(
+ const std::vector<Expr*>& exprs);
+
+ //! Insert wait expressions such as wgmma.wait_group.sync.aligned at the end
+ //! of for-loops for async expressions to prevent write-after-read race
+ //! conditions.
+ //!
+ //! A WAR race condition occurs when the next iteration of the loop overwrites
+ //! the buffer before a previous async expression has finished reading it.
+ std::vector<Expr*> insertWarAsyncWait(const std::vector<Expr*>& exprs);
+
+ } // namespace nvfuser
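
To see how the three passes declared above would compose, here is a hedged sketch of chaining them over a lowered expression list; the function name and the ordering are hypothetical and do not claim to reproduce the actual GpuLower pass order:

    #include <device_lower/pass/insert_syncs.h>

    #include <vector>

    namespace nvfuser {

    // Hypothetical chaining of the sync-insertion passes declared above.
    std::vector<Expr*> applySyncPasses(const std::vector<Expr*>& exprs) {
      // RAW: a consumer must not read a shared memory value before the
      // producer's write is visible to the whole block.
      auto with_raw = insertRawThreadSynchronization(exprs);
      // WAR: iteration i+1 must not overwrite a shared memory value that is
      // still being read in iteration i.
      auto with_war = insertWarThreadSynchronization(with_raw);
      // Same WAR idea for async expressions (e.g. wgmma): wait before reuse.
      return insertWarAsyncWait(with_war);
    }

    } // namespace nvfuser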
@@ -0,0 +1,24 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <ir/all_nodes.h>
+
+ namespace nvfuser {
+
+ //! Set up KernelPerformanceProfile of GpuLower when enabled, which
+ //! keeps track of expressions to profile. A new TensorView is added
+ //! for storing profiling results. The expression list is prepended
+ //! with a kir::Allocate node to allocate the TensorView profile
+ //! buffer. Note that any expression added after this pass will not be
+ //! profiled, so this pass should be called after all expressions are
+ //! lowered. KernelPerformanceProfile is copied to Kernel after
+ //! lowering.
+ std::vector<Expr*> instrumentKernel(const std::vector<Expr*>& exprs);
+
+ } // namespace nvfuser
@@ -0,0 +1,150 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+ #include <ir/all_nodes.h>
+
+ namespace nvfuser {
+
+ // Note [Loop Rotation]
+ // Loop Rotation is an optimization pass to improve instruction scheduling. For
+ // a given loop, for example:
+ // for (int i = 0; i < n; i++) {
+ // line1(i);
+ // line2(i);
+ // line3(i);
+ // line4(i);
+ // }
+ // If we rotate one line up, then we get
+ // if (0 < n) {
+ // line1(0);
+ // }
+ // for (int i = 0; i < n; i++) {
+ // line2(i);
+ // line3(i);
+ // line4(i);
+ // if (i + 1 < n) {
+ // line1(i + 1);
+ // }
+ // }
+ // Similarly, if we rotate two lines up, then we get
+ // if (0 < n) {
+ // line1(0);
+ // line2(0);
+ // }
+ // for (int i = 0; i < n; i++) {
+ // line3(i);
+ // line4(i);
+ // if (i + 1 < n) {
+ // line1(i + 1);
+ // line2(i + 1);
+ // }
+ // }
+ // To take advantage of this pass, the scheduler needs to specify which loop to
+ // rotate and the consumers whose allocation and computation will be rotated,
+ // and pass this information as a compilation parameter. For example, if a
+ // fusion creates the following loop structure:
+ // for (int i = 0; i < id1.extent(); i++) {
+ // float T1[5];
+ // for (int j = 0; j < 5; j++) {
+ // if (i < T0.size[0]) {
+ // T1[j] = sin(T0[i, j]);
+ // }
+ // }
+ // float T2[5];
+ // for (int j = 0; j < 5; j++) {
+ // T2[j] = cos(T1[j]);
+ // }
+ // float T3[5];
+ // for (int j = 0; j < 5; j++) {
+ // T3[j] = exp(T2[j]);
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // if (i < T4.size[0]) {
+ // T4[i, j] = log(T3[j]);
+ // }
+ // }
+ // }
+ // Then the scheduler could pass a compilation parameter {id1, {T1, T2}} to the
+ // fusion, and this pass will transform the code into
+ // float T1[5];
+ // float T2[5];
+ // if (0 < id1.extent()) {
+ // for (int j = 0; j < 5; j++) {
+ // if (0 < T0.size[0]) {
+ // T1[j] = sin(T0[0, j]);
+ // }
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // T2[j] = cos(T1[j]);
+ // }
+ // }
+ // for (int i = 0; i < id1.extent(); i++) {
+ // float T3[5];
+ // for (int j = 0; j < 5; j++) {
+ // T3[j] = exp(T2[j]);
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // if (i < T4.size[0]) {
+ // T4[i, j] = log(T3[j]);
+ // }
+ // }
+ // if (i + 1 < id1.extent()) {
+ // for (int j = 0; j < 5; j++) {
+ // if (i + 1 < T0.size[0]) {
+ // T1[j] = sin(T0[i + 1, j]);
+ // }
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // T2[j] = cos(T1[j]);
+ // }
+ // }
+ // }
+ // Currently, because all existing predicates already cover out-of-bound
+ // access, we omit these predicates to get better-looking code:
+ // float T1[5];
+ // float T2[5];
+ // for (int j = 0; j < 5; j++) {
+ // if (0 < T0.size[0]) {
+ // T1[j] = sin(T0[0, j]);
+ // }
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // T2[j] = cos(T1[j]);
+ // }
+ // for (int i = 0; i < id1.extent(); i++) {
+ // float T3[5];
+ // for (int j = 0; j < 5; j++) {
+ // T3[j] = exp(T2[j]);
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // if (i < T4.size[0]) {
+ // T4[i, j] = log(T3[j]);
+ // }
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // if (i + 1 < T0.size[0]) {
+ // T1[j] = sin(T0[i + 1, j]);
+ // }
+ // }
+ // for (int j = 0; j < 5; j++) {
+ // T2[j] = cos(T1[j]);
+ // }
+ // }
+
+ // vector of (tv, dim, selection)
+ // For each entry in the vector, the selected tv/expr in the loop tv->axis(dim)
+ // will be rotated
+ using LoopRotationParam = std::vector<
+ std::tuple<TensorView*, int64_t, std::unordered_set<Statement*>>>;
+
+ std::vector<Expr*> rotateLoops(const std::vector<Expr*>& exprs);
+
+ } // namespace nvfuser
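
Given the LoopRotationParam alias above, a scheduler-side construction of the {id1, {T1, T2}} parameter from the example might look like the following sketch. tv1 and tv2 stand in for the fusion's TensorViews, the helper name is hypothetical, and how the parameter is attached to compilation is not shown here:

    #include <device_lower/pass/loop_rotation.h>

    #include <unordered_set>

    namespace nvfuser {

    // Hypothetical helper building one (tv, dim, selection) entry: rotate the
    // loop of tv1's axis 0 and carry tv1/tv2 (the selection) with it.
    LoopRotationParam makeRotationParam(TensorView* tv1, TensorView* tv2) {
      LoopRotationParam param;
      param.emplace_back(
          tv1, /*dim=*/int64_t{0}, std::unordered_set<Statement*>{tv1, tv2});
      return param;
    }

    } // namespace nvfuser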
@@ -0,0 +1,68 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+
+ #pragma once
+
+ #include <exceptions.h>
+
+ #include <compute_at_map.h>
+ #include <device_lower/analysis/thread_predicate.h>
+ #include <instrumentation.h>
+ #include <ir/all_nodes.h>
+ #include <kernel_ir.h>
+
+ namespace nvfuser {
+
+ //! Loop nest generator pass will get IR that looks something like:
+ //! T0[I0o{ceil(I0/4)}, I1o{ceil(I1/128)}, I0iU{4}, I1i{128}] = ...
+ //!
+ //! and will generate the loop nest structure for these exprs like:
+ //!
+ //! for( i : I0o{ceil(I0/4)} ) {
+ //! for( j : I1o{ceil(I1/128)} ) {
+ //! for( k : I0i{4} )
+ //! for( l : I1i{128} )
+ //! T0[I0o{ceil(I0/4)}, I1o{ceil(I1/128)}, I0iU{4}, I1i{128}] = ...
+ //!
+ //! It does not generate predicates, but it will generate allocations, and loop
+ //! nests to initialize reduction buffers.
+ class LoopNestGenerator {
+ public:
+ static std::vector<Expr*> loweredExprs(const std::vector<Expr*>& exprs);
+
+ private:
+ LoopNestGenerator(const std::vector<Expr*>& exprs);
+
+ // Open a new innermost for loop, and track which TV it was constructed from
+ // according to the computeAt chain.
+ void openFor(IterDomain*);
+
+ // Close the innermost for loop
+ void closeFor();
+
+ // Push an expression onto the front of the current scope
+ void pushFront(Expr* expr);
+
+ void handle(Expr* expr);
+
+ // Run the pass and accumulate output in lowered_exprs_
+ void generate(const std::vector<Expr*>& exprs);
+
+ private:
+ // Lowered exprs to return
+ std::vector<Expr*> lowered_exprs_;
+
+ // Keep all for loops conveniently available to make unrolling easier;
+ // basically just a stack of the active for loops
+ std::vector<ForLoop*> for_loops_;
+
+ // Loop structure of each expression
+ std::unordered_map<TensorView*, std::vector<IterDomain*>> loop_structures_;
+ };
+
+ } // namespace nvfuser
@@ -0,0 +1,86 @@
+ // clang-format off
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+ #include <ir/all_nodes.h>
+ #include <kernel_ir.h>
+ #include <visibility.h>
+
+ #include <vector>
+
+ namespace nvfuser {
+
+ struct IndexFromIdGraph;
+
+ //! Insert the magic zero definition at the beginning of the kernel. Insert a
+ //! magic zero update after every (outermost) loop nest with a compile-time
+ //! extent.
+ //!
+ //! This makes sure nvrtc does not aggressively save predicates and indices.
+ std::vector<Expr*> insertMagicZero(const std::vector<Expr*>& exprs);
+
+ //! Check if val is a reference to the magic zero variable
+ NVF_API bool isMagicZero(const Val* val);
+
+ //! Check if val is protected with magic zero.
+ //!
+ //! Specifically, this returns true if val is defined as "x + magic_zero".
+ bool isProtectedWithMagicZero(const Val* val);
+
+ //! Check if val is protected with magic zero; if yes, unwrap it.
+ //! maybeUnwrapMagicZero(i1) -> i1
+ //! maybeUnwrapMagicZero(i2 + magic_zero) -> i2
+ Val* maybeUnwrapMagicZero(Val* val);
+
+ // Determine if we may run into over-reuse of predicates or registers in the
+ // compiler. If the loop can be unrolled and the index and domain are not
+ // "simple" we likely want the loop protected.
+ //
+ // Magic zero protection should only be done for global memory and predicates.
+ // We should avoid use on registers. Shared memory does not require it, but
+ // likely wouldn't hurt.
+ bool needsMagicZero(
+ ForLoop* loop,
+ IterDomain* reference_domain = nullptr,
+ Val* ind = nullptr);
+
+ struct IndexMagicZeroInfo {
+ //! Index that may be updated with magic zero
+ Val* index = nullptr;
+ //! Loop index that is protected by magic zero. nullptr if no loop
+ //! is protected
+ Val* original_loop_index = nullptr;
+ //! Protected loop index. nullptr if no loop is protected
+ Val* protected_loop_index = nullptr;
+ //! Protected loop. nullptr if no loop is protected
+ IterDomain* loop_id = nullptr;
+ };
+
+ //! Protect an index val of an IterDomain with magic zero
+ //!
+ //! This should only be used for predicate indexing.
+ //!
+ //! No protection is done if none of the loops is determined to require
+ //! protection by needsMagicZero.
+ IndexMagicZeroInfo protectPredicateIndexWithMagicZero(
+ Val* index,
+ const IndexFromIdGraph& id_graph,
+ const std::vector<ForLoop*>& loops);
+
+ //! Protect an index val of a tensor with magic zero
+ //!
+ //! This should only be used for non-predicate indexing.
+ //!
+ //! No protection is done if none of the loops is determined to require
+ //! protection by needsMagicZero.
+ void protectNonPredicateIndexWithMagicZero(
+ const std::vector<ForLoop*>& loops,
+ const std::vector<IterDomain*>& loop_domains,
+ std::unordered_map<IterDomain*, Val*>& concrete_loop_idx_map);
+
+ } // namespace nvfuser
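
For intuition, magic-zero protection shows up in generated kernels as an extra "+ nvfuser_zero" term on an index expression (the exact shape of the index below is illustrative), and the helpers above let later passes detect and strip that protection. A hedged sketch, assuming the nvfuser source tree; the helper name is hypothetical:

    #include <device_lower/pass/magic_zero.h>

    namespace nvfuser {

    // Hypothetical helper: if an index was protected as "x + magic_zero",
    // recover x; otherwise return the index unchanged.
    Val* stripMagicZeroProtection(Val* index) {
      if (isProtectedWithMagicZero(index)) {
        return maybeUnwrapMagicZero(index);
      }
      return index;
    }

    } // namespace nvfuser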