nvfuser-cu121-torch25 0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/fusion.h
@@ -0,0 +1,511 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <exceptions.h>
+
+ #include <debug.h>
+ #include <fusion_guard.h>
+ #include <ir/base_nodes.h>
+ #include <ir/cloner.h>
+ #include <ir/container.h>
+ #include <iter_visitor.h>
+ #include <runtime/executor_params.h>
+ #include <visibility.h>
+
+ #include <any>
+ #include <string>
+ #include <unordered_map>
+ #include <unordered_set>
+ #include <vector>
+
+ namespace nvfuser {
+
+ //! Usage: FusionGuard (defined in fusion_guard.h) and Fusion are required user
+ //! interfaces for any operation underlying the code generator. In order to
+ //! create values, expressions, and generate code, a Fusion instance must be
+ //! active. It is the responsibility of the user to create a Fusion instance and
+ //! register it with the fusion guard. The simplest example of this is:
+ //!
+ //!   Fusion fusion;
+ //!   FusionGuard fg(&fusion);
+ //!
+ //! Once a fusion is active, all values and operations will be registered with
+ //! it.
+ //!
+ //! FusionGuard and Fusion are critical to the lifetime model of the IR system.
+ //! FusionGuard is a convenient way to set which base container instance holds
+ //! the defined IR. Statements that are defined are registered through the
+ //! FusionGuard with a particular Fusion. FusionGuard provides convenient
+ //! methods to access the active fusion so it doesn't need to be passed around
+ //! constantly. Any IR node class derived from Statement must register with a
+ //! Fusion to avoid memory leaks.
+ //!
+ //! Fusion is generally thought of as a translated fusion group from the JIT. It
+ //! is likely a single kernel, although we don't have to stick to this in the
+ //! future and could in theory generate multiple kernels with an executor to run
+ //! them.
+ //!
+ //! Fusion also allows users to set input/output values, which allows us to
+ //! figure out how to hook up runtime data to and from the JIT as well as
+ //! provide mechanisms for dependency analysis and DCE, including safety
+ //! checks.
+
+ class Fusion;
+ class TensorView;
+ class WelfordResult;
+
+ class SegmentCandidateFinder;
+ class SegmentedFusion;
+ class KernelArgumentHolder;
+
+ class DynamicTransformConcretizationInfo;
+
+ // Set the enum base to `int` so it can be safely serialized as a part of
+ // serde::InputOutputAlias.
+ enum class AllocationType : int {
+   New, // Allocate a new buffer
+   // Reuse the buffer allocated to `aliased_io`. For example, the tensor storing
+   // BatchNorm's running mean. The output EMA is updated in place.
+   ReuseBuffer,
+   // This is used to cheaply compute the output tensor using
+   // `ExpressionEvaluator` (instead of a kernel) for:
+   // 1. PointerArithmetics: For example, the output of a ViewOp is merely a
+   //    pointer arithmetic of the input. In this case, aliased_io is a non-null
+   //    tensor.
+   // 2. To evaluate output tensors which are not aliases. For example, default
+   //    scheduling for MatmulOp/LinearOp in ExprEval scheduler.
+   Evaluate,
+ };
+
+ struct AliasInfo {
+   AllocationType type;
+   Val* aliased_io;
+   // Whether integration should hide the output from users. This is currently
+   // only used for ReuseBuffer.
+   bool hide_output;
+ };
+
+ //! Fusion is mutable but unique. Nodes cannot be copied in any way from one
+ //! Fusion to another. If anything like that is desired, it would require
+ //! duplicating all associated values and exprs. Fusion is considered to be SSA,
+ //! though this could also change in the future if there is a good reason to do
+ //! so.
+ //!
+ //! The Fusion owns the whole IR graph (Vals and Exprs).
+ //!
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+ class NVF_API Fusion : public IrContainer {
+   typedef std::unordered_map<int, std::vector<int64_t>> PermutationMap;
+
+  public:
+   Fusion() = default;
+
+   Fusion(const Fusion& other);
+   Fusion(Fusion&& other) noexcept;
+
+   Fusion& operator=(const Fusion& other);
+   Fusion& operator=(Fusion&& other) noexcept;
+
+   ~Fusion() override;
+
+   friend void swap(Fusion& a, Fusion& b) noexcept;
+
+   void clear() noexcept;
+
+   //! Break dependency chains associated with Expr, remove references to expr,
+   //! and delete expr.
+   void removeExpr(Expr* expr) override;
+
+   //! Completely remove val from the fusion, breaking all dependencies
+   //! associated with it.
+   void removeVal(Val* val) override;
+
+   //! Register input as an input of the fusion
+   void addInput(Val* input);
+
+   //! Add output to outputs_ without modifying hide_output
+   void addOutputInternal(Val* output);
+
+   //! Register output as an output of the fusion
+   void addOutput(Val* output);
+
+   //! Deregister input as an input of the fusion
+   void removeInput(Val* input);
+
+   //! Deregister output as an output of the fusion
+   void removeOutput(Val* output);
+
+   //! Replace output with another value
+   void replaceOutput(Val* output, Val* replacement);
+
+   //! Assert that all leaves found from outputs are registered as inputs.
+   void validateInputs();
+
+   //! Print this fusion to an output stream
+   std::ostream& print(std::ostream& os, bool include_tensor_transforms = true)
+       const;
+
+   //! Print to the default debugging output stream
+   std::ostream& print() const {
+     return print(debug());
+   }
+
+   //! Print Arith exprs
+   //! \param from_outputs_only Only print exprs reachable from outputs
+   void printMath(bool from_outputs_only = true);
+
+   //! Print transformations used in fusion (can be very verbose)
+   void printTransforms();
+
+   //! Lower the fusion and print a kernel
+   void printKernel(const CompileParams& compile_params = CompileParams());
+
+   //! Returns whether this fusion is a no-op, for example, when it trivially
+   //! forwards inputs or all outputs are size-0 tensors.
+   bool isNoOp();
+
+   //! Lower the fusion and evaluate bank conflict info.
+   //! Returns (tensor, read conflict ways, write conflict ways).
+   //! Each tensor can be read/written by multiple expressions, so the ways are
+   //! vectors.
+   std::unordered_map<
+       TensorView*,
+       std::pair<std::vector<int64_t>, std::vector<int64_t>>>
+   bankConflictInfo(const CompileParams& compile_params = CompileParams());
+
+   //! Return a list of topologically sorted expressions. This only includes
+   //! exprs required to generate registered outputs.
+   std::vector<Expr*> exprs() const;
+
+   //! Return a vector of fusion inputs that feed this Val
+   std::vector<Val*> inputsOf(Val* val);
+
+   //! Return all Vals in math expressions that cannot be eliminated.
+   //!
+   //! It is generally equivalent to the vals that are used to generate
+   //! outputs; however, when a multi-output expression exists and only
+   //! some of the outputs are used, the remaining unused outputs are
+   //! also included, as they must show up in the final code.
+   std::vector<Val*> usedMathVals();
+
+   //! Returns all vals that are produced by used math expressions and
+   //! also do not have further consumers.
+   //!
+   //! In the case of an active multi-output expression, the returned vector
+   //! will include the expression outputs that did not lead to a fusion
+   //! output.
+   std::vector<Val*> terminatingMathVals();
+
+   //! Return all Exprs that use val
+   std::unordered_set<Expr*> unordered_uses(const Val* val) const;
+
+   //! Return the Expr that produces val
+   Expr* definition(const Val* val) const;
+
+   //! Indicate to the kernel to set itself up to generate random numbers
+   bool isStochastic() const;
+
+   //! Run the fusion segmentation algorithm to create a segmented fusion
+   std::unique_ptr<SegmentedFusion> segment(const KernelArgumentHolder& args);
+
+   const std::vector<Val*>& inputs() const {
+     return inputs_;
+   }
+
+   std::vector<Val*> inputsAndCreated();
+
+   const std::vector<Val*>& outputs() const {
+     return outputs_;
+   }
+
+   std::vector<Val*> getTerminatingOutputs() const;
+
+   // Aliasing an output to an input value; this is a WAR to allow in-place
+   // updates on an input tensor.
+   // Note: this is not always safe and should be used with extra caution.
+   // Currently the only place it's used is in the running stats update for batch
+   // normalization.
+   //
+   // TODO(wujingyue): Rename this method because `input` can be another fusion
+   // output.
+   //
+   // TODO: aliases should be made known to segmentation, so we'll always include
+   // the input tensor in the section where the output is produced. Currently,
+   // aliases of type `PointerArithmetics` are marked after segmentation, but
+   // those of type `ReuseBuffer` are marked in fusion definitions.
+   NVF_API void aliasOutputToInput(Val* output, Val* input, AllocationType type);
+
+   //! Returns the aliased input of a given output along with an `AliasInfo`
+   //! describing how they alias. Returns <nullptr,nullptr> when `output` is not
+   //! aliased.
+   const AliasInfo& getOutputAlias(const Val* output) const;
+
+   bool isTVUseInfoValid() {
+     return all_tv_uses_valid_;
+   }
+
+   bool isUpdatingTVUseInfo() {
+     return is_during_update_uses_;
+   }
+
+   // NOTE: [Fusion managed data]
+   //
+   // Fusion-managed data is a mechanism to communicate data that survives a
+   // fusion clone. Managed data can be named or unnamed.
+   //
+   // For unnamed data, to let the fusion manage it, do the following:
+   //   size_t index = fusion.manage(data); // or
+   //   size_t index = fusion.manage(data, clone_fn);
+   // This function returns an index which can be used to retrieve the data back.
+   // To retrieve the unnamed managed data, do
+   //   T data = fusion.getManaged<T>(index); // rvalue
+   //   T& data = fusion.getManaged<T>(index); // lvalue
+   // To test if the fusion has managed data with the given index, do:
+   //   bool has_data = fusion.hasManaged(index);
+   //
+   // For named data, the usage is similar. To manage:
+   //   std::string name = "interesting_tvs";
+   //   fusion.manage(name, data); // or
+   //   fusion.manage(name, data, clone_fn);
+   // To retrieve:
+   //   T data = fusion.getManaged<T>(name); // rvalue
+   //   T& data = fusion.getManaged<T>(name); // lvalue
+   // To check existence:
+   //   bool has_data = fusion.hasManaged(name);
+   // Note that special names, such as "loop_rotation", are reserved as lowering
+   // options.
+   //
+   // The managed data can be of any type. To retrieve managed data, you always
+   // need to specify the actual type of the data. For data whose type already
+   // has an overload of IrCloner::clone, the fusion will automatically know how
+   // to modify it when a fusion clone happens. For these types of data, you can
+   // just use the overload of `manage` without the clone function. For example:
+   //   std::vector<TensorView*> interested_tvs;
+   //   size_t index = fusion.manage(interested_tvs);
+   // For data whose type does not have an overload of IrCloner::clone, you
+   // need to tell the fusion how to transform the data to keep it consistent
+   // during a fusion clone. For example:
+   //   struct InputsOutputs {
+   //     TensorView* input;
+   //     TensorView* output;
+   //     bool some_flag;
+   //   };
+   //   auto clone_fn = [](IrCloner& cloner, std::any data) -> std::any {
+   //     InputsOutputs result;
+   //     auto d = std::any_cast<InputsOutputs>(data);
+   //     result.input = cloner.clone(d.input);
+   //     result.output = cloner.clone(d.output);
+   //     result.some_flag = d.some_flag;
+   //     return result;
+   //   };
+   //   InputsOutputs data{...};
+   //   size_t index = fusion.manage(data, clone_fn);
+   //
+   // See the test FusionManagedData_CUDA for example use cases.
+   using CloneFn = std::function<std::any(IrCloner&, std::any)>;
+
+   inline size_t manage(std::any data, CloneFn clone) {
+     managed_data_.emplace_back(data, clone);
+     return managed_data_.size() - 1;
+   }
+
+   inline void manage(std::string key, std::any data, CloneFn clone) {
+     managed_named_data_[key] = std::make_pair(data, clone);
+   }
+
+   template <typename T>
+   inline size_t manage(T data);
+
+   template <typename T>
+   inline void manage(std::string key, T data);
+
+   template <typename T>
+   inline T getManaged(size_t index) const {
+     return std::any_cast<T>(managed_data_.at(index).first);
+   }
+
+   template <typename T>
+   inline T getManaged(std::string key) const {
+     return std::any_cast<T>(managed_named_data_.at(key).first);
+   }
+
+   template <typename T>
+   inline T& getManaged(size_t index) {
+     return std::any_cast<T&>(managed_data_.at(index).first);
+   }
+
+   template <typename T>
+   inline T& getManaged(std::string key) {
+     return std::any_cast<T&>(managed_named_data_.at(key).first);
+   }
+
+   //! Try to get managed data by index, checking that we have an entry for it,
+   //! and that the entry has not been reset (see stopManaging).
+   template <typename T>
+   inline std::optional<const T> getManagedSafe(size_t index) const {
+     if (hasManaged(index)) {
+       return std::any_cast<T>(managed_data_.at(index).first);
+     }
+     return std::nullopt;
+   }
+
+   //! Try to get managed data by key, checking that we have an entry for that
+   //! key.
+   template <typename T>
+   inline std::optional<const T> getManagedSafe(std::string key) const {
+     auto it = managed_named_data_.find(key);
+     if (it == managed_named_data_.end()) {
+       return std::nullopt;
+     }
+     return std::any_cast<T>(it->second.first);
+   }
+
+   //! Disables a piece of managed data. After this, there will still be an entry
+   //! but .has_value() will return false. getManagedSafe() should be used in
+   //! cases where the data management may have been stopped.
+   inline void stopManaging(size_t index) {
+     if (!hasManaged(index)) {
+       return;
+     }
+     managed_data_.at(index).first.reset();
+   }
+
+   //! Disables a piece of managed data by removing the entry with this key.
+   //! getManagedSafe() should be used in cases where the data management may
+   //! have been stopped.
+   inline void stopManaging(std::string key) {
+     auto it = managed_named_data_.find(key);
+     if (it == managed_named_data_.end()) {
+       return;
+     }
+     managed_named_data_.erase(it);
+   }
+
+   inline bool hasManaged(size_t index) const {
+     return index < managed_data_.size() &&
+         managed_data_[index].first.has_value();
+   }
+
+   inline bool hasManaged(std::string key) const {
+     return managed_named_data_.find(key) != managed_named_data_.end();
+   }
+
+   //! True if any tensor has a symbolic axis
+   bool hasDynamicTransform();
+
+   static IrCloner copy(const Fusion* from, Fusion* to);
+
+   //! During scheduling, this can be set to a non-negative value. If done, then
+   //! during execution by KernelExecutor, we will check that this value matches
+   //! the corresponding value in LaunchParams.
+   int64_t expectedDynamicSmemBytes() const {
+     return expected_dynamic_smem_bytes_;
+   }
+
+   void setExpectedDynamicSmemBytes(int64_t bytes) {
+     expected_dynamic_smem_bytes_ = bytes;
+   }
+
+   //! This is a cached version of ir_utils::allTvs that can be invalidated.
+   //! Return a copy of the vector instead of a reference, as the cache can be
+   //! invalidated by many operations. If we returned a reference and iterated
+   //! over it while modifying the fusion, it could easily cause a segfault.
+   std::vector<TensorView*> allTvs();
+
+   //! Specify that id0 and id1 are mapped in the Exact graph. This should
+   //! be used only when absolutely necessary.
+   //!
+   //! Currently, id0->sameAs(id1) needs to hold. It will be an error
+   //! otherwise.
+   void registerExactMapping(IterDomain* id0, IterDomain* id1);
+
+   bool hasRegisteredExactMappings() const {
+     return hasManaged(exact_mappings_key);
+   }
+
+   DisjointSets<IterDomain*> registeredExactMappings() const;
+
+   void resetExactMappings();
+
+  protected:
+   friend SegmentCandidateFinder;
+   friend SegmentedFusion;
+   friend class TranslateApplicableWelford;
+   friend Val;
+
+   using IrContainer::registerExpr;
+   using IrContainer::registerVal;
+
+   //! Register the Val with this fusion
+   void registerVal(Val* val) override;
+
+   //! Register expr with this fusion.
+   //! When we register an expression, we want to update the dependency tracking
+   //! of Vals. If this container is not a Kernel, it will remove previous
+   //! definitions of outputs and register this Expr as the definition. Otherwise
+   //! it will update the definition if not previously set, but will not remove
+   //! old definitions.
+   void registerExpr(Expr* expr) override;
+
+   //! Clear Exprs from TV uses that are not required to produce outputs from
+   //! inputs. The only other place this is used (other than Fusion) is in
+   //! Val::uses().
+   void resetTvUses();
+
+   //! Declare that TensorView uses need to be updated (but don't actually do
+   //! the update).
+   void invalidateTvsAndUses() {
+     all_tv_uses_valid_ = false;
+     all_tvs_ptr_.reset();
+   }
+
+  private:
+   // Fusion inputs and outputs
+   std::vector<Val*> inputs_;
+   std::vector<Val*> outputs_;
+
+   // io alias pointing from output to input
+   std::unordered_map<const Val*, AliasInfo> io_alias_;
+
+   // Records whether the current use data in the IR nodes is valid.
+   // The states are either all valid or all invalid.
+   bool all_tv_uses_valid_ = false;
+   bool is_during_update_uses_ = false;
+
+   std::vector<std::pair<std::any, CloneFn>> managed_data_;
+   std::unordered_map<std::string, std::pair<std::any, CloneFn>>
+       managed_named_data_;
+
+   // If set to a non-negative value during scheduling, this will be checked by
+   // the executor.
+   int64_t expected_dynamic_smem_bytes_ = -1LL;
+
+   std::unique_ptr<std::vector<TensorView*>> all_tvs_ptr_ = nullptr;
+
+   inline static const std::string exact_mappings_key = "exact_mappings";
+ };
+
+ template <typename T>
+ size_t Fusion::manage(T data) {
+   std::any a = data;
+   return manage(a, [](IrCloner& cloner, std::any data) {
+     return std::any(cloner.clone(std::any_cast<T>(data)));
+   });
+ }
+
+ template <typename T>
+ void Fusion::manage(std::string key, T data) {
+   std::any a = data;
+   manage(key, a, [](IrCloner& cloner, std::any data) {
+     return std::any(cloner.clone(std::any_cast<T>(data)));
+   });
+ }
+
+ } // namespace nvfuser
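
The header above declares the full Fusion API but no usage beyond the comments. The following is a minimal, illustrative sketch (not part of the wheel) of how that API might be driven. It assumes the tensor factory TensorViewBuilder from ir/interface_nodes.h and the pointwise op add from ops/arith.h, both shipped in this package but not declared in fusion.h; everything else (FusionGuard, addInput/addOutput, manage/getManaged/hasManaged/stopManaging, printMath) is declared above.

// Illustrative sketch only; TensorViewBuilder and add() usage is assumed from
// their respective headers rather than shown in fusion.h.
#include <fusion.h>
#include <fusion_guard.h>
#include <ir/interface_nodes.h>
#include <ops/arith.h>

using namespace nvfuser;

void buildAndInspectFusion() {
  Fusion fusion;
  FusionGuard fg(&fusion); // IR created below is registered with `fusion`

  // Two symbolic 2-D input tensors (assumed TensorViewBuilder usage).
  TensorView* tv0 = TensorViewBuilder().ndims(2).build();
  TensorView* tv1 = TensorViewBuilder().ndims(2).build();
  fusion.addInput(tv0);
  fusion.addInput(tv1);

  TensorView* tv2 = add(tv0, tv1); // assumed pointwise op from ops/arith.h
  fusion.addOutput(tv2);

  // Fusion-managed data: TensorView* has an IrCloner::clone overload, so the
  // clone function can be omitted and the data survives a fusion clone.
  std::vector<TensorView*> interesting_tvs{tv2};
  size_t idx = fusion.manage(interesting_tvs);

  fusion.printMath(); // print exprs reachable from registered outputs

  if (fusion.hasManaged(idx)) {
    auto tvs = fusion.getManaged<std::vector<TensorView*>>(idx);
    (void)tvs;
  }
  fusion.stopManaging(idx); // entry remains, but getManagedSafe() now returns nullopt
}
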
nvfuser/include/nvfuser/fusion_guard.h
@@ -0,0 +1,37 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <visibility.h>
+
+ namespace nvfuser {
+
+ class Fusion;
+
+ //! Fusion Guard is our "context manager". It holds the active fusion and
+ //! allows it to be accessed anywhere through
+ //! FusionGuard::getCurFusion().
+ //!
+ //! See also the comments in fusion.h
+ class FusionGuard {
+  public:
+   //! Set the active fusion so it can be manipulated.
+   NVF_API explicit FusionGuard(Fusion* fusion);
+
+   NVF_API ~FusionGuard();
+
+   NVF_API static Fusion* getCurFusion();
+   static void setCurFusion(Fusion* fusion);
+
+  private:
+   Fusion* prev_fusion_;
+
+   static thread_local Fusion* active_fusion_;
+ };
+
+ } // namespace nvfuser
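
A small sketch of how the guard above is typically used as an RAII scope. This is illustrative only: the restore-on-destruction behavior is inferred from the prev_fusion_ member and the thread-local active_fusion_ pointer, not stated explicitly in this header.

#include <fusion.h>
#include <fusion_guard.h>

using namespace nvfuser;

void nestedGuards() {
  Fusion outer;
  FusionGuard outer_guard(&outer);
  // FusionGuard::getCurFusion() == &outer here.

  {
    Fusion inner;
    FusionGuard inner_guard(&inner);
    // IR built in this scope is registered with `inner`;
    // FusionGuard::getCurFusion() == &inner.
  } // ~FusionGuard() runs; the previously active fusion is presumably restored
    // (inferred from prev_fusion_), so getCurFusion() == &outer again.
}
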