nvfuser_cu121_torch25-0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h
@@ -0,0 +1,158 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+
+ #pragma once
+
+ #include <exceptions.h>
+ #include <visibility.h>
+
+ #include <device_lower/utils.h>
+ #include <ir/all_nodes.h>
+ #include <parallel_type_bitmap.h>
+
+ #include <unordered_map>
+ #include <unordered_set>
+ #include <utility>
+
+ namespace nvfuser {
+
+ //! Maps TensorViews to a { ParallelTypeBitmap, SourceMap } pair
+ //!
+ //! Map from TensorView to a bit set representing <BIDx, BIDy, BIDz, TIDx,
+ //! TIDy, TIDz>. If any dependency of a TV had a parallelized reduction, we
+ //! track it here. This is used during predicate generation to prevent
+ //! parallelization on that axis. This is important if we have a reduction
+ //! on, for example, TIDx, as the reduced value is only valid on
+ //! threadIdx.x == 0; if we use that value later in the kernel, we need that
+ //! predicate. If we follow a reduction parallelized on TIDx with a broadcast
+ //! on TIDx, we no longer need the predicate and can reset the bit
+ //! accordingly.
+ //!
+ //! In addition, if a parallel thread type is not used, it is redundant to
+ //! use all threads/blocks. That isn't a problem generally, although it can
+ //! be inefficient, but when an aliased smem buffer is used as an output,
+ //! redundant writes can be invalid (see issue #1110).
+ //! PredicateInfo::redundant_types tracks which parallel types are redundant
+ //! for each tensor and is used to let only one thread/block of a redundant
+ //! type execute the expression for a tensor.
+ class ThreadPredicateMap {
+  public:
+   using SourceMap =
+       std::unordered_map<ParallelType, std::unordered_set<const TensorView*>>;
+
+   //! Thread predicate information for each tensor
+   struct PredicateInfo {
+     // Parallel types where only one thread/block is valid.
+     ParallelTypeBitmap limited_types;
+     // Parallel types where only one thread/block is enough.
+     ParallelTypeBitmap redundant_types;
+
+     // When a loop domain of a tensor stored in global memory is merged from
+     // a concretized broadcast logical domain, the broadcast logical domains
+     // should be skipped when writing to global memory.
+     // broadcast_ld_indices_map maps a parallel type to a list of indices of
+     // the broadcast logical domains. The write to global memory is needed
+     // only when the index equals 0.
+     std::unordered_map<ParallelType, std::vector<Val*>>
+         broadcast_ld_indices_map;
+
+     // Tracking the use chain of redundant writes:
+     // [Redundant use chain]
+     // A parallel type is a `redundant_consumer_type` only if all of its
+     // propagation use chains terminate with a redundant write of this type.
+     // A propagation use chain is currently either a reg-to-reg chain for a
+     // shared mem tv, or a reg/smem-to-reg/smem chain for a global tv.
+     // This is complementary information to `redundant_types`. If a tensor
+     // view is redundantly written and not redundantly used by all consumers
+     // (see FusionRedundantPredSync3), a RAW sync will need to be inserted
+     // before reading this redundantly written tensor.
+     ParallelTypeBitmap redundant_use_types;
+     bool operator==(const PredicateInfo& other) const {
+       return limited_types == other.limited_types &&
+           redundant_types == other.redundant_types &&
+           redundant_use_types == other.redundant_use_types;
+     }
+   };
+
+   using MapType = std::unordered_map<const TensorView*, PredicateInfo>;
+
+   using const_iterator = MapType::const_iterator;
+
+   //! Build a map from each tensor to PredicateInfo.
+   void build(Fusion* fusion);
+
+   //! Get a PredicateInfo for a given tensor. If it's an output of a parallel
+   //! broadcast, unmask the limited_types_ bit of the corresponding parallel
+   //! type since it must join the broadcast operation although the valid
+   //! input is only available at one of the threads/blocks.
+   NVF_API PredicateInfo getPredicateInfo(const TensorView* tv) const;
+
+   //! Returns a flag set that indicates which parallel types should be
+   //! predicated.
+   ParallelTypeBitmap getPredicatedParallelTypes(const TensorView* tv) const;
+
+   //! Returns a Bool predicate for a given TensorView.
+   Val* getPredicate(
+       const TensorView* tv,
+       ParallelTypeBitmap mask = ParallelTypeBitmap().setAll()) const;
+
+   //! Returns a ParallelTypeBitmap representing which domain needs
+   //! blockBroadcast.
+   //!
+   //! Even when a domain is broadcast and parallelized, it does not need
+   //! blockBroadcast unless it is predicated by limited_types_.
+   ParallelTypeBitmap getParallelBroadcastDomains(const TensorView* tv) const;
+
+   //! Mark tv as updated so that rebuilding the map recomputes its
+   //! predicates and those of its dependents.
+   void markAsUpdated(const TensorView* tv);
+
+   void print() const;
+
+   //! Generate a Bool value from PredicateInfo.
+   static Val* getPredicateFromPredicateInfo(
+       const ThreadPredicateMap::PredicateInfo& pred_info,
+       const ParallelTypeBitmap& mask);
+
+   //! Get the redundant use types of the given expr; see
+   //! [Redundant use chain]
+   ParallelTypeBitmap getRedundantConsumerType(Expr* expr) const;
+
+  private:
+   // Update the thread_predicates bitset based on the provided Expr
+   void updateBitSet(const Expr*);
+   void avoidConcretizedBroadcastRedundantWrite(const TensorView* out_tv);
+   const_iterator find(const TensorView* tv) const;
+   const_iterator end() const;
+
+   const PredicateInfo& at(const TensorView* tv) const;
+   PredicateInfo& at(const TensorView* tv);
+
+   //! Update a mapping
+   bool update(
+       const TensorView* tv,
+       const ParallelTypeBitmap& limited_types,
+       const ParallelTypeBitmap& redundant_types);
+
+   //! Update a mapping
+   bool update(const TensorView* tv, const PredicateInfo& pred_and_src);
+
+   //! Backward-populate redundant use chain info once the redundant parallel
+   //! writes have been identified.
+   void populateRedundantUseMap(Fusion* fusion);
+
+  private:
+   MapType thread_predicates_;
+   //! Keep track of updated tensors that need predicates to be computed
+   std::unordered_set<const TensorView*> updated_tvs_;
+ };
+
+ } // namespace nvfuser
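
To make the predicate-bit bookkeeping above concrete, here is a minimal standalone sketch (not the nvfuser API; the six-bit layout and the ParallelBit enum are illustrative stand-ins for ParallelTypeBitmap) of how a reduction parallelized on TIDx sets a limited-type bit and a subsequent broadcast on TIDx resets it:

    #include <bitset>
    #include <iostream>

    // Illustrative bit layout: <BIDx, BIDy, BIDz, TIDx, TIDy, TIDz>.
    enum ParallelBit { BIDx, BIDy, BIDz, TIDx, TIDy, TIDz };
    using Bitmap = std::bitset<6>; // stand-in for ParallelTypeBitmap

    int main() {
      Bitmap limited_types; // all bits clear: no predicate needed yet

      // Reduction parallelized on TIDx: the result is valid only on
      // threadIdx.x == 0, so later consumers must be predicated on TIDx.
      limited_types.set(TIDx);
      std::cout << "after reduction: " << limited_types << "\n";

      // Broadcast on TIDx redistributes the value to all threads in x,
      // so the bit can be reset and the predicate dropped.
      limited_types.reset(TIDx);
      std::cout << "after broadcast: " << limited_types << "\n";
      return 0;
    }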
nvfuser/include/nvfuser/device_lower/analysis/tma.h
@@ -0,0 +1,93 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <ostream>
+ #include <unordered_map>
+ #include <variant>
+ #include <vector>
+
+ #include <ir/all_nodes.h>
+ #include <val_graph.h>
+
+ namespace nvfuser {
+
+ // See doc/dev/tma.md for design
+
+ // All ValGroups are in the traversal graph of the tensor indexer
+
+ struct TMADim {
+   ValGroup partitioned;
+   ValGroup box;
+   ValGroup tile;
+   ValGroup stride;
+   Val* gmem_stride_bytes;
+
+   Val* tensorSize() const {
+     return partitioned->front()->as<IterDomain>()->extent();
+   }
+   Val* boxSize() const {
+     return box ? box->front()->as<IterDomain>()->extent()
+                : gmem_stride_bytes->fusion()->oneVal();
+   }
+   Val* tileSize() const {
+     return tile ? tile->front()->as<IterDomain>()->extent()
+                 : gmem_stride_bytes->fusion()->oneVal();
+   }
+   Val* elementStride() const {
+     return stride ? stride->front()->as<IterDomain>()->extent()
+                   : gmem_stride_bytes->fusion()->oneVal();
+   }
+ };
+
+ std::ostream& operator<<(std::ostream& os, const TMADim& d);
+
+ class TMAInfo {
+   std::vector<TMADim> dims_;
+   MmaInputSmemSwizzle swizzle_;
+   TensorView* gmem_tv_;
+
+  public:
+   TMAInfo(
+       std::vector<TMADim> dims,
+       MmaInputSmemSwizzle swizzle,
+       TensorView* gmem_tv)
+       : dims_(std::move(dims)), swizzle_(swizzle), gmem_tv_(gmem_tv) {}
+
+   const std::vector<TMADim>& dims() const {
+     return dims_;
+   }
+
+   std::vector<ValGroup> getTMADomain() const {
+     std::vector<ValGroup> result;
+     std::transform(
+         dims_.begin(),
+         dims_.end(),
+         std::back_inserter(result),
+         [](const auto& d) { return d.partitioned; });
+     return result;
+   }
+
+   Val* tileSizeBytes() const {
+     int64_t itemsize = dataTypeSize(gmem_tv_->dtype());
+     Val* size = IrBuilder::create<Val>(itemsize, DataType::Index);
+     for (const auto& d : dims_) {
+       size = SimplifyingIrBuilder::mulExpr(size, d.tileSize());
+     }
+     return size;
+   }
+
+   Val* tensorMap() const;
+ };
+
+ std::unordered_map<TensorView*, const TMAInfo> getConsumerToTMAInfoMap(
+     Fusion* fusion);
+
+ MmaInputSmemSwizzle getSwizzle(TensorView* tv);
+
+ } // namespace nvfuser
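
As a plain-integer illustration of what TMAInfo::tileSizeBytes() computes, here is a minimal standalone sketch; it uses concrete int64_t values where nvfuser multiplies symbolic Vals via SimplifyingIrBuilder::mulExpr, and the tile extents and element size are made-up numbers:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Hypothetical 2D tile: 64 x 32 elements of a 2-byte dtype.
      const int64_t itemsize = 2;
      const std::vector<int64_t> tile_extents = {64, 32};

      // Same shape as tileSizeBytes(): element size times the product
      // of the per-dimension tile extents.
      int64_t size = itemsize;
      for (int64_t extent : tile_extents) {
        size *= extent;
      }
      std::cout << "tile size in bytes: " << size << "\n"; // 4096
      return 0;
    }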
nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h
@@ -0,0 +1,75 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <exceptions.h>
+ #include <ir/all_nodes.h>
+ #include <logical_domain_map.h>
+ #include <visibility.h>
+
+ namespace nvfuser {
+
+ //! Traverse and collect all concretized broadcast domains.
+ //!
+ //! The traversal first initializes the origin map with broadcast
+ //! domains in input tensors. Then, a new entry is added to the origin
+ //! map when a broadcast op is encountered during a forward traversal
+ //! of the given fusion. For non-broadcast ops, mappings are just
+ //! propagated forward using PairwiseLogicalDomainMap.
+ //!
+ //! When the mapped consumer domain is not broadcast, it means the
+ //! producer broadcast domain is concretized, and its origin broadcast
+ //! domains are marked as concretized.
+ class NVF_API ConcretizedBroadcastDomains : private IterVisitor {
+  public:
+   ConcretizedBroadcastDomains() = delete;
+   ConcretizedBroadcastDomains(Fusion* fusion);
+
+   //! Is a domain concretized?
+   bool isConcretized(IterDomain* id) const;
+
+   //! Is a domain concretized to a unique concrete domain?
+   bool isUniquelyConcretized(IterDomain* id) const;
+
+   //! Is a domain concretized to multiple concrete domains?
+   bool maybeNonUniquelyConcretized(IterDomain* id) const;
+
+   //! Return all domains id is concretized to, if concretized
+   std::unordered_set<IterDomain*> allConcretizedDomains(IterDomain* id) const;
+
+  private:
+   using IterVisitor::handle;
+
+   void handle(TensorView* tv) final;
+
+   void handle(BroadcastOp* bop) final;
+
+   void dispatch(Expr* expr) final;
+
+   void markAsConcretized(
+       IterDomain* broadcast_root_domain,
+       IterDomain* concrete_root_domain);
+
+   bool insertRootDomainToConcreteDomainSet(
+       IterDomain* new_root_id,
+       std::unordered_set<IterDomain*>& id_set);
+
+  private:
+   //! Maps each root broadcast domain to its original root broadcast
+   //! domains. There can be multiple original domains due to, e.g.,
+   //! binary ops with broadcast domains in both inputs.
+   std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
+       broadcast_origin_map_;
+   //! Map all broadcast domains to concrete root domains
+   std::unordered_map<IterDomain*, std::unordered_set<IterDomain*>>
+       broadcast_to_concrete_map_;
+
+   std::unique_ptr<ExactLogicalDomainMap> exact_map_;
+ };
+
+ } // namespace nvfuser
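
The origin-map propagation described in the class comment can be modeled with ordinary containers. Below is a minimal standalone sketch under simplifying assumptions: strings stand in for IterDomain pointers, and the three steps mimic initialization at a broadcast op, forward propagation through a non-broadcast op, and concretization when the mapped consumer domain is not a broadcast.

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    using DomainSet = std::unordered_set<std::string>;
    using OriginMap = std::unordered_map<std::string, DomainSet>;

    int main() {
      OriginMap broadcast_origin_map;
      OriginMap broadcast_to_concrete_map;

      // A broadcast op introduces domain "b0"; it is its own origin.
      broadcast_origin_map["b0"] = {"b0"};

      // A non-broadcast op maps producer "b0" to consumer "b1";
      // origins are simply propagated forward.
      broadcast_origin_map["b1"] = broadcast_origin_map["b0"];

      // A binary op maps "b1" to non-broadcast consumer domain "i0";
      // every origin of "b1" is now concretized to "i0".
      for (const auto& origin : broadcast_origin_map["b1"]) {
        broadcast_to_concrete_map[origin].insert("i0");
      }

      // In this model, isConcretized("b0") would report true.
      std::cout << broadcast_to_concrete_map.count("b0") << "\n"; // 1
      return 0;
    }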
nvfuser/include/nvfuser/device_lower/id_model_options.h
@@ -0,0 +1,135 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <id_model/utils.h>
+
+ #include <sstream>
+
+ namespace nvfuser {
+
+ class IdModelOptions {
+  public:
+   IdModelOptions()
+       : build_id_model_(isOptionEnabled(EnableOption::IdModel)),
+         consumer_index_(
+             isIdModelOptionEnabled(IdModelEnableOption::ConsumerIndex)),
+         producer_index_(
+             isIdModelOptionEnabled(IdModelEnableOption::ProducerIndex)),
+         inline_predicate_(
+             isIdModelOptionEnabled(IdModelEnableOption::InlinePredicate)),
+         unswitch_predicate_(
+             isIdModelOptionEnabled(IdModelEnableOption::UnswitchPredicate)),
+         loop_(isIdModelOptionEnabled(IdModelEnableOption::Loop)) {
+     ensureConsistency();
+   }
+
+   bool buildIdModel() const {
+     return build_id_model_;
+   }
+
+   void setBuildIdModel(bool b) {
+     build_id_model_ = b;
+     ensureConsistency();
+   }
+
+   bool buildTensorIndexer() const {
+     return build_tensor_indexer_;
+   }
+
+   void setBuildTensorIndexer(bool b) {
+     build_tensor_indexer_ = b;
+     ensureConsistency();
+   }
+
+   bool consumerIndex() const {
+     return consumer_index_;
+   }
+
+   void setConsumerIndex(bool b) {
+     consumer_index_ = b;
+     ensureConsistency();
+   }
+
+   bool producerIndex() const {
+     return producer_index_;
+   }
+
+   void setProducerIndex(bool b) {
+     producer_index_ = b;
+     ensureConsistency();
+   }
+
+   bool inlinePredicate() const {
+     return inline_predicate_;
+   }
+
+   void setInlinePredicate(bool b) {
+     inline_predicate_ = b;
+     ensureConsistency();
+   }
+
+   bool unswitchPredicate() const {
+     return unswitch_predicate_;
+   }
+
+   void setUnswitchPredicate(bool b) {
+     unswitch_predicate_ = b;
+     ensureConsistency();
+   }
+
+   bool loop() const {
+     return loop_;
+   }
+
+   void setLoop(bool b) {
+     loop_ = b;
+     ensureConsistency();
+   }
+
+   std::string toString() const {
+     auto bool2str = [](bool b) { return b ? "true" : "false"; };
+
+     std::stringstream ss;
+     ss << "build_id_model=" << bool2str(build_id_model_)
+        << ", build_tensor_indexer=" << bool2str(build_tensor_indexer_)
+        << ", consumer_index=" << bool2str(consumer_index_)
+        << ", producer_index=" << bool2str(producer_index_)
+        << ", inline_predicate=" << bool2str(inline_predicate_)
+        << ", unswitch_predicate=" << bool2str(unswitch_predicate_)
+        << ", loop=" << bool2str(loop_);
+     return ss.str();
+   }
+
+  private:
+   void ensureConsistency() {
+     // TensorIndexer is required if these options are enabled
+     build_tensor_indexer_ = build_tensor_indexer_ || consumer_index_ ||
+         producer_index_ || inline_predicate_ || unswitch_predicate_ || loop_;
+     // Similarly, IdModel needs to be built if TensorIndexer is used
+     build_id_model_ = build_id_model_ || build_tensor_indexer_;
+   }
+
+  private:
+   // Build IdModel
+   bool build_id_model_ = false;
+   // Build TensorIndexer
+   bool build_tensor_indexer_ = false;
+   // Globally enables consumer indexing
+   bool consumer_index_ = false;
+   // Globally enables producer indexing
+   bool producer_index_ = false;
+   // Globally enables inline predicates
+   bool inline_predicate_ = false;
+   // Globally enables unswitch predicates
+   bool unswitch_predicate_ = false;
+   // Generate loops using IdModel
+   bool loop_ = false;
+ };
+
+ } // namespace nvfuser
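
The ensureConsistency() logic is a small dependency closure: enabling any indexing, predicate, or loop option implies TensorIndexer, and TensorIndexer in turn implies IdModel. Here is a minimal standalone model of just that implication chain (the surrounding option-parsing machinery is omitted):

    #include <iostream>

    struct Options {
      bool build_id_model = false;
      bool build_tensor_indexer = false;
      bool consumer_index = false;
      bool producer_index = false;
      bool inline_predicate = false;
      bool unswitch_predicate = false;
      bool loop = false;

      // Same implication chain as IdModelOptions::ensureConsistency().
      void ensureConsistency() {
        build_tensor_indexer = build_tensor_indexer || consumer_index ||
            producer_index || inline_predicate || unswitch_predicate || loop;
        build_id_model = build_id_model || build_tensor_indexer;
      }
    };

    int main() {
      Options o;
      o.consumer_index = true; // enable only consumer indexing
      o.ensureConsistency();
      // Both prerequisites are pulled in automatically.
      std::cout << o.build_tensor_indexer << o.build_id_model << "\n"; // 11
      return 0;
    }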