nvfuser_cu121_torch25-0.2.25.dev20250201-cp310-cp310-manylinux_2_28_x86_64.whl

Files changed (242)
  1. nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
  2. nvfuser/__init__.py +618 -0
  3. nvfuser/__init__.pyi +4 -0
  4. nvfuser/contrib/__init__.py +9 -0
  5. nvfuser/contrib/nn/__init__.py +13 -0
  6. nvfuser/contrib/nn/normalization.py +725 -0
  7. nvfuser/include/nvfuser/alias_analysis.h +116 -0
  8. nvfuser/include/nvfuser/bfs.h +929 -0
  9. nvfuser/include/nvfuser/codegen.h +26 -0
  10. nvfuser/include/nvfuser/compute_at.h +28 -0
  11. nvfuser/include/nvfuser/compute_at_map.h +394 -0
  12. nvfuser/include/nvfuser/contiguity.h +351 -0
  13. nvfuser/include/nvfuser/cuda_utils.h +50 -0
  14. nvfuser/include/nvfuser/debug.h +50 -0
  15. nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
  16. nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
  17. nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
  18. nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
  19. nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
  20. nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
  21. nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
  22. nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
  23. nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
  24. nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
  25. nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
  26. nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
  27. nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
  28. nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
  29. nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
  30. nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
  31. nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
  32. nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
  33. nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
  34. nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
  35. nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
  36. nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
  37. nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
  38. nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
  39. nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
  40. nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
  41. nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
  42. nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
  43. nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
  44. nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
  45. nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
  46. nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
  47. nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
  48. nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
  49. nvfuser/include/nvfuser/device_lower/utils.h +382 -0
  50. nvfuser/include/nvfuser/device_lower/validation.h +74 -0
  51. nvfuser/include/nvfuser/disjoint_set.h +556 -0
  52. nvfuser/include/nvfuser/dispatch.h +334 -0
  53. nvfuser/include/nvfuser/driver_api.h +49 -0
  54. nvfuser/include/nvfuser/dynamic_transform.h +316 -0
  55. nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
  56. nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
  57. nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
  58. nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
  59. nvfuser/include/nvfuser/evaluator_common.h +295 -0
  60. nvfuser/include/nvfuser/exceptions.h +283 -0
  61. nvfuser/include/nvfuser/expr_evaluator.h +125 -0
  62. nvfuser/include/nvfuser/expr_simplifier.h +218 -0
  63. nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
  64. nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
  65. nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
  66. nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
  67. nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
  68. nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
  69. nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
  70. nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
  71. nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
  72. nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
  73. nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
  74. nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
  75. nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
  76. nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
  77. nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
  78. nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
  79. nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
  80. nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
  81. nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
  82. nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
  83. nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
  84. nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
  85. nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
  86. nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
  87. nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
  88. nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
  89. nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
  90. nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
  91. nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
  92. nvfuser/include/nvfuser/fusion.h +511 -0
  93. nvfuser/include/nvfuser/fusion_guard.h +37 -0
  94. nvfuser/include/nvfuser/fusion_profiler.h +311 -0
  95. nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
  96. nvfuser/include/nvfuser/global_allocator.h +27 -0
  97. nvfuser/include/nvfuser/grouped_reduction.h +47 -0
  98. nvfuser/include/nvfuser/host_ir/container.h +60 -0
  99. nvfuser/include/nvfuser/host_ir/executor.h +152 -0
  100. nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
  101. nvfuser/include/nvfuser/host_ir/lower.h +35 -0
  102. nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
  103. nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
  104. nvfuser/include/nvfuser/id_model/id_model.h +359 -0
  105. nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
  106. nvfuser/include/nvfuser/id_model/indexing.h +208 -0
  107. nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
  108. nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
  109. nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
  110. nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
  111. nvfuser/include/nvfuser/id_model/schedule.h +54 -0
  112. nvfuser/include/nvfuser/id_model/to_string.h +87 -0
  113. nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
  114. nvfuser/include/nvfuser/id_model/utils.h +176 -0
  115. nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
  116. nvfuser/include/nvfuser/index_compute.h +651 -0
  117. nvfuser/include/nvfuser/instrumentation.h +107 -0
  118. nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
  119. nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
  120. nvfuser/include/nvfuser/ir/builder.h +215 -0
  121. nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
  122. nvfuser/include/nvfuser/ir/cloner.h +185 -0
  123. nvfuser/include/nvfuser/ir/container.h +226 -0
  124. nvfuser/include/nvfuser/ir/graphviz.h +119 -0
  125. nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
  126. nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
  127. nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
  128. nvfuser/include/nvfuser/ir/iostream.h +98 -0
  129. nvfuser/include/nvfuser/ir/printer.h +57 -0
  130. nvfuser/include/nvfuser/ir/utils.h +801 -0
  131. nvfuser/include/nvfuser/iter_visitor.h +661 -0
  132. nvfuser/include/nvfuser/kernel.h +299 -0
  133. nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
  134. nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
  135. nvfuser/include/nvfuser/kernel_ir.h +1457 -0
  136. nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
  137. nvfuser/include/nvfuser/linked_hash_map.h +97 -0
  138. nvfuser/include/nvfuser/logical_domain_map.h +577 -0
  139. nvfuser/include/nvfuser/macros.h +23 -0
  140. nvfuser/include/nvfuser/mma_type.h +257 -0
  141. nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
  142. nvfuser/include/nvfuser/multidevice/communication.h +232 -0
  143. nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
  144. nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
  145. nvfuser/include/nvfuser/multidevice/executor.h +107 -0
  146. nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
  147. nvfuser/include/nvfuser/multidevice/utils.h +187 -0
  148. nvfuser/include/nvfuser/non_divisible_split.h +86 -0
  149. nvfuser/include/nvfuser/opaque_type.h +129 -0
  150. nvfuser/include/nvfuser/ops/alias.h +192 -0
  151. nvfuser/include/nvfuser/ops/all_ops.h +13 -0
  152. nvfuser/include/nvfuser/ops/arith.h +712 -0
  153. nvfuser/include/nvfuser/ops/composite.h +130 -0
  154. nvfuser/include/nvfuser/ops/indexing.h +55 -0
  155. nvfuser/include/nvfuser/ops/normalization.h +263 -0
  156. nvfuser/include/nvfuser/ops/utils.h +127 -0
  157. nvfuser/include/nvfuser/options.h +313 -0
  158. nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
  159. nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
  160. nvfuser/include/nvfuser/polymorphic_value.h +432 -0
  161. nvfuser/include/nvfuser/predicate_compute.h +213 -0
  162. nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
  163. nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
  164. nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
  165. nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
  166. nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
  167. nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
  168. nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
  169. nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
  170. nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
  171. nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
  172. nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
  173. nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
  174. nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
  175. nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
  176. nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
  177. nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
  178. nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
  179. nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
  180. nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
  181. nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
  182. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
  183. nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
  184. nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
  185. nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
  186. nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
  187. nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
  188. nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
  189. nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
  190. nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
  191. nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
  192. nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
  193. nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
  194. nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
  195. nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
  196. nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
  197. nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
  198. nvfuser/include/nvfuser/scheduler/registry.h +97 -0
  199. nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
  200. nvfuser/include/nvfuser/scheduler/resize.h +41 -0
  201. nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
  202. nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
  203. nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
  204. nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
  205. nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
  206. nvfuser/include/nvfuser/scheduler/utils.h +771 -0
  207. nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
  208. nvfuser/include/nvfuser/serde/factory.h +55 -0
  209. nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
  210. nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
  211. nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
  212. nvfuser/include/nvfuser/serde/utils.h +34 -0
  213. nvfuser/include/nvfuser/struct.inl +127 -0
  214. nvfuser/include/nvfuser/swizzle.h +54 -0
  215. nvfuser/include/nvfuser/sys_utils.h +40 -0
  216. nvfuser/include/nvfuser/tensor_metadata.h +118 -0
  217. nvfuser/include/nvfuser/tma.h +124 -0
  218. nvfuser/include/nvfuser/transform_iter.h +522 -0
  219. nvfuser/include/nvfuser/transform_replay.h +297 -0
  220. nvfuser/include/nvfuser/transform_rfactor.h +33 -0
  221. nvfuser/include/nvfuser/transform_view.h +136 -0
  222. nvfuser/include/nvfuser/type.h +1125 -0
  223. nvfuser/include/nvfuser/type_promotion.h +61 -0
  224. nvfuser/include/nvfuser/utils.h +619 -0
  225. nvfuser/include/nvfuser/val_graph.h +446 -0
  226. nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
  227. nvfuser/include/nvfuser/validator_utils.h +92 -0
  228. nvfuser/include/nvfuser/vectorization_info.h +31 -0
  229. nvfuser/include/nvfuser/visibility.h +21 -0
  230. nvfuser/lib/libnvfuser_codegen.so +0 -0
  231. nvfuser/nvfuser_version.py +69 -0
  232. nvfuser/pytorch_utils.py +184 -0
  233. nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
  234. nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
  235. nvfuser/utils.py +18 -0
  236. nvfuser/version.py +1 -0
  237. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
  238. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
  239. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
  240. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
  241. nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
  242. nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,204 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2024-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <ATen/cuda/CUDAContext.h>
+ #include <scheduler/multi_matmul.h>
+
+ namespace nvfuser {
+
+ // MmaOps in the scheduled tensor. Each one outputs a TensorView* which we call
+ // an mma_result. Each MmaOp will also have two input TensorViews which we call
+ // "ab" and "bb" since they are the immediate A and B operands and they contain
+ // broadcast dimensions. Again there can be multiple abs and multiple bbs in
+ // one fusion. These TensorViews are loaded from global memory tensors that we
+ // call "a" and "b" into shared memory tensors called acw_smem and bcw_smem.
+ // They are loaded from shared memory to register buffers we call "acr" and
+ // "bcr" ("cr" meaning "cache read" in this context).
+ //
+ // Putting this all together we have the following order for a simple matmul
+ //
+ //   a -> acw_smem -> acr -> ... -> ab
+ //                                    \ .
+ //                                      mma_result -> ... -> dc -> d
+ //                                    /
+ //   b -> bcw_smem -> bcr -> ... -> bb
+ //
+ // The ... indicate that there might be other tensors involved in a prologue or
+ // epilogue section at that location.
+ //
+ // In this example there are two matmuls both using the same "a" operand:
+ //
+ //   b1 -> bcw_smem1 -> bcr1 -> ... -> bb1
+ //                                        \ .
+ //                                          mma_result1
+ //                                        /            \ .
+ //   a -> acw_smem -> acr -> ... -> ab                   ... -> dc -> d
+ //                                        \            /
+ //                                          mma_result2
+ //                                        /
+ //   b2 -> bcw_smem2 -> bcr2 -> ... -> bb2
+ //
+ // Note that there can be more than one output d and each one will have its own
+ // register cache dc.
+ //
+ // Split-K and smem epilogue unswizzling add two additional tensors for each
+ // mma in the fusion: splitk_sum and smem_epilogue.
+ //
+ //   // No split-K, no smem epilogue unswizzling:
+ //   mma_result -> ... -> dc -> d
+ //   // split-K, no smem epilogue unswizzling:
+ //   mma_result -> splitk_sum -> ... -> dc -> d
+ //   // smem epilogue unswizzling, no split-K:
+ //   mma_result -> smem_epilogue -> ... -> dc -> d
+ //   // split-K and smem epilogue unswizzling:
+ //   mma_result -> smem_epilogue -> splitk_sum -> ... -> dc -> d
+ //
+ // These additional tensors are added to each mma_result in the fusion.
+ //
+ // Each of the named tensors above is scheduled differently. We schedule them
+ // by building AbstractTensors for each tensor category; these are held in
+ // HopperMultipleMatmulScheduler::schedules_.
+ // TODO: Inherit from SchedulerEntry
+ class HopperMultipleMatmulScheduler : public MultipleMatmulScheduler {
+  public:
+   HopperMultipleMatmulScheduler(Fusion* fusion, const MatmulParams* params)
+       : MultipleMatmulScheduler(fusion, params) {
+     const auto device_prop = at::cuda::getCurrentDeviceProperties();
+     const int cc = device_prop->major * 10 + device_prop->minor;
+     NVF_ERROR(
+         cc >= 90 && cc < 100, "This matmul scheduler is restricted to Hopper.");
+   }
+
+   void run() final;
+
+  private:
+   void cacheInputsAndOutputs();
+
+   // Including current tensor naming convention for reference,
+   // this is very temporary and will change over time and
+   // in fact the whole body of this function will
+   // eventually be a set of utility functions for different
+   // sections of matmul(fusion) kernels, with
+   // each having its own build out to do.
+   //
+   // Current naming convention is based on the following formula:
+   //
+   //   d = alpha * (a x b) + beta * c
+   //
+   // and is defined in the following way:
+   //
+   // operands assumed in global memory : a, b, c
+   //
+   // registers staging global load : ar, br (short for a/b read)
+   //
+   // shared mem cache of operands : acw_smem, bcw_smem (short for a/b
+   // cache_write smem)
+   //
+   // registers at shared memory load output : acr, bcr (short for a/b cache
+   // read)
+   //
+   // register tensor input to the actual mma op: ab, bb (short for a/b
+   // broadcasted)
+   //
+   // accumulator register: mma_result
+   //   - mma_result is MmaOp output if there is epilogue
+   //   - mma_result is dc (short for d cache) if there is no epilogue
+   //
+   // result in global memory: d
+
+   // Currently the support is for a, b, c and d as fusion inputs/outputs
+   // aka. no prolog fusion yet.
+   void defineOperandCaches();
+
+   void cacheOperandsToSmem(
+       const std::vector<TensorView*>& operands,
+       std::vector<TensorView*>& smem_operands);
+
+   //! Swizzle the M and N outer dimensions after makeTile has been called.
+   //! This updates outer_dim_roles if we introduce a new dimension, which can
+   //! happen if tv is missing a merged axis, in which case we skip merging after
+   //! the split. This is analogous to forwarding during transform propagation.
+   void swizzleBlockTiles(
+       TensorView* tv,
+       std::vector<MatmulDimRole>& outer_dim_roles);
+
+   //! This calls orig->cacheAfter() and also updates the broadcast graph to
+   //! reflect the new IterDomain mappings
+   TensorView* cacheAfter(
+       TensorView* orig,
+       LoadStoreOpType op_type = LoadStoreOpType::Set,
+       CacheOp cache_op = CacheOp::AllLevels,
+       bool propagate_allocation_domain = false);
+
+   //! Do block tiling for a collection of TensorViews. The tensors should be
+   //! unscheduled before this method is called.
+   //!   1) Axes will be ordered according to canonicalDimOrdering, and then axes
+   //!      with the same role will be merged.
+   //!   2) After that, we perform splits according to
+   //!      params_->tile_sizes.cta_tile, e.g. [M, K] -> [Mo, Ko, Mi, Ki].
+   //!   3) Depending on the value of params_->grid_swizzle_factor, if the TV has
+   //!      both M and N dimensions, we perform a 2D swizzle of the outer dimensions
+   //!      Mo and No.
+   //!   4) Finally, we do a split-K split if the splitk_factor is not 1
+   std::vector<std::vector<MatmulDimRole>> blockTileTensors(
+       const std::vector<TensorView*>& tvs);
+
+   //! Specifies the CGA dimensions by setting "cluster_dims" as fusion-managed
+   //! data
+   void setCGADims() const {
+     if (params_->cluster_dims != MatmulParams::ClusterDims{1, 1, 1}) {
+       fusion_->manage(
+           "cluster_dims",
+           std::tuple<int64_t, int64_t, int64_t>{
+               params_->cluster_dims.x,
+               params_->cluster_dims.y,
+               params_->cluster_dims.z});
+     }
+   }
+
+   //! Schedule the loads of all operands from global memory to shared memory.
+   //! Starting from the basic tiled schedule, we swizzle the operand memory.
+   //! Note that the cache op and LoadStoreOpType are already set during
+   //! defineOperandCaches().
+   void scheduleOperands();
+
+   //! Check that there is no computation in the prologues, since we do not
+   //! support that (yet)
+   void inspectPrologues() const;
+
+   void parallelizeBlocks(const std::vector<TensorView*>& tvs) const;
+
+   void scheduleMmaResults();
+
+   void scheduleEpilogue();
+
+   void scheduleSplitKSum();
+
+   void setUpInlining();
+
+   void setUpCircularBuffering();
+
+   // Map TensorView's iterDomain to its ValGroup.
+   // Then, find the MatmulDimRole for the ValGroup.
+   // Return MatmulDimRole for IterDomain
+   MatmulDimRole findMatmulDimRole(IterDomain* id);
+
+   // Schedule a block-tiled TensorView like mma output.
+   // Why? WGMMA has a unique output format. TensorViews after the mma-result in
+   // registers must respect this format for correctness.
+   void transformLikeMmaOutput(TensorView* tv, bool is_mma_result);
+
+  private:
+   std::vector<ValGroup> canonical_dim_ordering_;
+
+   std::vector<TensorView*> acw_smems_, bcw_smems_, acrs_, bcrs_, abs_, bbs_,
+       splitk_sums_, smem_epilogues_;
+ };
+
+ } // namespace nvfuser
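To make the intended workflow concrete, here is a minimal usage sketch (not taken from this package) assuming a Fusion containing Hopper-compatible matmul patterns and a MatmulParams instance produced elsewhere by the matmul heuristics; the helper name scheduleMatmulOnHopper is hypothetical.

  #include <fusion.h>
  #include <scheduler/hopper_multi_matmul.h>
  #include <scheduler/matmul_heuristic.h>

  // Hypothetical driver: schedule all matmul patterns in `fusion` for Hopper.
  void scheduleMatmulOnHopper(
      nvfuser::Fusion* fusion,
      const nvfuser::MatmulParams* params) {
    // The constructor checks the current device's compute capability and
    // errors out unless it is 9.x (Hopper).
    nvfuser::HopperMultipleMatmulScheduler scheduler(fusion, params);
    // run() performs the steps declared above: operand caching, block tiling,
    // swizzling, epilogue/split-K scheduling, inlining, circular buffering.
    scheduler.run();
  }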
@@ -0,0 +1,19 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <fusion.h>
+
+ namespace nvfuser {
+
+ // Marks aliases between fusion inputs and outputs. This respects existing
+ // allocation domains **even when** they are empty (assuming default order). See
+ // [Note on overriding empty allocation domains].
+ void markAliases(Fusion* fusion);
+
+ } // namespace nvfuser
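A minimal calling sketch (assumed usage, not from the package); the wrapper name runAliasMarking is hypothetical.

  #include <fusion.h>
  #include <scheduler/mark_aliases.h>

  void runAliasMarking(nvfuser::Fusion* fusion) {
    // Records input/output aliases on the fusion, honoring any allocation
    // domains that are already set (empty ones are treated as default order).
    nvfuser::markAliases(fusion);
  }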
@@ -0,0 +1,40 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <exceptions.h>
+ #include <fusion.h>
+ #include <mma_type.h>
+ #include <scheduler/matmul_heuristic.h>
+ #include <scheduler/registry.h>
+ #include <visibility.h>
+
+ namespace nvfuser {
+ class MatmulScheduler : public SchedulerEntry {
+  public:
+   bool canScheduleCompileTime(Fusion* fusion) override;
+
+   bool canScheduleRunTime(
+       Fusion* fusion,
+       SchedulerRuntimeInfo& runtime_info,
+       HeuristicDataCache* data_cache = nullptr) override;
+
+   std::unique_ptr<HeuristicParams> computeHeuristics(
+       Fusion* fusion,
+       SchedulerRuntimeInfo& runtime_info,
+       HeuristicDataCache* data_cache = nullptr) override;
+
+   void schedule(Fusion* fusion, const HeuristicParams* params) override;
+
+   constexpr static SchedulerType schedulerType() {
+     return SchedulerType::Matmul;
+   }
+ };
+
+ } // namespace nvfuser
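The class above follows the usual SchedulerEntry flow: compile-time check, run-time check, heuristics, then scheduling. A hedged sketch of that flow, assuming the caller already holds a Fusion and a SchedulerRuntimeInfo (the helper name tryScheduleMatmul is hypothetical):

  #include <fusion.h>
  #include <scheduler/matmul.h>
  #include <scheduler/runtime_info.h>

  // Returns true if the matmul scheduler accepted and scheduled the fusion.
  bool tryScheduleMatmul(
      nvfuser::Fusion* fusion,
      nvfuser::SchedulerRuntimeInfo& runtime_info) {
    nvfuser::MatmulScheduler scheduler;
    if (!scheduler.canScheduleCompileTime(fusion) ||
        !scheduler.canScheduleRunTime(fusion, runtime_info)) {
      return false;
    }
    // computeHeuristics() returns a MatmulParams behind a HeuristicParams
    // pointer, which is then handed back to schedule().
    auto params = scheduler.computeHeuristics(fusion, runtime_info);
    scheduler.schedule(fusion, params.get());
    return true;
  }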
@@ -0,0 +1,293 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <c10/util/hash.h>
+ #include <mma_type.h>
+ #include <scheduler/heuristic.h>
+ #include <utils.h>
+ #include <functional>
+
+ #include <sstream>
+ #include "type.h"
+
+ namespace nvfuser {
+
+ // Parameters of the matmul heuristic to describe the optimal schedule.
+ class MatmulParams : public HeuristicParams {
+  public:
+   MatmulParams()
+       : HeuristicParams(SchedulerType::Matmul), supported_vec_size() {};
+   //! A list of possible strategies used to define along which axis
+   //! parallelization will be done.
+   enum class TileRasterizationOrder { RowMajor = 0, ColumnMajor = 1 };
+
+   //! A wrapper for circular buffering config pieces
+   struct CircularBufferOptions {
+     bool circular_buffer_smem_write = false;
+     bool circular_buffer_smem_read = false;
+     // This parameter controls the number of circular
+     // buffering stages to use when loading operands a and b.
+     //
+     // If this value is greater than two then it indicates circular buffering,
+     // in which case async_gmem_load_operands must also be true.
+     //
+     // Note that whenever circular_buffer_smem_write is true, this value must be
+     // greater than one. Otherwise it is ignored.
+     int smem_circular_buffer_stage = 2;
+
+     // The circular buffering prefetch distance will be set to
+     //   smem_circular_buffer_stage - smem_circular_buffer_prefetch_gap
+     // This value must be positive since the prefetch distance must be strictly
+     // less than the number of stages.
+     int smem_circular_buffer_prefetch_gap = 1;
+
+     bool operator==(const CircularBufferOptions& other) const {
+       return other.circular_buffer_smem_write == circular_buffer_smem_write &&
+           other.circular_buffer_smem_read == circular_buffer_smem_read &&
+           other.smem_circular_buffer_stage == smem_circular_buffer_stage &&
+           other.smem_circular_buffer_prefetch_gap ==
+           smem_circular_buffer_prefetch_gap;
+     }
+
+     std::string toString() const {
+       std::stringstream ss;
+       ss << "CircularBufferOptions:\n"
+          << " circular_buffer_smem_write: "
+          << (circular_buffer_smem_write ? "true" : "false") << "\n"
+          << " circular_buffer_smem_read: "
+          << (circular_buffer_smem_read ? "true" : "false") << "\n"
+          << " smem_circular_buffer_stage: " << smem_circular_buffer_stage
+          << "\n"
+          << " smem_circular_buffer_prefetch_gap: "
+          << smem_circular_buffer_prefetch_gap;
+       return ss.str();
+     }
+
+     size_t hash() const {
+       return std::hash<size_t>{}(
+                  (static_cast<size_t>(smem_circular_buffer_prefetch_gap) << 3) |
+                  (static_cast<size_t>(smem_circular_buffer_stage) << 2) |
+                  (static_cast<size_t>(circular_buffer_smem_write)) << 1) |
+           (static_cast<size_t>(circular_buffer_smem_read));
+     }
+   };
+
+   //! This is the maximum vectorization supported by the inputs and outputs.
+   //! This refers to the number of data elements loaded simultaneously, not the
+   //! number of bytes.
+   struct SupportedVectorization {
+     // Each operand load from global to shared memory is vectorized along its
+     // inner-most allocation dimension as long as that is an M, N, or K
+     // dimension. For example, if the innermost dimension is a batch dimension
+     // then we will not vectorize that operand's loads from global to shared
+     // memory. If there are multiple dimensions in a given role, such as
+     // multiple K dimensions, then we can only vectorize those inner dimensions
+     // that are consistent with the canonical dimension ordering shared by all
+     // tensors in the Fusion.
+     int64_t a;
+     int64_t b;
+
+     // The epilogue is handled in a separate loop from the main loop/operand
+     // loads. We inline the epilogue expressions as much as possible, and we
+     // vectorize all tensors with the same factor for better memory coalescence;
+     // i.e. we parallelize the epilogue like [ ... TIDx V ] so we do not
+     // introduce any loops between the TIDx and V dimensions. If we used
+     // different vectorization for each output or epilogue input, then we would
+     // need an unrolled loop between TIDx and V which would interfere with
+     // memory coalescence. We assume the decrease in indexing arithmetic from
+     // vectorization is not worth the slowdown from non-coalesced accesses, so
+     // we prefer to use a smaller vectorization instead.
+     //
+     // To determine the epilogue vectorization we do the following steps:
+     //  - Look at each output, then each epilogue input and find the first
+     //    tensor with a non-batch dimension as its innermost allocation
+     //    dimension. We will use that as the innermost loop dimension and will
+     //    vectorize that dimension. If there are multiple such innermost
+     //    dimensions with the same role and full contiguity then we consider all
+     //    those dimensions as the merged vectorized dimension. For example if
+     //    we have an output whose allocation domain is [ B1 M1 N1 M2 M3 ] then
+     //    (M2*M3) will be the vectorized dimension. On the other hand, we would
+     //    skip a tensor that had allocation domain [ M1 M2 M3 N1 B1 ] since the
+     //    batch dimension is innermost.
+     //  - Then we pass over all epilogue inputs and outputs. For each tensor, we
+     //    consider all innermost dimensions in order. For example if we have
+     //    determined that we will vectorize along M1*M2*M3 and a tensor has
+     //    allocation [ B1 M1 N1 M2 M3 ] then we consider dimension M2*M3 (along
+     //    with all other strides) to find supported vectorization. If another
+     //    tensor has allocation [ B1 M1 M2 M3 N1 ] then we skip it since its
+     //    innermost dimension is not an N role dimension so its access will not
+     //    be vectorized.
+     //  - We store the minimum of all the maximum supported vectorizations
+     //    across all epilogue input and output tensors that were not skipped.
+     //    That is the value below. If no vectorization is possible, this will be
+     //    set to 1.
+     int64_t epilogue;
+
+     bool operator==(const SupportedVectorization& other) const {
+       return other.a == a && other.b == b && other.epilogue == epilogue;
+     }
+
+     std::string toString() const {
+       std::stringstream ss;
+       ss << "SupportedVectorization:\n"
+          << " a: " << a << "\n"
+          << " b: " << b << "\n"
+          << " epilogue: " << epilogue;
+       return ss.str();
+     }
+
+     size_t hash() const {
+       return std::hash<size_t>{}(
+                  (static_cast<size_t>(a) << 8) |
+                  (static_cast<size_t>(b)) << 4) |
+           (static_cast<size_t>(epilogue));
+     }
+   } supported_vec_size;
+
+   //! (Ampere+) Use cp.async to load operands.
+   bool async_gmem_load_operands = false;
+
+   //! Specifies the tiling hierarchy on block and warp levels.
+   MatMulTileOptions tile_sizes = {};
+
+   //! Specify the type of MMA op to be used in generated kernel.
+   MmaMacro mma_macro = MmaMacro::NoMMA;
+
+   //! Specify CTA rasterization order.
+   TileRasterizationOrder cta_order = TileRasterizationOrder::RowMajor;
+
+   //! Specify which tensor we circular buffer.
+   CircularBufferOptions circular_buffer_options = {};
+
+   //! Swizzle factor is used to increase L2 hit rate.
+   //! It horizontally squeezes the grid so that gridDim.x is larger and
+   //! gridDim.y is smaller.
+   //! We rely on the observation that the CTAs are scheduled by the GPU by
+   //! iterating on gridDim.x first. As a result, as blocks are launched, they
+   //! will more likely be forming sub-tiles of the C matrix. This will increase
+   //! L2 hit rate/data reuse of A and B.
+   //!
+   //! Eg for grid_swizzle_factor=2:
+   //!    A1 A2 B1 B2 -->   A1 A2 A3 A4 B1 B2 B3 B4
+   //!    A3 A4 B3 B4       C1 C2 C3 C4 D1 D2 D3 D4
+   //!    C1 C2 D1 D2
+   //!    C3 C4 D3 D4
+   int grid_swizzle_factor = 1;
+
+   //! Unswizzle MMA results in shared memory to get
+   //! coalesced write to global memory
+   bool use_smem_epilogue = false;
+
+   //! Promote reuse of prologue shared memory
+   bool promote_prologue_smem_reuse = false;
+
+   //! Whether to do single-kernel split-K. If this is >1, we will rfactor the K
+   //! axis and perform a grid reduction before the epilogue.
+   int splitk_factor = 1;
+
+   //! This is the CGA size on Hopper+ devices. This parameter is ignored on
+   //! Ampere and Turing.
+   struct ClusterDims {
+     int64_t x = 1;
+     int64_t y = 1;
+     int64_t z = 1;
+
+     bool operator==(const ClusterDims& other) const {
+       return x == other.x && y == other.y && z == other.z;
+     }
+
+     bool operator!=(const ClusterDims& other) const {
+       return !(*this == other);
+     }
+
+     std::string toString() const {
+       std::stringstream ss;
+       ss << "__cluster_dims__(" << x << ", " << y << ", " << z << ")";
+       return ss.str();
+     }
+
+     size_t hash() const {
+       return std::hash<size_t>{}(
+                  (static_cast<size_t>(x) << 32) |
+                  (static_cast<size_t>(y)) << 16) |
+           (static_cast<size_t>(z));
+     }
+   } cluster_dims;
+
+   std::string toString() const override {
+     std::stringstream ss;
+     ss << "\n===== Matmul Parameters ========\n"
+        << (tag.empty() ? "" : "Tag: ") << tag << "\n"
+        << "MMA macro: " << nvfuser::toString(mma_macro) << "\n"
+        << circular_buffer_options.toString() << "\n"
+        << supported_vec_size.toString() << "\n"
+        << nvfuser::toString(tile_sizes) << "\n"
+        << "Async global mem load: "
+        << (async_gmem_load_operands ? "true" : "false") << "\n"
+        << "Indexing mode: "
+        << (cparams.index_type.has_value()
+                ? (cparams.index_type.value() == PrimDataType::Int ? "int64_t"
+                                                                   : "int32_t")
+                : "unavailable")
+        << "\n"
+        << "Tile rasterization order: "
+        << ((cta_order == TileRasterizationOrder::RowMajor) ? "row-major"
+                                                            : "column-major")
+        << "\n"
+        << "Grid swizzle factor: " << grid_swizzle_factor << "\n"
+        << cluster_dims.toString() << "\n"
+        << "Use shared memory epilogue: " << use_smem_epilogue << "\n"
+        << "Promote re-use of prologue shared memory: "
+        << promote_prologue_smem_reuse << "\n"
+        << "Split-K factor: " << splitk_factor << "\n"
+        << "====================================\n";
+     return ss.str();
+   }
+
+   size_t hash() const override {
+     // combine boolean flags for hashing
+     size_t attr_hash = (static_cast<size_t>(promote_prologue_smem_reuse) << 2) |
+         (static_cast<size_t>(use_smem_epilogue) << 1) |
+         (static_cast<size_t>(async_gmem_load_operands));
+
+     // combined hash
+     attr_hash = std::hash<size_t>{}(attr_hash) ^
+         (nvfuser::hash(mma_macro) << 1) ^
+         (circular_buffer_options.hash() << 2) ^
+         (nvfuser::hash(tile_sizes) << 3) ^
+         (std::hash<size_t>{}(static_cast<size_t>(cta_order)) << 4) ^
+         (std::hash<size_t>{}(grid_swizzle_factor) << 5) ^
+         (std::hash<size_t>{}(splitk_factor) << 6);
+     return attr_hash;
+   }
+
+   bool sameAs(const HeuristicParams* other_base) const override {
+     auto other = dynamic_cast<const MatmulParams*>(other_base);
+     if (other == nullptr) {
+       return false;
+     }
+
+     return other->cparams == cparams && other->mma_macro == mma_macro &&
+         other->async_gmem_load_operands == async_gmem_load_operands &&
+         other->tile_sizes == tile_sizes &&
+         other->circular_buffer_options == circular_buffer_options &&
+         other->supported_vec_size == supported_vec_size &&
+         other->cta_order == cta_order &&
+         other->grid_swizzle_factor == grid_swizzle_factor &&
+         other->use_smem_epilogue == use_smem_epilogue &&
+         other->promote_prologue_smem_reuse == promote_prologue_smem_reuse &&
+         other->splitk_factor == splitk_factor;
+   }
+
+   std::unique_ptr<HeuristicParams> clone() const override {
+     return std::make_unique<MatmulParams>(*this);
+   }
+ };
+
+ } // namespace nvfuser
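As a worked illustration of the options above (a sketch only; the values are arbitrary, not tuned defaults): with smem_circular_buffer_stage = 4 and smem_circular_buffer_prefetch_gap = 1, the prefetch distance comes out to 4 - 1 = 3. The helper name printExampleMatmulParams is hypothetical.

  #include <iostream>
  #include <scheduler/matmul_heuristic.h>

  void printExampleMatmulParams() {
    nvfuser::MatmulParams params;
    // Circular-buffer operand loads through shared memory: 4 stages,
    // prefetch distance 4 - 1 = 3; cp.async loads are required for this.
    params.async_gmem_load_operands = true;
    params.circular_buffer_options.circular_buffer_smem_write = true;
    params.circular_buffer_options.circular_buffer_smem_read = true;
    params.circular_buffer_options.smem_circular_buffer_stage = 4;
    params.circular_buffer_options.smem_circular_buffer_prefetch_gap = 1;
    // Rasterize CTA tiles column-major and swizzle pairs of tiles for L2 reuse.
    params.cta_order =
        nvfuser::MatmulParams::TileRasterizationOrder::ColumnMajor;
    params.grid_swizzle_factor = 2;
    // rfactor the K axis into two chunks with a grid reduction before epilogue.
    params.splitk_factor = 2;
    // Hopper-only CGA size; ignored on Ampere and Turing.
    params.cluster_dims = {2, 1, 1};
    std::cout << params.toString() << std::endl;
  }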
@@ -0,0 +1,65 @@
+ // clang-format off
+ /*
+  * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+  * All rights reserved.
+  * SPDX-License-Identifier: BSD-3-Clause
+  */
+ // clang-format on
+ #pragma once
+
+ #include <mma_type.h>
+ #include <scheduler/matmul_heuristic.h>
+ #include <scheduler/matmul_heuristic_plugin_api.h>
+ #include <scheduler/mma_utils.h>
+
+ #include <memory>
+ #include <optional>
+
+ namespace nvfuser {
+
+ namespace matmul_heuristic_plugin {
+
+ //! Returns true if KernelConfigFactoryGuard is active indicating an imitated
+ //! plugin, or if a shared library plugin has been provided using the
+ //! environment variable NVFUSER_MATMUL_HEURISTIC_PLUGIN.
+ bool hasPlugin();
+
+ //! If there is no user-defined plugin (see hasPlugin()) we return false.
+ //! Otherwise, we use the plugin to modify the heuristic parameters in place. M,
+ //! N, K, layout (inner allocated dimension roles of each operand), and
+ //! precision must be provided. For convenience, we use `roles_map` to build the
+ //! precision string.
+ bool updateMatmulParams(
+     MatmulParams* params,
+     int64_t M,
+     int64_t N,
+     int64_t K,
+     int64_t batch_size,
+     const mma_utils::MatmulOperandInnerDims& inner_dims,
+     const mma_utils::TensorRolesMap& tensor_roles);
+
+ //! Defines the type of the "makeConfig" symbol
+ using KernelConfigFactory = std::function<std::unique_ptr<KernelConfig>()>;
+
+ //! This function can be used to imitate a plugin. To do so, subclass
+ //! KernelConfig, implementing a custom `configure` method, then create a guard
+ //! object like this:
+ //!
+ //!   KernelConfigFactoryGuard kfg([]() { return (KernelConfig*)(new
+ //!   MyKernelConfig);});
+ //!
+ //! When kfg passes out of scope, the config factory will be reset to its prior
+ //! value.
+ class KernelConfigFactoryGuard {
+  public:
+   explicit KernelConfigFactoryGuard(KernelConfigFactory func);
+   ~KernelConfigFactoryGuard();
+
+  private:
+   KernelConfigFactory prev_factory_;
+   bool prev_factory_modified_;
+ };
+
+ } // namespace matmul_heuristic_plugin
+
+ } // namespace nvfuser
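A hedged sketch of the intended call pattern: the problem sizes below are made up, and inner_dims/tensor_roles are assumed to come from mma_utils analysis of the fusion (the wrapper name refineParamsWithPlugin is hypothetical).

  #include <scheduler/matmul_heuristic.h>
  #include <scheduler/matmul_heuristic_plugin.h>
  #include <scheduler/mma_utils.h>

  // Returns true if a plugin was found and allowed to adjust the parameters.
  bool refineParamsWithPlugin(
      nvfuser::MatmulParams* params,
      const nvfuser::mma_utils::MatmulOperandInnerDims& inner_dims,
      const nvfuser::mma_utils::TensorRolesMap& tensor_roles) {
    using namespace nvfuser::matmul_heuristic_plugin;
    if (!hasPlugin()) {
      // Neither NVFUSER_MATMUL_HEURISTIC_PLUGIN nor an active
      // KernelConfigFactoryGuard is present.
      return false;
    }
    // The plugin may rewrite tile sizes, circular buffering, etc. in place.
    return updateMatmulParams(
        params, /*M=*/4096, /*N=*/4096, /*K=*/4096, /*batch_size=*/1,
        inner_dims, tensor_roles);
  }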