nvfuser_cu121_torch25-0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
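The diff below adds nvfuser/include/nvfuser/kernel_ir.h (1,457 lines, matching the RECORD entry above). It declares the lowered kernel IR: predicates, tensor indices, allocations, block/grid syncs, mbarrier and async ops, and grid reduction/welford expressions. A few hedged editorial sketches, marked as such in C++ comments, are interspersed to illustrate the constructs; they are not part of the shipped header.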
@@ -0,0 +1,1457 @@
// clang-format off
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
// clang-format on
#pragma once

#include <exceptions.h>
#include <ir/all_nodes.h>
#include <ir/base_nodes.h>
#include <mma_type.h>
#include <parallel_type_bitmap.h>
#include <tma.h>
#include <type.h>
#include <utils.h>
#include <visibility.h>

#include <cstdint>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace nvfuser {

class IrBuilderPasskey;

namespace kir {
class Kernel;

// Values
class Predicate;
class TensorIndex;

// Expressions
class Allocate;
class Asm;
class BlockSync;
class GridSync;
class FenceAsyncProxy;
class WgMmaFence;
class SetMaxNReg;
class Return;
class MBarrierInit;
class MBarrierInvalidate;
class MBarrierArrive;
class MBarrierArriveExpectTx;
class MBarrierWait;
class MBarrierWaitParity;
class BlockSerializeWait;
class BlockSerializeRelease;
class AsyncWait;
class AsyncCommit;
class InitMagicZero;
class UpdateMagicZero;
class IfThenElse;
class GridReduction;
class GroupedGridReduction;
class GridBroadcast;
class GridWelford;
class GroupedGridWelford;
class AllocateFusedReduction;

// Expr container

class Predicate final : public Val {
 public:
  explicit Predicate(
      IrBuilderPasskey passkey,
      PredicateType ptype,
      const Expr* expr = nullptr,
      Val* thread_pred = nullptr);

  explicit Predicate(IrBuilderPasskey passkey, ForLoop* unrolled_loop);

  explicit Predicate(IrBuilderPasskey passkey, Val* value);

  std::string toString(int indent_size = 0) const override;

  std::string toInlineString(int indent_size = 0) const override;

  PredicateType predicate_type() const {
    return ptype_;
  }

  const Expr* expr() const {
    NVF_ERROR(
        ptype_ != PredicateType::Unswitch &&
        ptype_ != PredicateType::Vectorize && ptype_ != PredicateType::Manual &&
        ptype_ != PredicateType::ElectSync);
    return expr_;
  }

  Val* thread_pred() const {
    NVF_ERROR(
        ptype_ == PredicateType::Inline ||
        ptype_ == PredicateType::Misaligned ||
        ptype_ == PredicateType::ReductionWrite ||
        ptype_ == PredicateType::ElectSync);
    return thread_pred_;
  }

  ForLoop* unrolled_loop() const {
    NVF_ERROR(ptype_ == PredicateType::Unswitch);
    return unrolled_loop_;
  }

  bool hasValue() const {
    return value_ != nullptr;
  }

  Val* value() const {
    NVF_ERROR(
        value_ != nullptr,
        "The conditional expression for this Predicate is invalid.");
    return value_;
  }

  void setValue(Val* value) {
    NVF_ERROR(value != nullptr, "The Bool expression is invalid.");
    value_ = value;
  }

  bool isConst() const final {
    return hasValue() && value_->isConst();
  }

  bool isTrivial() const {
    return isConst() && value_->value().is<bool>() &&
        value_->value().as<bool>();
  }

 private:
  PredicateType ptype_ = PredicateType::Manual;

  // For PredicateCompute::getInlinePredicate,
  // ShiftPredicateInserter::getShiftPredicate and getPaddingPredicate
  const Expr* expr_ = nullptr;

  // For PredicateCompute::getInlinePredicate
  Val* thread_pred_ = nullptr;

  // For ParallelType::Unswitch - UnswitchPredicate::get
  ForLoop* unrolled_loop_ = nullptr;

  // The Bool conditional value.
  // The value is nullptr until the lower_predicate pass.
  Val* value_ = nullptr;
};

class TensorIndex final : public Val {
 public:
  TensorIndex(
      IrBuilderPasskey,
      const TensorView* view,
      Val* index,
      DataType dtype = DataType::Null);

  Val* index() const {
    return index_;
  }

  TensorView* view() const {
    NVF_ERROR(view_ != nullptr);
    return const_cast<TensorView*>(view_); // NOLINT
  }

  std::string toString(int indent_size = 0) const override;

  std::string toInlineString(int indent_size = 0) const override;

 private:
  const TensorView* view_ = nullptr;
  Val* index_ = nullptr;
};

// In theory, we should just put this struct into class Asm, but unfortunately,
// due to a compiler bug, we cannot do that:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88165
struct AsmOptions {
  bool volatile_ = false;
  bool memory = false;
  std::unordered_set<int64_t> readable_outputs = {};
};

class Asm final : public Expr {
 public:
  using Options = AsmOptions;

  using Expr::Expr;

  explicit Asm(
      IrBuilderPasskey passkey,
      const std::string& code,
      const std::vector<Val*>& outputs,
      const std::vector<Val*>& inputs,
      const Options& options = Options());

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "Asm";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  const std::string& code() const {
    return attribute<std::string>(0);
  }

  const Options& options() const {
    return attribute<Options>(1);
  }

  Options& options() {
    return attribute<Options>(1);
  }

  bool volatile_() const {
    return options().volatile_;
  }

  bool& volatile_() {
    return options().volatile_;
  }

  bool memory() const {
    return options().memory;
  }

  bool& memory() {
    return options().memory;
  }

  bool hasBooleanInput() const {
    for (auto input : inputs()) {
      if (input->dtype() == DataType::Bool) {
        return true;
      }
    }
    return false;
  }

  std::vector<std::pair<std::string, Val*>> constraintsAndOutputs() const;
  std::vector<std::pair<std::string, Val*>> constraintsAndInputs() const;

  std::string parameters() const;
};
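
// [Editor's sketch, not part of this header] An Asm node models GCC-style
// extended inline assembly; volatile_() and memory() correspond roughly to
// the `volatile` qualifier and the "memory" clobber of the emitted asm. A
// hand-written CUDA analogue (the PTX string is illustrative only):
//
//   asm volatile("cp.async.commit_group;\n" ::: "memory");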

//! Allocate is a lower level Node that describes a buffer of memory that
//! is required as an intermediate within a kernel. The extent is the expression
//! of the size of the buffer that is generated from the TensorView that
//! describes the output of an operation.
class Allocate final : public Expr {
 public:
  using Expr::Expr;

  //! Allocation of a multi-dimensional buffer
  //!
  //! param shape Size of each dimension
  //! param zero_init Should this memory be zero-initialized?
  //! param resets_to_zero Will this memory be set to zero upon completion of
  //! this kernel?
  //! param alias Is this an alias of previously-allocated memory
  explicit Allocate(
      IrBuilderPasskey passkey,
      Val* buffer,
      MemoryType memory_type,
      std::vector<Val*> shape = {},
      bool zero_init = false,
      bool resets_to_zero = false,
      Allocate* alias = nullptr);

  //! Allocation of a non-dimensional buffer
  //!
  //! param size Size of allocation
  explicit Allocate(
      IrBuilderPasskey passkey,
      Val* buffer,
      MemoryType memory_type,
      Val* size,
      bool zero_init = false,
      bool resets_to_zero = false);

  const char* getOpString() const override {
    return "Allocate";
  }

  NVFUSER_DECLARE_CLONE_AND_CREATE

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* buffer() const {
    return attributeVal(0);
  }

  MemoryType memoryType() const {
    return attribute<MemoryType>(1);
  }

  //! Total size
  Val* size() const {
    return input(0);
  }

  //! Size of each dimension
  std::vector<Val*> shape() const {
    std::vector<Val*> result;
    result.reserve(attributes().size() - 6);
    for (auto i = attributes().begin() + 6; i != attributes().end(); ++i) {
      result.emplace_back((*i)->as<Val>());
    }
    return result;
  }

  //! Does this allocation require its memory to be initialized to zero before
  //! this kernel is launched? If this is true, then an additional memset
  //! kernel might be launched before the current Fusion kernel is launched in
  //! order to guarantee that this buffer is filled with zeroes (see
  //! resetsToZero() below).
  bool zeroInit() const {
    return attribute<bool>(2);
  }

  //! Is this buffer guaranteed to be reset to all zero values at the end of
  //! this kernel? This is used to avoid an additional memset kernel launch for
  //! buffers that require zeroed memory (see zeroInit() above).
  //!
  //! A common use case for zeroInit() allocations is semaphore buffers that
  //! hold counters starting at zero. Typically, each participating thread would
  //! increment the counter and the last thread would leave the counter in a
  //! non-zeroed state. The next time that kernel is run, it can no longer
  //! re-use the non-zero semaphore buffer, so KernelExecutor will launch
  //! at::zeros to allocate a new buffer, resulting in a memset kernel launch.
  //!
  //! Instead, if the last thread resets the counter to zero, then the buffer
  //! can be re-used, and at::zeros need only be run at the first kernel
  //! launch. If resetsToZero() is true, then KernelExecutor will use
  //! contigZeroedTensor() and releaseZeroedMemory() from global_allocator.h to
  //! reuse zeroed memory, avoiding the additional kernel launch.
  //!
  //! Whenever possible, we should try to guarantee that resetsToZero() is true
  //! if zeroInit() is true by modifying our code to clean up global counters,
  //! because the latency penalty of an additional kernel launch should be
  //! greater than that required to reset this memory at the end of the fusion.
  //! The exception is when a kernel is launched only a single time, in which
  //! case resetting the memory is unnecessary, but we expect that kernels will
  //! instead be launched many times.
  bool resetsToZero() const {
    return attribute<bool>(3);
  }

  // This alias tracks the next Allocate node in a linked chain of aliases.
  // If the alias is nullptr, then the Allocate node uses memory in the kernel.
  const Allocate* alias() const {
    return dynamic_cast<const Allocate*>(attribute(4));
  }

  // Set the address of a shared memory allocation within the dynamic shared
  // memory array. The addr argument should be a scalar expression describing an
  // aligned address in bytes.
  void setAddress(Val* addr) {
    NVF_CHECK(
        memoryType() == MemoryType::Shared,
        "Allocation address may only be set for shared memory allocations. Memory type is ",
        memoryType());
    NVF_CHECK(
        address() == nullptr,
        "Attempted to set address twice for allocation ",
        toString());
    attributes_[5] = addr;
  }

  // This is an integer scalar describing the byte address within the dynamic
  // shared memory array for a shared memory allocation. For memory types other
  // than Shared, or before allocation, this function might return nullptr.
  Val* address() const {
    return attributeVal(5);
  }
};
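
// [Editor's sketch, not part of this header] The zeroInit()/resetsToZero()
// contract above, illustrated with a hypothetical grid-wide semaphore counter
// (all names below are illustrative, not nvfuser APIs):
//
//   __device__ void arrive(unsigned* semaphore) {
//     if (threadIdx.x == 0) {
//       // The counter must start at zero (zero_init).
//       unsigned prev = atomicAdd(semaphore, 1u);
//       if (prev + 1 == gridDim.x) {
//         // The last block resets the counter (resets_to_zero), so the
//         // buffer can be reused on the next launch without a memset.
//         *semaphore = 0;
//       }
//     }
//   }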

// Sync represents a __syncthreads() barrier for block-level coordination.
//
// TODO(kir): change name to SyncThreads as we could have other barriers.
//
class BlockSync final : public Expr {
 public:
  using Expr::Expr;

  explicit BlockSync(IrBuilderPasskey passkey, bool war_sync = false);

  const char* getOpString() const override {
    return "BlockSync";
  }

  NVFUSER_DECLARE_CLONE_AND_CREATE

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  // TODO: war_sync_ is only used for testing/validation purposes.
  bool isWarHazardSync() const {
    return attribute<bool>(0);
  }
};

// Synchronize all blocks in the device; implies that a cooperative group
// launch is required.
class GridSync final : public Expr {
 public:
  using Expr::Expr;

  explicit GridSync(
      IrBuilderPasskey passkey,
      ParallelTypeBitmap sync_dims,
      Val* sync_buffer);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "GridSync";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  ParallelTypeBitmap syncDims() const {
    return attribute<ParallelTypeBitmap>(0);
  }

  Val* syncBuffer() const {
    return attributeVal(1);
  }
};

// PTX: fence.proxy.async
class FenceAsyncProxy final : public Expr {
 public:
  using Expr::Expr;

  explicit FenceAsyncProxy(IrBuilderPasskey passkey);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "FenceAsyncProxy";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
};

// PTX: wgmma.fence.sync.aligned
class WgMmaFence final : public Expr {
 public:
  using Expr::Expr;

  explicit WgMmaFence(IrBuilderPasskey passkey);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "WgMmaFence";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
};

// PTX: setmaxnreg.inc.sync.aligned.u32 and setmaxnreg.dec.sync.aligned.u32
class SetMaxNReg final : public Expr {
 public:
  using Expr::Expr;

  explicit SetMaxNReg(
      IrBuilderPasskey passkey,
      Val* number_of_registers,
      bool increase_registers);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return (increaseRegisters()) ? "IncSetMaxNReg" : "DecSetMaxNReg";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  bool increaseRegisters() const {
    return attribute<bool>(0);
  }

  Val* numberOfRegisters() const {
    return input(0);
  }
};

class Return final : public Expr {
 public:
  using Expr::Expr;

  explicit Return(IrBuilderPasskey passkey);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "Return";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
};

class MBarrierInit final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierInit(
      IrBuilderPasskey passkey,
      Val* mbarrier,
      Val* thread_count);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierInit";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* mbarrier() const {
    return input(0);
  }

  Val* threadCount() const {
    return input(1);
  }
};

class MBarrierInvalidate final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierInvalidate(IrBuilderPasskey passkey, Val* mbarrier);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierInvalidate";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* mbarrier() const {
    return input(0);
  }
};

class MBarrierArrive final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierArrive(IrBuilderPasskey passkey, Val* state, Val* mbarrier);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierArrive";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* state() const {
    if (!outputs().empty()) {
      return output(0);
    }
    return nullptr;
  }

  Val* mbarrier() const {
    return input(0);
  }
};

// IR node for: mbarrier.arrive.expect_tx
// This is usually used to specify the number of bytes that will be
// transferred for cp.async and cp.async.bulk, so that a future mbarrier.wait
// can wait for the completion of the transfer.
class MBarrierArriveExpectTx final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierArriveExpectTx(
      IrBuilderPasskey passkey,
      Val* state,
      Val* mbarrier,
      Val* tx_count);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierArriveExpectTx";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* state() const {
    if (!outputs().empty()) {
      return output(0);
    }
    return nullptr;
  }

  Val* mbarrier() const {
    return input(0);
  }

  Val* txCount() const {
    return input(1);
  }
};
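
// [Editor's sketch, not part of this header] These nodes correspond roughly
// to the PTX mbarrier instructions; exact operand forms vary by PTX ISA
// version and are shown here for orientation only:
//
//   mbarrier.init.shared.b64              [mbarrier], thread_count;
//   mbarrier.arrive.expect_tx.shared.b64  state, [mbarrier], tx_count;
//   mbarrier.try_wait.parity.shared.b64   done, [mbarrier], parity;
//   mbarrier.inval.shared.b64             [mbarrier];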

class MBarrierWait final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierWait(IrBuilderPasskey passkey, Val* mbarrier, Val* state);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierWait";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* mbarrier() const {
    return input(0);
  }

  Val* state() const {
    return input(1);
  }
};

class MBarrierWaitParity final : public Expr {
 public:
  using Expr::Expr;
  explicit MBarrierWaitParity(
      IrBuilderPasskey passkey,
      Val* mbarrier,
      Val* parity);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "MBarrierWaitParity";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* mbarrier() const {
    return input(0);
  }

  Val* parity() const {
    return input(1);
  }
};

// For all but the first block in each reduction segment, the first thread
// waits for the sync flag to indicate that it is our turn to proceed (the
// sync flag is incremented by BlockSerializeRelease). Then block sync. This
// has the effect of serializing blocks in each reduction segment. This is a
// block syncing operation.
class BlockSerializeWait final : public Expr {
 public:
  using Expr::Expr;

  explicit BlockSerializeWait(
      IrBuilderPasskey passkey,
      ParallelTypeBitmap sync_dims,
      Val* sync_buffer);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "BlockSerializeWait";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  ParallelTypeBitmap syncDims() const {
    return attribute<ParallelTypeBitmap>(0);
  }

  Val* syncBuffer() const {
    return attributeVal(1);
  }
};

// This first performs a block sync. For all but the last block in the
// reduction segment, the first thread then writes the next segment ID to the
// sync flag. When used with BlockSerializeWait, this has the effect of
// serializing blocks in order within each reduction segment.
class BlockSerializeRelease final : public Expr {
 public:
  using Expr::Expr;

  explicit BlockSerializeRelease(
      IrBuilderPasskey passkey,
      ParallelTypeBitmap sync_dims,
      Val* sync_buffer);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "BlockSerializeRelease";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  ParallelTypeBitmap syncDims() const {
    return attribute<ParallelTypeBitmap>(0);
  }

  Val* syncBuffer() const {
    return attributeVal(1);
  }
};

// AsyncWait represents wait intrinsics for cp.async, cp.async.bulk and
// wgmma.mma_async
class AsyncWait final : public Expr {
 public:
  using Expr::Expr;

  explicit AsyncWait(
      IrBuilderPasskey passkey,
      AsyncOpType async_op_type,
      int64_t keep_stages = 0);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "AsyncWait";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  const char* ptx() const;
  bool memory() const;

  AsyncOpType asyncOpType() const {
    return attribute<AsyncOpType>(0);
  }

  //! Returns the remaining number of stages that are not synchronized
  //! after this op.
  int64_t keepStages() const {
    return attribute<int64_t>(1);
  }
};

// AsyncCommit represents commit intrinsics for cp.async.
// A commit intrinsic communicates the delimiter of transaction groups
// to the async load hardware. For example usage, see [Circular buffer].
class AsyncCommit final : public Expr {
 public:
  using Expr::Expr;

  explicit AsyncCommit(IrBuilderPasskey passkey, AsyncOpType async_op_type);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "AsyncCommit";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  const char* ptx() const;

  //! Returns whether the corresponding PTX needs a `:memory` at the end; this
  //! value will be used to set AsmOptions::memory when lowering to inline PTX.
  bool memory() const;

  AsyncOpType asyncOpType() const {
    return attribute<AsyncOpType>(0);
  }
};
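
// [Editor's sketch, not part of this header] For cp.async, for example,
// commit and wait lower roughly to the PTX pair below, where keepStages()
// supplies N, the number of most-recently committed groups allowed to remain
// in flight:
//
//   cp.async.commit_group;
//   cp.async.wait_group N;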

// Simply prints "DEFINE_MAGIC_ZERO" in the code in accordance with magic_zero
// in helpers.cu
class InitMagicZero final : public Expr {
 public:
  using Expr::Expr;

  explicit InitMagicZero(IrBuilderPasskey passkey);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "InitMagicZero";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
};

// Simply prints "UPDATE_MAGIC_ZERO" in the code in accordance with magic_zero
// in helpers.cu
class UpdateMagicZero final : public Expr {
 public:
  using Expr::Expr;

  explicit UpdateMagicZero(IrBuilderPasskey passkey);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "UpdateMagicZero";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;
};

//! IfThenElse provides scoping for a boolean operator. Exprs placed in its
//! body are considered inside the scope of the if statement. In the future the
//! implementation should look quite different so that we can do proper
//! dependency analysis like in Fusion.
//!
//! TODO(kir): this is not a real expression
//!
class IfThenElse final : public Expr {
 public:
  using Expr::Expr;

  explicit IfThenElse(IrBuilderPasskey passkey, Predicate* cond);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "IfThenElse";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Scope& thenBody() {
    return attribute<Scope>(0);
  }
  const Scope& thenBody() const {
    return attribute<Scope>(0);
  }

  Scope& elseBody() {
    return attribute<Scope>(1);
  }

  const Scope& elseBody() const {
    return attribute<Scope>(1);
  }

  bool hasElse() const {
    return !elseBody().empty();
  }

  bool empty() const {
    return thenBody().empty() && elseBody().empty();
  }
};

//! Grid reduction operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! reduction and the buffer allocation needed to do it.
//!
//! This node provides KernelExecutor the information it needs to allocate the
//! reduction and sync buffers.
class GridReduction final : public ReductionOp {
  static constexpr int num_reduction_op_attr = 4;

 public:
  using ReductionOp::ReductionOp;

  GridReduction(
      IrBuilderPasskey passkey,
      BinaryOpType reduction_op_type,
      Val* init,
      Val* out,
      Val* in,
      Allocate* reduction_buffer,
      Allocate* sync_buffer,
      Val* entrance_index,
      Val* entrances,
      bool is_allreduce = false,
      TensorIndex* serial_reduction_tensor = nullptr);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "GridReduction";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Allocate* reduction_buffer() const {
    return attribute(num_reduction_op_attr)->as<Allocate>();
  }

  Allocate* sync_buffer() const {
    return attribute(num_reduction_op_attr + 1)->as<Allocate>();
  }

  // Which instance of entering this grid reduction is this iteration?
  Val* entrance_index() const {
    return attributeVal(num_reduction_op_attr + 2);
  }

  // How many times will this grid reduction be entered
  Val* entrances() const {
    return attributeVal(num_reduction_op_attr + 3);
  }

  // gridReduce has template flags for thread predicates. In order to
  // use them, the thread predicate is held here separately from
  // Expr::predicate_.
  const ParallelTypeBitmap& threadPredicate() const {
    return attribute<ParallelTypeBitmap>(num_reduction_op_attr + 4);
  }

  ParallelTypeBitmap& threadPredicate() {
    return attribute<ParallelTypeBitmap>(num_reduction_op_attr + 4);
  }

  TensorIndex* serialReductionTensor() const {
    return dynamic_cast<TensorIndex*>(attributeVal(num_reduction_op_attr + 5));
  }

  bool isSerial() const {
    return serialReductionTensor() != nullptr;
  }

  GridReduction* withThreadPredicate(
      const ParallelTypeBitmap& thread_predicate) {
    auto result = shallowCopy()->as<GridReduction>();
    result->threadPredicate() = thread_predicate;
    return result;
  }
};
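
// [Editor's sketch, not part of this header] How the two buffers above are
// typically used by a grid reduction (hypothetical CUDA outline; names are
// illustrative):
//
//   partials[blockIdx.x] = block_partial;  // reduction_buffer
//   __threadfence();
//   bool last = false;
//   if (threadIdx.x == 0) {
//     // sync_buffer elects the last-arriving block
//     last = (atomicAdd(&sync[0], 1u) + 1 == gridDim.x);
//   }
//   if (__syncthreads_or(last)) {
//     // this block combines all partials and writes the final result
//   }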

class GroupedGridReduction final : public GroupedReductionOp {
 public:
  using GroupedReductionOp::GroupedReductionOp;

  GroupedGridReduction(
      IrBuilderPasskey passkey,
      std::vector<BinaryOpType> reduction_op_type,
      std::vector<Val*> init,
      std::vector<Val*> out,
      std::vector<Val*> in,
      std::vector<Allocate*> reduction_buffers,
      Allocate* sync_buffer,
      Val* entrance_index,
      Val* entrances,
      Val* buffer_stride,
      bool is_allreduce = false);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  // number of attributes in the parent class
  size_t numGroupedReductionOpAttr() const {
    return 2 + outputs().size();
  }

  const char* getOpString() const override {
    return "GroupedGridReduction";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  std::vector<Allocate*> reduction_buffers() const {
    auto offset = numGroupedReductionOpAttr() + 5;
    auto size = outputs().size();
    std::vector<Allocate*> result;
    result.reserve(size);
    for (auto i : c10::irange(offset, offset + size)) {
      result.emplace_back(attribute(i)->as<Allocate>());
    }
    return result;
  }

  Allocate* reduction_buffer(size_t i) const {
    return reduction_buffers().at(i);
  }

  Allocate* sync_buffer() const {
    return attribute(numGroupedReductionOpAttr())->as<Allocate>();
  }

  // Which instance of entering this grid reduction is this iteration?
  Val* entrance_index() const {
    return attributeVal(numGroupedReductionOpAttr() + 1);
  }

  // How many times will this grid reduction be entered
  Val* entrances() const {
    return attributeVal(numGroupedReductionOpAttr() + 2);
  }

  // Stride of reduction buffers
  Val* buffer_stride() const {
    return attributeVal(numGroupedReductionOpAttr() + 3);
  }

  // gridReduce has template flags for thread predicates. In order to
  // use them, the thread predicate is held here separately from
  // Expr::predicate_.
  const ParallelTypeBitmap& threadPredicate() const {
    return attribute<ParallelTypeBitmap>(numGroupedReductionOpAttr() + 4);
  }

  ParallelTypeBitmap& threadPredicate() {
    return attribute<ParallelTypeBitmap>(numGroupedReductionOpAttr() + 4);
  }

  GroupedGridReduction* withThreadPredicate(
      const ParallelTypeBitmap& thread_predicate) {
    auto result = shallowCopy()->as<GroupedGridReduction>();
    result->threadPredicate() = thread_predicate;
    return result;
  }
};

//! Grid broadcast operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! broadcast and the buffer allocation needed to do it.
//!
//! This node provides KernelExecutor the information it needs to allocate the
//! broadcast and sync buffers.
class GridBroadcast final : public Expr {
 public:
  using Expr::Expr;

  GridBroadcast(
      IrBuilderPasskey passkey,
      BroadcastOp* broadcast_op,
      Allocate* broadcast_buffer,
      Allocate* sync_buffer);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "GridBroadcast";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  BroadcastOp* broadcast_op() const {
    return attribute(0)->as<BroadcastOp>();
  }

  Allocate* broadcast_buffer() const {
    return attribute(1)->as<Allocate>();
  }

  Allocate* sync_buffer() const {
    return attribute(2)->as<Allocate>();
  }
};

//! Grid welford operation
//!
//! This node is used only after lowering a fusion to explicitly mark a grid
//! reduction and the buffer allocation needed to do it.
//!
//! This node provides KernelExecutor the information it needs to allocate the
//! reduction and sync buffers.
//!
//! TODO: Make this a subclass of WelfordOp
class GridWelford final : public Expr {
 public:
  using Expr::Expr;

  GridWelford(
      IrBuilderPasskey passkey,
      WelfordOp* welford_op,
      Allocate* var_buffer,
      Allocate* avg_buffer,
      Allocate* n_buffer,
      Allocate* sync_buffer,
      Val* entrance_index,
      Val* entrances);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "GridWelford";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  WelfordOp* welford_op() const {
    return attribute(0)->as<WelfordOp>();
  }

  Allocate* var_buffer() const {
    return attribute(1)->as<Allocate>();
  }

  Allocate* avg_buffer() const {
    return attribute(2)->as<Allocate>();
  }

  Allocate* N_buffer() const {
    return attribute(3)->as<Allocate>();
  }

  Allocate* sync_buffer() const {
    return attribute(4)->as<Allocate>();
  }

  // Which instance of entering this grid reduction is this iteration?
  Val* entrance_index() const {
    return attributeVal(5);
  }

  // How many times will this grid reduction be entered
  Val* entrances() const {
    return attributeVal(6);
  }

  // gridReduce has template flags for thread predicates. In order to
  // use them, the thread predicate is held here separately from
  // Expr::predicate_.
  const ParallelTypeBitmap& threadPredicate() const {
    return attribute<ParallelTypeBitmap>(7);
  }
  ParallelTypeBitmap& threadPredicate() {
    return attribute<ParallelTypeBitmap>(7);
  }

  GridWelford* withThreadPredicate(const ParallelTypeBitmap& thread_predicate) {
    auto result = shallowCopy()->as<GridWelford>();
    result->threadPredicate() = thread_predicate;
    return result;
  }
};

class GroupedGridWelford final : public GroupedWelfordOp {
 public:
  using GroupedWelfordOp::GroupedWelfordOp;

  // input, output and init vals are vectors of triplets
  GroupedGridWelford(
      IrBuilderPasskey passkey,
      std::vector<WelfordTriplet> output_vals,
      std::vector<WelfordTriplet> input_vals,
      std::vector<WelfordTriplet> init_vals,
      std::array<std::vector<Allocate*>, 3> reduction_buffers,
      Allocate* sync_buffer,
      Val* entrance_index,
      Val* entrances,
      Val* buffer_stride,
      bool is_allreduce = false,
      bool use_outer_opt = false);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  size_t numGroupedWelfordOpAttr() const {
    return 1 + outputs().size();
  }

  const char* getOpString() const override {
    return "GroupedGridWelford";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  std::array<std::vector<Allocate*>, 3> reduction_buffers() const {
    auto offset = numGroupedWelfordOpAttr() + 5;
    auto size = outputs().size() / 3;
    std::array<std::vector<Allocate*>, 3> result;
    result[0].reserve(size);
    result[1].reserve(size);
    result[2].reserve(size);
    for (auto i : c10::irange(size)) {
      result[0].emplace_back(attribute(offset + i * 3)->as<Allocate>());
      result[1].emplace_back(attribute(offset + i * 3 + 1)->as<Allocate>());
      result[2].emplace_back(attribute(offset + i * 3 + 2)->as<Allocate>());
    }
    return result;
  }

  Allocate* sync_buffer() const {
    return attribute(numGroupedWelfordOpAttr())->as<Allocate>();
  }

  // Which instance of entering this grid reduction is this iteration?
  Val* entrance_index() const {
    return attributeVal(numGroupedWelfordOpAttr() + 1);
  }

  // How many times will this grid reduction be entered
  Val* entrances() const {
    return attributeVal(numGroupedWelfordOpAttr() + 2);
  }

  // Stride of reduction buffers
  Val* buffer_stride() const {
    return attributeVal(numGroupedWelfordOpAttr() + 3);
  }

  // gridReduce has template flags for thread predicates. In order to
  // use them, the thread predicate is held here separately from
  // Expr::predicate_.
  const ParallelTypeBitmap& threadPredicate() const {
    return attribute<ParallelTypeBitmap>(numGroupedWelfordOpAttr() + 4);
  }
  ParallelTypeBitmap& threadPredicate() {
    return attribute<ParallelTypeBitmap>(numGroupedWelfordOpAttr() + 4);
  }

  GroupedGridWelford* withThreadPredicate(
      const ParallelTypeBitmap& thread_predicate) {
    auto result = shallowCopy()->as<GroupedGridWelford>();
    result->threadPredicate() = thread_predicate;
    return result;
  }

  // True if the outer-optimized kernel should be used
  bool useOuterOpt() const {
    auto offset = numGroupedWelfordOpAttr() + 5 + outputs().size();
    return attribute<bool>(offset);
  }

  //! Return the required smem buffer size
  int64_t getSmemBufferSize(int64_t bdimx, int64_t bdimy, int64_t bdimz) const;
};
|
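
// Worked example of the flat attribute layout used above, assuming two
// grouped Welford ops (so outputs().size() == 6 and
// numGroupedWelfordOpAttr() == 1 + 6 == 7):
//
//   attribute 7         sync_buffer()
//   attribute 8         entrance_index()
//   attribute 9         entrances()
//   attribute 10        buffer_stride()
//   attribute 11        threadPredicate()
//   attributes 12..17   reduction buffers, three per group, presumably the
//                       avg/var/N buffers matching the WelfordTriplet layout:
//                       [avg0, var0, N0, avg1, var1, N1]
//   attribute 18        useOuterOpt()  (== 7 + 5 + 6)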

//! Represents a WelfordOp whose division by count is hoisted out
//! of an innermost loop
class VectorizedWelfordOp final : public WelfordOp {
 public:
  using WelfordOp::WelfordOp;

  VectorizedWelfordOp(
      IrBuilderPasskey,
      const WelfordTriplet& output,
      const WelfordTriplet& input,
      const WelfordTriplet& init,
      Val* count,
      Val* reciprocal_of_count,
      Val* hoisted_predicate);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "VectorizedWelfordOp";
  }

  //! New count that should be set to outN
  Val* count() const {
    return attributeVal(WelfordOp::kNumAttrs);
  }

  //! Reciprocal of count
  Val* reciprocalOfCount() const {
    return attributeVal(WelfordOp::kNumAttrs + 1);
  }

  //! Predicate of this expression hoisted out of an innermost loop
  Val* hoistedPredicate() const {
    return attributeVal(WelfordOp::kNumAttrs + 2);
  }
};
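
// A rough illustration of the hoisting this node represents (hypothetical
// scalar code, not the generated kernel): the divide by count is performed
// once, outside the innermost loop, so the loop body only multiplies by the
// precomputed reciprocal.
//
//   void finalize_variance(const float* m2, float* var, int n, float count) {
//     const float r = 1.0f / count;  // hoisted: a single divide
//     for (int i = 0; i < n; ++i) {
//       var[i] = m2[i] * r;          // multiply replaces a per-element divide
//     }
//   }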

// Allocate an instance of the fused reduction class.
class AllocateFusedReduction final : public Expr {
  explicit AllocateFusedReduction(IrBuilderPasskey passkey, Expr* grid_expr);

 public:
  using Expr::Expr;

  explicit AllocateFusedReduction(
      IrBuilderPasskey passkey,
      GridReduction* grid_reduction)
      : AllocateFusedReduction(passkey, dynamic_cast<Expr*>(grid_reduction)) {}

  explicit AllocateFusedReduction(
      IrBuilderPasskey passkey,
      GridWelford* grid_welford)
      : AllocateFusedReduction(passkey, dynamic_cast<Expr*>(grid_welford)) {}

  explicit AllocateFusedReduction(
      IrBuilderPasskey passkey,
      GroupedGridReduction* grouped_grid_reduction)
      : AllocateFusedReduction(
            passkey,
            dynamic_cast<Expr*>(grouped_grid_reduction)) {}

  explicit AllocateFusedReduction(
      IrBuilderPasskey passkey,
      GroupedGridWelford* grouped_grid_welford)
      : AllocateFusedReduction(
            passkey,
            dynamic_cast<Expr*>(grouped_grid_welford)) {}

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "AllocateFusedReduction";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  //! GridReduction, GridWelford, GroupedGridReduction or GroupedGridWelford
  Expr* gridExpr() const {
    return attribute(0)->asExpr();
  }

  TensorIndex* out() const;

  const ParallelTypeBitmap& threadPredicate() const;
};
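
// A sketch of how a consumer might recover the concrete grid op stored in
// attribute(0). The handler below is hypothetical, but the casts only use
// classes declared in this header.
//
//   void handle(const AllocateFusedReduction* alloc) {
//     Expr* e = alloc->gridExpr();
//     if (auto* reduction = dynamic_cast<GridReduction*>(e)) {
//       // allocate the fused-reduction object for a grid reduction
//     } else if (auto* welford = dynamic_cast<GridWelford*>(e)) {
//       // or for a grid Welford, and likewise for the grouped variants
//     }
//   }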

class GetRNGSeedAndOffsetFromHost : public Expr {
 public:
  using Expr::Expr;

  GetRNGSeedAndOffsetFromHost(
      IrBuilderPasskey,
      Val* seed_ptr,
      Val* seed_val,
      Val* first_offset_ptr,
      Val* first_offset_val,
      int64_t offsets = -1);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "GetRNGSeedAndOffsetFromHost";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  const int64_t& offsets() const {
    return attribute<int64_t>(0);
  }

  int64_t& offsets() {
    return attribute<int64_t>(0);
  }

  std::vector<PolymorphicValue> evaluate(
      const ExpressionEvaluator& ee,
      const std::vector<PolymorphicValue>& inputs) const override;
};

// Expr for driver API cuTensorMapEncodeTiled
class EncodeTensorMapTiled : public Expr {
 public:
  using Expr::Expr;

  EncodeTensorMapTiled(
      IrBuilderPasskey,
      Val* output,
      DataType data_type,
      Val* global_address,
      Val* global_dim,
      Val* global_strides,
      Val* box_dim,
      Val* element_strides,
      tma::TensorMapInterleave interleave,
      MmaInputSmemSwizzle swizzle,
      tma::TensorMapL2Promotion l2_promotion,
      tma::TensorMapFloatOOBFill oob_fill);

  NVFUSER_DECLARE_CLONE_AND_CREATE

  const char* getOpString() const override {
    return "EncodeTensorMapTiled";
  }

  std::string toString(int indent_size = 0) const override;
  std::string toInlineString(int indent_size = 0) const override;

  Val* globalAddress() const {
    return input(0);
  }

  Val* globalDim() const {
    return input(1);
  }

  Val* globalStrides() const {
    return input(2);
  }

  Val* boxDim() const {
    return input(3);
  }

  Val* elementStrides() const {
    return input(4);
  }

  const DataType& dataType() const {
    return attribute<DataType>(0);
  }

  const int64_t& tensorRank() const {
    return attribute<int64_t>(1);
  }

  const tma::TensorMapInterleave& interleave() const {
    return attribute<tma::TensorMapInterleave>(2);
  }

  const MmaInputSmemSwizzle& swizzle() const {
    return attribute<MmaInputSmemSwizzle>(3);
  }

  const tma::TensorMapL2Promotion& l2Promotion() const {
    return attribute<tma::TensorMapL2Promotion>(4);
  }

  const tma::TensorMapFloatOOBFill& oobFill() const {
    return attribute<tma::TensorMapFloatOOBFill>(5);
  }

  std::vector<PolymorphicValue> evaluate(
      const ExpressionEvaluator& ee,
      const std::vector<PolymorphicValue>& inputs) const override;
};
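
// Rough correspondence between the accessors above and the parameters of the
// CUDA driver call this node models (a sketch of the documented
// cuTensorMapEncodeTiled signature; the actual marshalling happens in
// evaluate()):
//
//   cuTensorMapEncodeTiled(
//       &tensor_map,
//       /*tensorDataType=*/ dataType(),        // attribute 0
//       /*tensorRank=*/     tensorRank(),      // attribute 1
//       /*globalAddress=*/  globalAddress(),   // input 0
//       /*globalDim=*/      globalDim(),       // input 1
//       /*globalStrides=*/  globalStrides(),   // input 2
//       /*boxDim=*/         boxDim(),          // input 3
//       /*elementStrides=*/ elementStrides(),  // input 4
//       /*interleave=*/     interleave(),      // attribute 2
//       /*swizzle=*/        swizzle(),         // attribute 3
//       /*l2Promotion=*/    l2Promotion(),     // attribute 4
//       /*oobFill=*/        oobFill());        // attribute 5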

} // namespace kir
} // namespace nvfuser