nvfuser-cu121-torch25 0.2.25.dev20250201__cp312-cp312-manylinux_2_28_x86_64.whl
Sign up to get free protection for your applications and to get access to all the features.
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,522 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <exceptions.h>
|
11
|
+
#include <visibility.h>
|
12
|
+
|
13
|
+
#include <disjoint_set.h>
|
14
|
+
#include <ir/all_nodes.h>
|
15
|
+
#include <iter_visitor.h>
|
16
|
+
#include <unordered_map>
|
17
|
+
#include <vector>
|
18
|
+
|
19
|
+
namespace nvfuser {
|
20
|
+
|
21
|
+
class LogicalDomainMap;
|
22
|
+
|
23
|
+
namespace {
|
24
|
+
|
25
|
+
// Enable pair<IterDomain*, size_t> in a set, size_t must be unique in set
|
26
|
+
struct id_int_lt {
|
27
|
+
bool operator()(
|
28
|
+
const std::pair<IterDomain*, size_t>& first,
|
29
|
+
const std::pair<IterDomain*, size_t>& second) const {
|
30
|
+
return first.second < second.second;
|
31
|
+
}
|
32
|
+
};
|
33
|
+
|
34
|
+
} // namespace
|
35
|
+
|
36
|
+
// Uses the history of _target_domain, and replays that history using the
|
37
|
+
// provided map.
|
38
|
+
//
|
39
|
+
// target_domain contains the history we want replayed.
|
40
|
+
//
|
41
|
+
// id_map maps IterDomains in that history to the IterDomains we want it
|
42
|
+
// replayed on.
|
43
|
+
//
|
44
|
+
// error_on_failure = true will cause the replay to error if we can't replay any
|
45
|
+
// operation in target_domain's history due to missing IDs in the id_map.
|
46
|
+
//
|
47
|
+
// If error_on_failure = false, replay will replay everything it can, and ignore
|
48
|
+
// operations it can't.
|
49
|
+
class ReplayTransformations : public IterVisitor {
|
50
|
+
public:
|
51
|
+
ReplayTransformations(
|
52
|
+
const std::vector<IterDomain*>& target_domain,
|
53
|
+
std::unordered_map<IterDomain*, IterDomain*> id_map);
|
54
|
+
|
55
|
+
ReplayTransformations& setErrorOnFailure(bool error_on_failure) {
|
56
|
+
error_on_failure_ = error_on_failure;
|
57
|
+
return *this;
|
58
|
+
}
|
59
|
+
|
60
|
+
ReplayTransformations& setReplaySwizzle(bool replay_swizzle) {
|
61
|
+
replay_swizzle_ = replay_swizzle;
|
62
|
+
return *this;
|
63
|
+
}
|
64
|
+
|
65
|
+
ReplayTransformations& setReplayResize(bool replay_resize) {
|
66
|
+
replay_resize_ = replay_resize;
|
67
|
+
return *this;
|
68
|
+
}
|
69
|
+
|
70
|
+
ReplayTransformations& setReplayRFactor(bool replay_rfactor) {
|
71
|
+
replay_rfactor_ = replay_rfactor;
|
72
|
+
return *this;
|
73
|
+
}
|
74
|
+
|
75
|
+
// Replays outputs that were generated from ids.first on ids.second
|
76
|
+
void runReplay();
|
77
|
+
|
78
|
+
// Returns map from provided target domain to their corresponding IDs
|
79
|
+
const std::unordered_map<IterDomain*, IterDomain*>& getReplay() {
|
80
|
+
if (!ran_replay_) {
|
81
|
+
runReplay();
|
82
|
+
}
|
83
|
+
return id_map_;
|
84
|
+
}
|
85
|
+
|
86
|
+
// Returns loop_ids_ the size_t marks the order in which they were put into
|
87
|
+
// the map, this is part of the structure because it's used to generate the
|
88
|
+
// order from 'getLeafIDs'
|
89
|
+
const std::unordered_map<IterDomain*, size_t>& getUnorderedLeafIDs() {
|
90
|
+
if (!ran_replay_) {
|
91
|
+
runReplay();
|
92
|
+
}
|
93
|
+
return loop_ids_;
|
94
|
+
}
|
95
|
+
|
96
|
+
// Returns all terminating IDs that resulted from the replay. Leaf IDs are run
|
97
|
+
// to run deterministic, but otherwise in no specific order.
|
98
|
+
const std::vector<IterDomain*>& getLeafIDs() {
|
99
|
+
if (!ran_replay_) {
|
100
|
+
runReplay();
|
101
|
+
}
|
102
|
+
return loop_vec_;
|
103
|
+
}
|
104
|
+
|
105
|
+
protected:
|
106
|
+
using IterVisitor::handle;
|
107
|
+
|
108
|
+
// Transform dispatch
|
109
|
+
void dispatch(Expr* e) override;
|
110
|
+
|
111
|
+
// We're going to replay this split operation on the corresponding ID
|
112
|
+
void handle(Split* s) override;
|
113
|
+
|
114
|
+
// We're going to replay this merge operation on the corresponding IDs
|
115
|
+
void handle(Merge* m) override;
|
116
|
+
|
117
|
+
// We're going to replay this swizzle operation on the corresponding IDs
|
118
|
+
// if replaying swizzle is enabled.
|
119
|
+
void handle(Swizzle* m) override;
|
120
|
+
void handle(Swizzle2D* m) override;
|
121
|
+
|
122
|
+
void handle(Resize* resize) override;
|
123
|
+
|
124
|
+
size_t newCounter() {
|
125
|
+
return counter_++;
|
126
|
+
}
|
127
|
+
|
128
|
+
protected:
|
129
|
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
130
|
+
const std::vector<IterDomain*>& target_domain_;
|
131
|
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
132
|
+
std::unordered_map<IterDomain*, IterDomain*> id_map_;
|
133
|
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
134
|
+
std::unordered_map<IterDomain*, size_t> loop_ids_;
|
135
|
+
|
136
|
+
private:
|
137
|
+
bool error_on_failure_ = true;
|
138
|
+
|
139
|
+
// Indicates if we want to replay swizzle ops on the replayed
|
140
|
+
// tensor.
|
141
|
+
// The swizzle op will be replayed if true,
|
142
|
+
// The swizzle inputs will be directly forwarded, and therefore skipping
|
143
|
+
// the swizzle op if false.
|
144
|
+
// Currently this options should always be off but
|
145
|
+
// later we may have cases in scheduling large fusions where
|
146
|
+
// this functionality could be useful.
|
147
|
+
bool replay_swizzle_ = false;
|
148
|
+
|
149
|
+
// Indicates if we want to replay resize ops on the replayed
|
150
|
+
// tensor.
|
151
|
+
bool replay_resize_ = false;
|
152
|
+
|
153
|
+
// Whether to copy the `rf` flag from ops producing `target_domain`.
|
154
|
+
bool replay_rfactor_ = false;
|
155
|
+
|
156
|
+
size_t counter_ = 0;
|
157
|
+
|
158
|
+
std::vector<IterDomain*> loop_vec_;
|
159
|
+
|
160
|
+
bool ran_replay_ = false; // Mark if replay has been run
|
161
|
+
};
|
162
|
+
|
163
|
+
// Maps that track information relevant to best effort replay about newly added
|
164
|
+
// or squeezed broadcast axes
|
165
|
+
//
|
166
|
+
// For example if we have consumer: T0[i0, b1, b2, i3] and producer:
|
167
|
+
// T1[i0, i3]
|
168
|
+
//
|
169
|
+
// If consumer transformations are:
|
170
|
+
// -> T[i0, b1o, b1i, b2o, b2i, i3]
|
171
|
+
// -> T[i0*b1i, b1o, b2o, b2i, i3]
|
172
|
+
// -> T[i0*b1i*b2o, b1o, b2i, i3]
|
173
|
+
// -> T[i0*b1i*b2o*i3, b1o, b2i]
|
174
|
+
//
|
175
|
+
// forwarding_map would forward i0->i0*b1i and i0*b1i->i0*b1i*b2o
|
176
|
+
// compliment_map would have the entry i0->b1i and i0*b1i->b2o
|
177
|
+
//
|
178
|
+
// The first is to fast forward transformations in consumer involving broadcast
|
179
|
+
// axes not in producer. The compliment map is to use later to compute what loop
|
180
|
+
// nodes we may have after the forwarding process is finished. Leaf nodes are
|
181
|
+
// only important for replayCasP, so look there to see how this is done. Forward
|
182
|
+
// map is used for replayCasP and replayPasC.
|
183
|
+
//
|
184
|
+
// The producer forwarding map is filled when producer broadcast
|
185
|
+
// domains are squeezed.
|
186
|
+
class ForwardingInfo {
|
187
|
+
public:
|
188
|
+
// Map IterDomain* axes that can safely be forwarded to their output.
|
189
|
+
std::unordered_map<IterDomain*, IterDomain*> producer_forwarding_map;
|
190
|
+
std::unordered_map<IterDomain*, IterDomain*> consumer_forwarding_map;
|
191
|
+
|
192
|
+
// Given a forward id map id_input -> id_forwarded
|
193
|
+
// Track the other inputs in the expr that id_input is an input to. These will
|
194
|
+
// be used to adjust the replay's loop tracking. Don't need to track one to
|
195
|
+
// many as currently transformations on IterDomains can only have maximum 2
|
196
|
+
// inputs, but maybe in the future we'll have more.
|
197
|
+
std::unordered_map<IterDomain*, std::vector<IterDomain*>>
|
198
|
+
producer_compliment_map;
|
199
|
+
std::unordered_map<IterDomain*, std::vector<IterDomain*>>
|
200
|
+
consumer_compliment_map;
|
201
|
+
|
202
|
+
ForwardingInfo(const TensorView* producer, const TensorView* consumer);
|
203
|
+
|
204
|
+
ForwardingInfo() = delete;
|
205
|
+
};
|
206
|
+
|
207
|
+
/*
 * Short Description:
 *
 * Given an Expr in target_domain, check if its inputs are in replay_map. If
 * so, check if the mapped domains in replay_map are recorded to be transformed
 * by an "equivalent" operation in replay_domain's history. If so, forward the
 * operation and update replay_map to map the outputs of the expressions across
 * target_domain and reference_domain.
 *
 * Long Description:
 *
 * replay_map maps root IDs in the history of target_domain to root IDs in the
 * history of replay_domain. PasC and CasP is just a convenient mechanism to
 * have BestEffortReplay make this base root mapping.
 *
 * Note: See ForwardingInfo in transform_iter.cpp for more information on
 * forwarding.
 *
 * Motivation:
 *
 * Consider the following program:
 *
 * T1[I0, R1] = T0[I0, I1]
 * T2[I0] = T1[I0, R1i]
 *
 * T1->split(1, factor)
 * T1->rFactor(2)
 *
 * T4[I0, R1orf, I1irf] = T0[I0, I1]
 * T1[I0, R1i] = T4[I0, R1orf, I1irf]
 * T2[I0] = T1[I0, R1i]
 *
 * There's an issue when we want to replay T4 to have transformations similar
 * to those on T0. Primarily T0's "rfactor" domain has a strict match
 * requirement on T4's root domain. If transformations on top of T0 don't match
 * T4's transformations (from T4's root domain to T4's logical domain), T4
 * cannot be replayed like T0 on those domains as they would generate incorrect
 * code in the system today.
 *
 * Side note potentially for the future: In theory we could actually disconnect
 * T4's view from its logical domain. This would allow logical domains to be
 * "reversible". The way this would have to be implemented is that there just
 * needs to be a path of transformations from a tensor's loop domains to its
 * root domains and its logical domain. It shouldn't really matter if those
 * connections are forward or backward through transformations. The only thing
 * that really matters is that they're connected. This is left for future work
 * as it could have significant impact on other parts of the system like how
 * loops are generated and expressions are sorted.
 *
 * T0 doesn't have this constraint if we want to replay T0 as T4, so this is
 * directional based on rfactor. Therefore to replay T0 transformations onto T4
 * we want to make sure those transformations are consistent with T4 (between
 * T4's root and logical domain). Best Effort Replay does not actually add any
 * transformations to the tensors provided. However, it will provide
 * information to determine whether producers' transformations are consistent
 * with consumers' transformations (or the other way around). Best Effort
 * Replay will return discovered mappings between tensors that it detects to be
 * matching based on provided initial information (or just through p2c/c2p root
 * domain mappings).
 *
 * Transformations have a concept of "permissiveness" used for broadcast and
 * squeeze. For example:
 *
 * T1[I0, B1] = T0[I0]
 * T2[I0, I1] = T1[I0, B1]
 *
 * We may want to replay T1 and T0 based on transformations on T2. These
 * transformations may involve B1. We could even have:
 *
 * T2->merge(0, 1)->split(0, 128)
 *
 * resulting in:
 *
 * T2[(I0*I1)/128, 128]
 *
 * T0 doesn't have I1 so it can't technically be transformed in an exactly
 * consistent way. However, it may still be desired to "inline" T0 into T1 and
 * in result T1 into T2. It may further be desired to bind BIDx and TIDx to the
 * two dimensions in the problem. This example doesn't "technically" result in
 * thread to thread communication, but since the scope in mind is a shared
 * global memory it results in duplicate reads. These duplicate reads are
 * automatically cached in our memory hierarchy. So in a way there is implicit
 * communication in that a memory location is read by multiple threads.
 *
 * This is where forwarding and permissiveness come into play. When we
 * transform T1 with the first merge, we will mark the result I0*B1 of T1 to be
 * "permissively" mapped to I0 of T0, so when we perform the split, we split
 * T0's I0 dimension to I0/128 and 128. This is to help us mark inlining and
 * parallelization across these dimensions so we can effectively reason about
 * the "not full" dimension in T0. This is where the concept of forward map in
 * BestEffortReplay comes in.
 *
 * Permissiveness can also be considered "symmetric" across broadcast and
 * squeeze as they are similar operations, however broadcast and squeeze do
 * have different implications since squeeze doesn't result in the implicit
 * communication described in the previous paragraph. However, as far as
 * forwarding is concerned they're symmetric. Indexing/parallelization has
 * significant logic dedicated to broadcast resolutions (unlike squeeze).
 *
 * This class provides a mechanism to analyze all of the above concepts. It
 * can also run through transformations in target according to a manually
 * specified IterDomain to IterDomain replay_map. If equal transformations
 * already exist in replay_domain's history, we will not redo those
 * transformations, but instead update replay_map to reflect forwarding the
 * existing transformations based on a notion of expressions being "equal"
 * (input IterDomains mapped and transformation expression parameters matching,
 * or the iter domain that doesn't match is in a forwarding map). The replay
 * map is the "best effort" part of BestEffortReplay: it doesn't actually
 * perform new transformations to enforce matching, it just detects existing
 * matching transforms. However, we still include rfactor validation within.
 */
|
317
|
+
|
318
|
+
class BestEffortReplay {
|
319
|
+
private:
|
320
|
+
std::unordered_map<IterDomain*, IterDomain*> target2replay_id_map_;
|
321
|
+
std::unordered_map<IterDomain*, IterDomain*> replay_forward_id_map_;
|
322
|
+
std::unordered_map<IterDomain*, IterDomain*> target_forward_id_map_;
|
323
|
+
std::unordered_map<IterDomain*, size_t> loop_ids_;
|
324
|
+
std::vector<IterDomain*> forwarded_ids_;
|
325
|
+
std::unordered_map<IterDomain*, IterDomain*> skipped_resize_id_map_;
|
326
|
+
|
327
|
+
// Need to track which id's have been forwarded. Later will need to make sure
|
328
|
+
// loop nodes to produce "compliment" axes are properly tracked. i.e.
|
329
|
+
// T[i0, b1, b2, i3]
|
330
|
+
// -> T[i0, b1o, b1i, b2o, b2i, i3]
|
331
|
+
// -> T[i0*b1i*b2o, b1o, b2i, i3]
|
332
|
+
// -> T[i0*b1i*b2o*i3, b1o, b2i]
|
333
|
+
// If we forwarded i0 -> i0*b1i*b2o*i3, we need to know that b1o and b2i
|
334
|
+
// are loop nodes even though their split wasn't part of targets replay. These
|
335
|
+
// are important IterDomains to track for transformation replays as otherwise
|
336
|
+
// we could easily drop axes we need by accident
|
337
|
+
|
338
|
+
// Counter to make sure best effort replay loop_ids can be grabbed
|
339
|
+
// deterministicly, important to make sure replays are run to run
|
340
|
+
// deterministic.
|
341
|
+
size_t counter = 0;
|
342
|
+
|
343
|
+
// Determine if current replay will ignore swizzle ops.
|
344
|
+
// When not skipping swizzles, swizzle ops will have to be matched
|
345
|
+
// same way as split and merge to progress forward on the mapping.
|
346
|
+
//
|
347
|
+
// When skipping swizzles, mismatched swizzle ops will not stop matching
|
348
|
+
// further down the tensor domains but only the swizzle outputs will be on
|
349
|
+
// the target to replay map, since we only generate one-to-one maps in
|
350
|
+
// BestEffortReplay and the swizzle outputs is just picked as a convention
|
351
|
+
// for simpler and uniform mapping behavior. The swizzle op inputs will be
|
352
|
+
// added by the disjoint set passes when building the iterdomain graph.
|
353
|
+
//
|
354
|
+
// Example:
|
355
|
+
// Target:
|
356
|
+
// I0o, I0i = split I0
|
357
|
+
// Ix0o, Ix0i = swizzle I0o, I0i
|
358
|
+
// I02 = merge Ix0o, Ix0i
|
359
|
+
// Replay:
|
360
|
+
// I1o, I1i = split I1
|
361
|
+
// I12 = merge I1o, I1i
|
362
|
+
//
|
363
|
+
// BestEffortReplay **no** skip swizzle gives:
|
364
|
+
// {
|
365
|
+
// I0->I1,
|
366
|
+
// I0o->I1o,
|
367
|
+
// I0i->I1i,
|
368
|
+
// }
|
369
|
+
//
|
370
|
+
// BestEffortReplay skip swizzle gives:
|
371
|
+
// {
|
372
|
+
// I0->I1,
|
373
|
+
// Ix0o->I1o,
|
374
|
+
// Ix0i->I1i,
|
375
|
+
// I02->I12
|
376
|
+
// }
|
377
|
+
//
|
378
|
+
// TODO: Reevaluate swizzle and transform replays. We have some concepts on
|
379
|
+
// iter domain mapping we should formalize. It would be good to have these
|
380
|
+
// options accessible while specified in a consistent manner.
|
381
|
+
// https://github.com/ftxj/pytorch/pull/1#pullrequestreview-1210168522
|
382
|
+
bool skip_replay_swizzle_ = true;
|
383
|
+
bool skip_target_swizzle_ = true;
|
384
|
+
|
385
|
+
bool error_on_failure_ = true;
|
386
|
+
|
387
|
+
bool inReplayForwardMap(IterDomain* id) const {
|
388
|
+
return replay_forward_id_map_.find(id) != replay_forward_id_map_.end();
|
389
|
+
}
|
390
|
+
|
391
|
+
bool inTargetForwardMap(IterDomain* id) const {
|
392
|
+
return target_forward_id_map_.find(id) != target_forward_id_map_.end();
|
393
|
+
}
|
394
|
+
|
395
|
+
IterDomain* getReplayForwardedId(IterDomain* id) const {
|
396
|
+
auto forwarded_id_it = replay_forward_id_map_.find(id);
|
397
|
+
if (forwarded_id_it == replay_forward_id_map_.end()) {
|
398
|
+
return id;
|
399
|
+
} else {
|
400
|
+
return getReplayForwardedId(forwarded_id_it->second);
|
401
|
+
}
|
402
|
+
}
|
403
|
+
|
404
|
+
IterDomain* getTargetForwardedId(IterDomain* id) const {
|
405
|
+
auto forwarded_id_it = target_forward_id_map_.find(id);
|
406
|
+
if (forwarded_id_it == target_forward_id_map_.end()) {
|
407
|
+
return id;
|
408
|
+
} else {
|
409
|
+
return getTargetForwardedId(forwarded_id_it->second);
|
410
|
+
}
|
411
|
+
}
|
412
|
+
|
413
|
+
  //! Adds complementing IDs of forwarded IDs to the loop map.
  //! compliment_map lists, per forwarded id, the sibling ids that complement
  //! it (presumably the other outputs of the forwarded expression -- confirm
  //! against the implementation).
  void addComplimentLeafIDs(
      const std::unordered_map<IterDomain*, IterDomain*>& forwarding_map,
      const std::unordered_map<IterDomain*, std::vector<IterDomain*>>&
          compliment_map);
|
418
|
+
|
419
|
+
  // Skip swizzle step to make sure both target and
  // replay swizzles are skipped while the mapping
  // makes progress. This makes sure that, for example,
  // different tensors can still be inlined despite
  // different local swizzle patterns.
  //
  // The id2expr arguments map iter domains to expressions on the target and
  // replay side respectively (presumably each id's consuming expression --
  // confirm against the implementation).
  void skipSwizzles(
      const std::unordered_map<IterDomain*, Expr*>& target_id2expr,
      const std::unordered_map<IterDomain*, Expr*>& replay_id2expr);
|
427
|
+
|
428
|
+
  // Skip resize in both target and replay domains, given the transform
  // expressions of each side.
  void skipResizes(
      const std::vector<Expr*>& target_exprs,
      const std::vector<Expr*>& replay_exprs);
|
432
|
+
|
433
|
+
 public:
  // Replays the transforms of target_domain onto replay_domain, starting
  // from the initial id correspondence given in target2replay_map.
  //
  // replay_forward_id_map / target_forward_id_map optionally forward (skip
  // over) ids on the replay / target side before matching.
  // skip_replay_swizzle / skip_target_swizzle control whether swizzle ops on
  // each side are skipped rather than matched (see the class comment on
  // swizzle skipping).
  // When skip_resize is true, resize is ignored or in other words forwarded.
  // error_on_failure presumably makes a failed replay raise instead of
  // returning a partial mapping -- confirm against the implementation.
  BestEffortReplay(
      const std::vector<IterDomain*>& replay_domain,
      const std::vector<IterDomain*>& target_domain,
      std::unordered_map<IterDomain*, IterDomain*> target2replay_map,
      std::unordered_map<IterDomain*, IterDomain*> replay_forward_id_map = {},
      std::unordered_map<IterDomain*, IterDomain*> target_forward_id_map = {},
      bool skip_replay_swizzle = true,
      bool skip_target_swizzle = true,
      bool skip_resize = false,
      bool error_on_failure = true);
|
445
|
+
|
446
|
+
  // Return iter domain map from target_domain IDs to their "replayed"
  // replay_domain IDs. If not in map, was not replayed.
  const std::unordered_map<IterDomain*, IterDomain*>& getReplay() const {
    return target2replay_id_map_;
  }
|
451
|
+
|
452
|
+
  // ids in replay that did not have matching transforms in target_domain.
  // The size_t value is the ordering key used by getLeafIDs (presumably the
  // id's position in the replay domain -- confirm against the
  // implementation).
  const std::unordered_map<IterDomain*, size_t>& getUnorderedLeafIDs() {
    return loop_ids_;
  }
|
456
|
+
|
457
|
+
// Returned ordered set of IDs in getUnorderedLeafIDs
|
458
|
+
std::vector<IterDomain*> getLeafIDs() {
|
459
|
+
std::set<std::pair<IterDomain*, size_t>, id_int_lt> ordered_set;
|
460
|
+
for (auto entry : loop_ids_) {
|
461
|
+
ordered_set.emplace(entry);
|
462
|
+
}
|
463
|
+
|
464
|
+
std::vector<IterDomain*> loop_vec_;
|
465
|
+
loop_vec_.resize(ordered_set.size());
|
466
|
+
std::transform(
|
467
|
+
ordered_set.begin(),
|
468
|
+
ordered_set.end(),
|
469
|
+
loop_vec_.begin(),
|
470
|
+
[](std::pair<IterDomain*, size_t> entry) { return entry.first; });
|
471
|
+
return loop_vec_;
|
472
|
+
}
|
473
|
+
|
474
|
+
  // Get a disjoint sets representing the equivalence of IterDomains. The
  // equivalence is defined by forwarding and replay. Two IterDomains are
  // equivalent if:
  // - They are mapped together through forwarding, or
  // - They are mapped together through replay
  // For example, if I have the following producer-consumer pair:
  //   T0[I0, I1]
  //   T1[(I0'*b1)*b2, I1'] = broadcast(T0)
  // Then there will be two equivalent sets:
  // - {I1, I1'}
  // - {I0, I0', I0'*b1, (I0'*b1)*b2}
  NVF_API DisjointSets<IterDomain*> getIterDomainEquivalence();
|
486
|
+
|
487
|
+
  // Runs a best effort replay of the consumer as the producer: ignores
  // broadcast axes that appear in consumer that are not mapped to producer
  // in logical_map.
  //
  // When skip_resize is true, resize is ignored or in other words forwarded.
  // The skip_*_swizzle flags control swizzle skipping per side (see the
  // class comment on swizzle skipping).
  NVF_API static BestEffortReplay replayCasP(
      const TensorView* consumer,
      const TensorView* producer,
      int64_t producer_compute_at_axis,
      const LogicalDomainMap& logical_map,
      bool skip_consumer_swizzle = true,
      bool skip_producer_swizzle = true,
      bool skip_resize = true);
|
499
|
+
|
500
|
+
  // Runs a best effort replay of the producer as the consumer: ignores
  // broadcast axes that appear in consumer that are not mapped to producer
  // in logical_map.
  //
  // When skip_resize is true, resize is ignored or in other words forwarded.
  // The skip_*_swizzle flags control swizzle skipping per side (see the
  // class comment on swizzle skipping).
  NVF_API static BestEffortReplay replayPasC(
      const TensorView* producer,
      const TensorView* consumer,
      int64_t consumer_compute_at_axis,
      const LogicalDomainMap& logical_map,
      bool skip_producer_swizzle = true,
      bool skip_consumer_swizzle = true,
      bool skip_resize = true);
|
512
|
+
|
513
|
+
  // Find the first position i where td1[i] is not the same as td2[i]. "Same"
  // means the DAG and input IDs to generate td1[i] and td2[i] are the same.
  // td1 and td2 are assumed to have some matching iter domains, as this is a
  // strict same-ness check.
  static int64_t findFirstMismatchedID(
      const TensorDomain* td1,
      const TensorDomain* td2);
|
520
|
+
};
|
521
|
+
|
522
|
+
} // namespace nvfuser
|