nvfuser_cu121_torch25-0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/id_model/indexing.h
@@ -0,0 +1,208 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <device_lower/analysis/trivial_broadcast.h>
+#include <id_model/id_model.h>
+#include <ir/base_nodes.h>
+#include <ir/interface_nodes.h>
+#include <options.h>
+#include <type.h>
+
+// Just for PredicateInfo. Should be moved to its own header file
+#include <index_compute.h>
+
+#include <unordered_map>
+
+namespace nvfuser {
+
+struct IndexingInfo {
+  std::vector<IterDomain*> loop_domains;
+  // Indexing traversal path from loop domains
+  ExprPath<ExprGroup> traversal_path;
+  // Index mappings of ID groups along the traversal path
+  std::unordered_map<ValGroup, Val*> index_map;
+  // Mappings from ID groups to dependent loop groups
+  std::unordered_map<ValGroup, ValGroups> loop_group_dependencies;
+};
+
+struct IndexingAllocationInfo {
+  std::vector<IterDomain*> domains;
+  std::vector<Val*> strides;
+  std::vector<bool> contiguity;
+};
+
+// The basic algorithm of indexing is:
+//
+// 1. Find the loop domains
+// 2. Find the allocation domains
+// 3. Find the path from the loop domains to the allocation domains
+// 4. Set the initial index vals for the loop domains
+// 5. Propagate the initial indices of the loop domains to the allocation
+//    domains
+//
+// The indexing traversal is done on the AlmostExact graph augmented
+// with the loop promotion map since both the loop and allocation
+// domains may be promoted.
+class TensorIndexer {
+ public:
+  // Using a non-const reference of IdModel because traversalGraph() returns a
+  // non-const reference
+  TensorIndexer(IdModel& id_model);
+
+  bool isContigIndexingEnabled() const {
+    return !isOptionDisabled(DisableOption::ContigIndexing);
+  }
+
+  // Get a linear index of a given tensor appearing in a given expr, either
+  // as a consumer or a producer. Predicate indexing will have a
+  // separate interface.
+  //
+  // The actual for-loops are required for handling circular buffering.
+  Val* getLinearIndex(
+      TensorView* tv,
+      const Expr* expr,
+      const std::vector<ForLoop*>& loops) const;
+
+  // Get the index of a loop domain.
+  Val* getLoopIndex(IterDomain* loop_id, const std::vector<ForLoop*>& for_loops)
+      const;
+
+  // Get the index of the given ID groups
+  std::vector<Val*> getIndexFor(
+      const Expr* expr,
+      bool as_consumer,
+      const std::vector<IterDomain*>& index_ids,
+      const std::vector<ForLoop*>& loops) const;
+
+  // Get the contig indices of the given ID groups with their strides
+  std::pair<std::vector<Val*>, std::vector<Val*>> getContigIndexFor(
+      const Expr* expr,
+      bool as_consumer,
+      const IndexingAllocationInfo& alloc_info,
+      const std::vector<ForLoop*>& loops) const;
+
+  // The AlmostExact graph is used since size-1 splits and merges
+  // should not affect actual index exprs.
+  // Returns a non-const reference because indexing may create new domains and
+  // need to update the graph.
+
+  static IdMappingMode traversalGraphType() {
+    return IdMappingMode::ALMOSTEXACT;
+  }
+
+  ValGraph& traversalGraph() const {
+    return id_model_.idGraph(traversalGraphType());
+  }
+
+  // Traverse exprs and set allocation info for each tensor
+  void setupAllocationDomains(const std::vector<Expr*>& exprs);
+
+  // Get the list of predicates of a given tensor appearing in a given
+  // expr as a consumer. Each predicate corresponds to a domain of the
+  // tensor, which is by default one of the logical domains but can be
+  // an intermediate domain with contiguous indexing.
+  //
+  // An optional ForLoop parameter specifies a loop that is either
+  // unswitched/unrolled or vectorized, both of which are handled by
+  // UnswitchPredicate. For normal inline predicates, the parameter
+  // should be nullptr.
+  std::vector<PredicateInfo> getPredicates(
+      TensorView* tv,
+      const Expr* expr,
+      const std::vector<ForLoop*>& for_loops,
+      ForLoop* unswitched_loop = nullptr) const;
+
+  // Get the indexing traversal path for indexing a given list of IDs
+  // for a given expr
+  ExprPath<ExprGroup> getIndexingPath(
+      const Expr* expr,
+      const std::vector<IterDomain*>& index_ids) const;
+
+ private:
+  // Build a map of loop groups to their index Vals. See the comment
+  // on loop_index_map_.
+  void buildLoopIndexMap();
+
+  const IndexingAllocationInfo& getIndexingAllocationInfo(
+      TensorView* tv) const {
+    auto it = alloc_info_.find(tv);
+    NVF_ERROR(
+        it != alloc_info_.end(),
+        "No allocation info found for ",
+        tv->toString());
+    return it->second;
+  }
+
+  // Returns the index map as well as the traversal path of given
+  // index domains appearing in a given expr. Used by
+  // getIndexFor.
+  IndexingInfo computeIndex(
+      const Expr* expr,
+      const std::vector<IterDomain*>& index_ids,
+      const std::vector<ForLoop*>& for_loops) const;
+
+  // Propagate the loop indices of a given list of loop domains to the
+  // traversal graph (i.e., the AlmostExact graph). Uses the loop
+  // index map, which is built for the Loop graph.
+  std::unordered_map<ValGroup, Val*> getInitialIndexMap(
+      const std::vector<IterDomain*>& loop_domains,
+      const std::vector<ForLoop*>& for_loops) const;
+
+  // Get the loop domains of a given expr. Currently, they're always
+  // the loop domains of a consumer tensor, but in the future this
+  // function may return the loop domains of a producer for
+  // producer-based indexing.
+  std::vector<IterDomain*> getLoopDomains(const Expr* expr) const;
+
+  // For a given indexing traversal path toward allocation_domains,
+  // return the contiguous domains and their strides that can provide
+  // equivalent indexing results.
+  //
+  // Currently, only backward traversal is supported.
+  std::pair<std::vector<ValGroup>, std::vector<Val*>> getContigDomainsAndStrides(
+      const IndexingAllocationInfo& alloc_info,
+      const ExprPath<ExprGroup>& traversal_path) const;
+
+  // Get a replace map for tensor indexing. Examples include replacing
+  // an index of a vectorized loop with zero.
+  //
+  // This replacement map is used to replace a tensor index after an
+  // index map is generated. Since replacement is only done for loop
+  // domains, this could be done as part of getInitialIndexMap. One
+  // reason that we might want to first generate an index and do some
+  // replacements, rather than using final index vals to build the
+  // index map, is that one index map could be used for multiple
+  // indices. For normal tensor indexing, this may not matter, but
+  // predicate indexing needs to generate both start and stop
+  // predicates, and one index map is sufficient for both
+  // indices by using different replacement maps.
+  std::unordered_map<Val*, Val*> getIndexReplacementMap(
+      const Expr* expr,
+      bool as_consumer,
+      const std::vector<IterDomain*>& loop_domains,
+      const std::vector<ForLoop*>& for_loops,
+      const std::unordered_map<ValGroup, Val*>& index_map) const;
+
+ private:
+  // Using a non-const reference of IdModel because traversalGraph() returns a
+  // non-const reference
+  IdModel& id_model_;
+
+  // Mappings from loop groups to their indices. Serial loops will
+  // be mapped to a unique loop index Val. Parallel loops will be mapped
+  // to a NamedScalar such as "threadIdx.x". This map needs to be built
+  // only once and can be reused for different tensors.
+  std::unordered_map<ValGroup, Val*> loop_index_map_;
+
+  // Allocation info for each tensor. Must be filled before computing
+  // the index of each tensor.
+  std::unordered_map<TensorView*, IndexingAllocationInfo> alloc_info_;
+};
+
+} // namespace nvfuser
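
Below is a minimal usage sketch (not part of the wheel) of how a lowering pass might drive TensorIndexer, assuming an IdModel and a lowered loop-nest are already at hand; indexConsumerSketch and its parameters are illustrative names, not nvfuser API.

#include <id_model/indexing.h>

namespace nvfuser {

// Steps 1-3 of the indexing algorithm are set up once by recording every
// tensor's allocation domains; steps 4-5 run inside getLinearIndex, which
// seeds the loop indices and propagates them along the AlmostExact-graph
// path to the allocation domains.
Val* indexConsumerSketch(
    IdModel& id_model,
    const std::vector<Expr*>& lowered_exprs,
    TensorView* consumer_tv,
    const Expr* expr,
    const std::vector<ForLoop*>& for_loops) {
  TensorIndexer indexer(id_model);
  indexer.setupAllocationDomains(lowered_exprs);
  return indexer.getLinearIndex(consumer_tv, expr, for_loops);
}

} // namespace nvfuser
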
nvfuser/include/nvfuser/id_model/indexing_traversal.h
@@ -0,0 +1,72 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <device_lower/utils.h>
+#include <val_graph_visitor.h>
+
+namespace nvfuser {
+
+// BFS traversal for indexing. The only difference from the default
+// ValGraphBFS is that for indexing, special care must be taken
+// when resize is involved, since there can be multiple paths and
+// only one correct path. Specifically, any resize expr group
+// node must appear in the root-logical path of the consumer
+// tensor. Otherwise, resize nodes should be ignored. See
+// IndexingTest.ResizePath for a concrete example.
+class IndexingTraversal : public ValGraphBFS {
+ public:
+  IndexingTraversal(
+      const Expr* expr,
+      const ValGraph& graph,
+      std::vector<NodeType> from_groups,
+      std::vector<NodeType> to_groups,
+      bool require_all_to_visited = true);
+
+  ~IndexingTraversal() override = default;
+
+  static ExprPath getExprsBetween(
+      const Expr* expr,
+      const ValGraph& graph,
+      const std::vector<IterDomain*>& from_domains,
+      const std::vector<IterDomain*>& to_domains);
+
+  static std::optional<ExprPath> getExprsBetweenForResize(
+      const Expr* expr,
+      const ValGraph& graph,
+      const std::vector<IterDomain*>& from_domains,
+      const std::vector<IterDomain*>& to_domains);
+
+  using ValGraphBFS::isVisited;
+
+  bool excludeFromTraversal(const NodeType& group) const override {
+    if (const ExprGroup* eg = std::get_if<ExprGroup>(&group)) {
+      if ((*eg)->empty()) {
+        return false;
+      }
+      auto resize = dynamic_cast<Resize*>((*eg)->front());
+      if (resize == nullptr) {
+        return false;
+      }
+      if (std::none_of((*eg)->begin(), (*eg)->end(), [&](Expr* expr) -> bool {
+            return resize_paths_.find(expr->as<Resize>()) !=
+                resize_paths_.end();
+          })) {
+        // This resize node should never be traversed for indexing of
+        // the given expr
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  std::unordered_set<Resize*> resize_paths_;
+};
+
+} // namespace nvfuser
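
A hypothetical sketch of how the two static entry points above compose: try the resize-aware path first and fall back to plain BFS otherwise. pickIndexingPath is an illustrative name, and the ExprPath<ExprGroup> spelling follows indexing.h.

#include <id_model/indexing_traversal.h>

namespace nvfuser {

ExprPath<ExprGroup> pickIndexingPath(
    const Expr* expr,
    const ValGraph& graph,
    const std::vector<IterDomain*>& from_domains,
    const std::vector<IterDomain*>& to_domains) {
  // A non-null result means expr involves a resize, where only one of
  // the possibly multiple BFS paths is valid.
  if (auto resize_path = IndexingTraversal::getExprsBetweenForResize(
          expr, graph, from_domains, to_domains)) {
    return *resize_path;
  }
  // No resize involved: the default BFS path is correct.
  return IndexingTraversal::getExprsBetween(
      expr, graph, from_domains, to_domains);
}

} // namespace nvfuser
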
nvfuser/include/nvfuser/id_model/indexing_utils.h
@@ -0,0 +1,62 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <device_lower/analysis/index_compute.h>
+#include <device_lower/lower2device.h>
+#include <device_lower/utils.h>
+#include <id_model/id_model.h>
+#include <id_model/to_string.h>
+
+namespace nvfuser {
+namespace indexing_utils {
+
+// Get a matching ForLoop for a given loop iter domain. There may not
+// be such a loop if this loop-nest is for initializing a reduction
+// buffer.
+inline ForLoop* getForLoop(
+    IterDomain* loop_id,
+    const std::vector<ForLoop*>& for_loops,
+    const ValGraph& loop_graph) {
+  auto it = std::find_if(
+      for_loops.begin(), for_loops.end(), [&](ForLoop* for_loop) -> bool {
+        IterDomain* for_loop_id = for_loop->iter_domain();
+        return loop_graph.disjointValSets().strictAreMapped(
+            loop_id, for_loop_id);
+      });
+  if (it != for_loops.end()) {
+    return *it;
+  } else {
+    return nullptr;
+  }
+}
+
+// Check if unswitching a given for-loop actually matters. For example,
+// if a loop is parallelized, unswitching doesn't mean anything as we
+// don't unswitch threading dimensions, e.g., "threadIdx.x + ... < N"
+// is generated rather than "blockDim.x + ... < N".
+inline bool isEffectiveUnswitchLoop(ForLoop* fl) {
+  // Threaded domain is not unswitched
+  if (fl->iter_domain()->isThread() || fl->iter_domain()->isDeviceDim()) {
+    return false;
+  }
+
+  // If it's vectorized, it must be true that any of the iteration
+  // values can be used to generate the predicates of the tensor, so
+  // unswitching has no effect. Same for loops that are known to be
+  // safe to just predicate at the end.
+  if (fl->iter_domain()->getParallelType() == ParallelType::Vectorize ||
+      lower_utils::predicateAtEnd(fl)) {
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace indexing_utils
+} // namespace nvfuser
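
A small composition sketch of the two helpers above, with the loop graph assumed to come from an IdModel as in the neighboring headers; loopNeedsUnswitchPredicate is an illustrative name.

#include <id_model/indexing_utils.h>

namespace nvfuser {

bool loopNeedsUnswitchPredicate(
    IterDomain* loop_id,
    const std::vector<ForLoop*>& for_loops,
    const ValGraph& loop_graph) {
  // getForLoop may return nullptr, e.g., for a reduction-init loop-nest.
  ForLoop* fl = indexing_utils::getForLoop(loop_id, for_loops, loop_graph);
  // Parallelized, vectorized, and predicate-at-end loops gain nothing
  // from unswitching.
  return fl != nullptr && indexing_utils::isEffectiveUnswitchLoop(fl);
}

} // namespace nvfuser
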
nvfuser/include/nvfuser/id_model/loop_promotion.h
@@ -0,0 +1,180 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <val_graph.h>
+
+namespace nvfuser {
+
+class IdModel;
+struct StatefulInliningInfo;
+
+// Callback interface for LoopPromotionMapBuilder. Allows exposing the
+// temporary maps for testing and debugging.
+class LoopPromotionMapBuilderCallback {
+ public:
+  virtual ~LoopPromotionMapBuilderCallback() = default;
+
+  // Called after Step 1 with the root resolution map and the
+  // corresponding IEL graph
+  virtual void postStep1(
+      const std::unordered_map<ValGroup, IterDomain*>& iel_root_resolution_map,
+      const ValGraph& iel_graph) {}
+  // Called after Step 2 with the IEL promotion map and the
+  // corresponding IEL graph
+  virtual void postStep2(
+      const std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map,
+      const ValGraph& iel_graph) {}
+  // Called after Step 3 with the loop promotion map
+  virtual void postStep3(
+      const std::unordered_map<ValGroup, IterDomain*>& loop_promotion_map) {}
+  // Called after Step 4 with the IEL promotion map and the
+  // corresponding IEL graph
+  virtual void postStep4(
+      const std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map,
+      const ValGraph& iel_graph) {}
+  // Called after Step 5 with the final loop promotion map
+  virtual void postStep5(
+      const std::unordered_map<ValGroup, IterDomain*>& loop_promotion_map) {}
+};
+
+class LoopPromotionMapBuilder {
+ public:
+  // Build a map of loop groups to IterDomains that represent actual
+  // loops. The map is built based on the broadcast resolution with
+  // root domains between inlined producer and consumer tensors.
+  //
+  // (For debugging only) When force_full_loop_promotion_analysis is
+  // true, it always performs the full loop promotion analysis even
+  // when it's possible to take a quicker shortcut.
+  static std::unordered_map<ValGroup, IterDomain*> get(
+      IdModel& id_model,
+      const StatefulInliningInfo& inlining_info,
+      LoopPromotionMapBuilderCallback* callback = nullptr,
+      bool force_full_loop_promotion_analysis = false);
+
+ private:
+  LoopPromotionMapBuilder(
+      IdModel& id_model,
+      const StatefulInliningInfo& inlining_info,
+      LoopPromotionMapBuilderCallback* callback = nullptr,
+      bool force_full_loop_promotion_analysis = false);
+
+  std::unordered_map<ValGroup, IterDomain*> build();
+
+  // Shortcut to build a map of promotion IDs without doing the full
+  // loop promotion analysis. Can only be used when the full analysis
+  // is not required.
+  std::unordered_map<ValGroup, IterDomain*> buildWithNoBroadcast();
+
+  ValGraph& idGraph(IdMappingMode mode);
+  const ValGraph& idGraph(IdMappingMode mode) const;
+
+  std::unordered_map<ValGroup, IterDomain*> buildInlineRootResolutionMap(
+      const ValGraph& iel_graph,
+      const StatefulInliningInfo& info) const;
+
+  // Helper function for building the loop promotion map.
+  //
+  // Propagate promotion mappings from root IEL groups to intermediate
+  // and loop IEL groups by traversing IEL exprs. For each expr, if an
+  // input is promoted, the output needs to be promoted too. If
+  // there's already an equivalent expr that uses the promoted inputs,
+  // create a mapping from the outputs of the IEL expr to the outputs
+  // of the equivalent expr. We only consider exprs that are mapped
+  // in the loop graph as we are looking for domains that represent
+  // the actual loops of the input and output domains of the IEL
+  // expr. If no such expr is found, the IEL expr is replayed with the
+  // promoted inputs.
+  //
+  // This is used twice when building the promotion map. The first time
+  // it is used, there's no loop graph promotion yet, so only the IEL
+  // promotions are propagated. In that case, loop_graph_promotion_map
+  // should be just empty.
+  //
+  // Propagation uses iel_promotion_map and
+  // loop_graph_promotion_map. If both are available for an IEL group,
+  // the former takes precedence. This is because when this function
+  // is used for Step 4, the given iel_promotion_map starts as an
+  // empty map and gets populated during this propagation, so any
+  // mapping in the map is guaranteed to be the correct final mapping,
+  // whereas the loop graph may have invalid mappings for partially
+  // inlined domains.
+  void propagatePromotionsInIELGraph(
+      const ValGraph& iel_graph,
+      std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map,
+      const ValGraph& loop_graph,
+      const std::unordered_map<ValGroup, IterDomain*>& loop_promotion_map);
+
+  // Same as the other propagatePromotionsInIELGraph but without the loop
+  // graph map. This is used for Step 2, where there's no loop
+  // graph map yet.
+  void propagatePromotionsInIELGraph(
+      const ValGraph& iel_graph,
+      std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map);
+
+  // Given an IEL promotion map, identify the mapping of each loop
+  // group. The promotion must represent all the domains in each loop
+  // group. If a valid representative promotion is not found for a
+  // loop group, no mapping is added for the group.
+  std::unordered_map<ValGroup, IterDomain*> projectIELPromotionToLoopGraph(
+      const ValGraph& iel_graph,
+      const std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map,
+      const ValGraph& loop_graph,
+      const StatefulInliningInfo& inlining_info) const;
+
+  // Find a promoted iter domain of a given loop group that covers all
+  // the exact groups representative of the resolved transformations
+  // within the loop group. Specifically, we examine each IEL group of
+  // the loop group, and if an IEL group has a promotion, we consider it as a
+  // candidate of the promotion of this loop group. If not, we include a
+  // domain of the IEL group as a candidate too. Once all candidates are
+  // obtained, we pick one that covers all the exact domains (cf. concrete
+  // domains in ComputeAtMap).
+  IterDomain* findPromotionOfLoopGroup(
+      const ValGroup& loop_group,
+      const ValGraph& iel_graph,
+      const std::unordered_map<ValGroup, IterDomain*>& iel_promotion_map,
+      const std::unordered_map<ValGroup, ValGroups>& exact_covered_ids,
+      const VectorOfUniqueEntries<IterDomain*>& terminal_loop_ids) const;
+
+  // Terminal loop ids are iteration domains in each loop group that:
+  // 1) Don't have an entry in p2c_ca_permissive_maps, which would mean a
+  //    consumer TV's iter domain maps to this domain in a way that the domain
+  //    is also in the same loop group
+  // 2) Don't have a direct IterDomain consumer within the group
+  VectorOfUniqueEntries<IterDomain*> computeTerminalLoopIds(
+      const StatefulInliningInfo& info) const;
+
+  // Given the Step-3 promotion results, returns only promotions of
+  // groups that are producers to partially inlined groups. Those
+  // partially inlined groups may not have correct promotions as of
+  // Step 3 and need another propagation pass.
+  std::unordered_map<ValGroup, IterDomain*>
+  getProducerPromotionsOfPartiallyInlinedGroups(
+      const std::unordered_map<ValGroup, IterDomain*>&
+          initial_loop_promotion_map,
+      const ValGraph& loop_graph) const;
+
+  // Basic consistency check of the given loop promotion map
+  void sanityCheckLoopPromotionMap(
+      const std::unordered_map<ValGroup, IterDomain*>& loop_promotion_map)
+      const;
+
+ private:
+  IdModel& id_model_;
+  const StatefulInliningInfo& inlining_info_;
+  LoopPromotionMapBuilderCallback* callback_ = nullptr;
+
+  // (For debugging only) When force_full_loop_promotion_analysis_ is
+  // true, the full loop promotion analysis is always performed even
+  // when it's possible to take a quicker shortcut.
+  bool force_full_loop_promotion_analysis_ = false;
+};
+
+} // namespace nvfuser
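
A minimal debugging sketch of the callback interface above; an instance would be passed as the callback argument of LoopPromotionMapBuilder::get. PrintingPromotionCallback is an illustrative name, and the sketch assumes ValGroup exposes size()/front() (it is a shared VectorOfUniqueEntries, per disjoint_set.h) and Val::toString() as used elsewhere in these headers.

#include <id_model/loop_promotion.h>

#include <iostream>

namespace nvfuser {

class PrintingPromotionCallback : public LoopPromotionMapBuilderCallback {
 public:
  // Dump the Step-3 loop promotion map; the other postStepN hooks keep
  // their empty default bodies.
  void postStep3(const std::unordered_map<ValGroup, IterDomain*>&
                     loop_promotion_map) override {
    for (const auto& [loop_group, promoted_id] : loop_promotion_map) {
      std::cout << "loop group of size " << loop_group->size() << " (e.g., "
                << loop_group->front()->toString() << ") -> "
                << promoted_id->toString() << "\n";
    }
  }
};

} // namespace nvfuser
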
nvfuser/include/nvfuser/id_model/predicate_indexing.h
@@ -0,0 +1,104 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <device_lower/lower2device.h>
+#include <device_lower/utils.h>
+#include <id_model/id_model.h>
+
+namespace nvfuser {
+
+// Get the domains to predicate for a given tensor used as a consumer
+// of a given expr.
+std::vector<IterDomain*> getPredicateDomains(
+    TensorView* consumer_tv,
+    const Expr* expr);
+
+// Get a replace map for predicate indexing of a given tensor appearing
+// in a given loop-nest.
+//
+// The unswitched_loop parameter is an optional ForLoop that is used
+// when this predicate is for an unswitched, unrolled or vectorized
+// loop.
+std::unordered_map<Val*, Val*> getPredicateIndexReplacementMap(
+    TensorView* tv,
+    const std::vector<ForLoop*>& for_loops,
+    const std::unordered_map<ValGroup, Val*>& index_map,
+    const ValGraph& traversal_graph,
+    const ExprPath<ExprGroup>& traversal_path,
+    const IdModel& id_model,
+    bool is_start_predicate,
+    ForLoop* unswitched_loop = nullptr);
+
+// Check if a given ExprGroup is a split that needs an additional
+// predicate due to its non-divisibility.
+inline bool isNonDivisibleSplit(const ExprGroup& expr_group) {
+  if (!expr_group->front()->isA<Split>()) {
+    return false;
+  }
+
+  const auto& non_divisible_split_info =
+      GpuLower::current()->nonDivisibleSplitInfo();
+
+  std::vector<PredicateDomainInfo> pred_info_vec;
+
+  // The splitsToPredicate map is per tensor. Here, it's assumed
+  // that if any tensor has a non-divisible split that's mapped with
+  // expr_group, it should be considered a non-divisible split. This
+  // may result in duplicate predicates, which should be removed by
+  // the expression simplifier.
+  //
+  // For example, suppose tv0 is a 1D tensor of size 16:
+  //
+  // auto tv1 = reshape(tv0, {16}, {2, 8});
+  // tv1->split(1, 3);
+  //
+  // propagate_transformation(to: tv0, from: tv1)
+  //
+  // Here, the split by 3 of tv1 is not included in its non-divisible
+  // split list even though it is indeed non-divisible. The reason is
+  // that the input to the non-divisible split, the inner logical
+  // domain of extent 8, is predicated anyway since it's a logical
+  // domain. Specifically, its predicate should consist of:
+  //
+  // - Predicate for the outer logical domain
+  // - Predicate for the inner logical domain
+  //
+  // However, for tv0, it is indeed included in the non-divisible
+  // split list since the domain of extent 8 is not part
+  // of its logical domain. Its predicate should consist of:
+  //
+  // - Predicate for the sole logical domain
+  // - Predicate for the non-divisible split
+  //
+  // This would mean that when generating a predicate for tv1, since
+  // the below check would find a mapping with the non-divisible split
+  // for tv0, the tv1 predicate would be:
+  //
+  // - Predicate for the outer logical domain
+  // - Predicate for the inner logical domain
+  // - Predicate for the non-divisible split
+  //
+  // Here, the last two predicates are redundant since both of them
+  // guard the index with respect to the domain of extent 8. This is
+  // a bit annoying but should have no actual
+  // impact as the redundancy should be removed by the expression
+  // simplifier.
+  for (const auto& [tv, splits] :
+       non_divisible_split_info.splitsToPredicate()) {
+    if (std::find_if(splits.begin(), splits.end(), [&](Split* split) {
+          return expr_group->has(split);
+        }) != splits.end()) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+} // namespace nvfuser
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <val_graph.h>
|
11
|
+
|
12
|
+
namespace nvfuser {
|
13
|
+
|
14
|
+
// Choose an IterDomain from the ValGroup that is amenable to transforms.
|
15
|
+
// Specifically we prefer, in descending order:
|
16
|
+
// 1. Iteration domains
|
17
|
+
// 2. Broadcast domains
|
18
|
+
// 3. Reduction domains
|
19
|
+
IterDomain* representativeId(const ValGroup& vg);
|
20
|
+
|
21
|
+
// Given a ValGraph and two ValGroups g0 and g1 in this graph, if there is
|
22
|
+
// already a merge of g0 with g1 in graph, return the output ValGroup of that
|
23
|
+
// merge. Otherwise create an new ValGroup that is a merge of g0 and g1 in
|
24
|
+
// graph, and a new ExprGroup that is the definition of the new ValGroup.
|
25
|
+
// After the merge, g0 and g1 will remain valid pointers.
|
26
|
+
ValGroup merge(ValGraph* graph, const ValGroup& g0, const ValGroup& g1);
|
27
|
+
|
28
|
+
// Given a ValGraph and a ValGroup g in this graph, if there is already a split
|
29
|
+
// of g in graph with the same factor, then return the output ValGroups of that
|
30
|
+
// split. Otherwise create two new ValGroups that are a split of g in
|
31
|
+
// graph, and a new ExprGroup that is the definition of the new ValGroups.
|
32
|
+
// After the split, g will remain valid pointers.
|
33
|
+
std::pair<ValGroup, ValGroup> split(
|
34
|
+
ValGraph* graph,
|
35
|
+
const ValGroup& g,
|
36
|
+
Val* factor,
|
37
|
+
bool inner_split = true);
|
38
|
+
std::pair<ValGroup, ValGroup> split(
|
39
|
+
ValGraph* graph,
|
40
|
+
const ValGroup& g,
|
41
|
+
int64_t factor,
|
42
|
+
bool inner_split = true);
|
43
|
+
|
44
|
+
// Given a ValGraph and two ValGroups g0 and g1 in this graph, if there is
|
45
|
+
// already a swizzle of g0 with g1 in graph, return the output ValGroups of that
|
46
|
+
// swizzle. Otherwise create two new ValGroups that are a swizzle of g0 and g1
|
47
|
+
// in graph, and a new ExprGroup that is the definition of the new ValGroups.
|
48
|
+
std::pair<ValGroup, ValGroup> swizzle(
|
49
|
+
ValGraph* graph,
|
50
|
+
SwizzleType swizzle_type,
|
51
|
+
const ValGroup& g0,
|
52
|
+
const ValGroup& g1);
|
53
|
+
|
54
|
+
} // namespace nvfuser
|
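
An illustrative sketch of the graph-level scheduling API above: the common "merge, then split by a tile factor" pattern expressed directly on a ValGraph. mergeThenSplit and the factor of 128 are illustrative choices, not nvfuser API.

#include <id_model/schedule.h>

namespace nvfuser {

std::pair<ValGroup, ValGroup> mergeThenSplit(
    ValGraph* graph,
    const ValGroup& g0,
    const ValGroup& g1) {
  // Reuses an existing merge in the graph if one is already recorded;
  // g0 and g1 remain valid afterwards.
  ValGroup merged = merge(graph, g0, g1);
  // Inner split: the second element of the returned pair is the
  // extent-128 inner group.
  return split(graph, merged, /*factor=*/int64_t{128}, /*inner_split=*/true);
}

} // namespace nvfuser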