nvfuser-cu121-torch25 0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h
@@ -0,0 +1,118 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+
+#include <ir/all_nodes.h>
+
+#include <vector>
+
+namespace nvfuser {
+
+//! Transform for-loop structure to handle misaligned addresses
+//!
+//! Sections of misaligned addresses are handled sequentially
+//! while aligned addresses use vectorized memory accesses.
+//!
+//! ---------------------------------------------------------------------------
+//! Before Misaligned Vectorization:
+//!
+//! Inputs: T0
+//! Outputs: T3
+//!
+//! for(...) {
+//!   T1[vector_size];
+//!   for( i : vector_size ) {
+//!     T1[i] = T0[...]
+//!   }
+//!
+//!   T2[vector_size];
+//!   for( i : vector_size ) {
+//!     T2[i] = unaryOp(T1[i])
+//!   }
+//!
+//!   for( i : vector_size ) {
+//!     T3[...] = T2[i]
+//!   }
+//! }
+//!
+//! ---------------------------------------------------------------------------
+//! After Misaligned Vectorization:
+//!
+//! Inputs: T0
+//! Outputs: T3
+//!
+//! for(...) {
+//!   T1[vector_size];
+//!   T2[vector_size];
+//!
+//!   if (inline_predicate_except_last_root_domain) {
+//!     index_except_last_root_domain = ...
+//!     address = (int64_t) &T1[index_except_last_root_domain]
+//!
+//!     offset_size = (address % vector_size_bytes) / data_type_size_bytes
+//!     shift_init = vector_size - offset_size
+//!     shift = (shift_init == vector_size) ? 0 : shift_init
+//!
+//!     // size of the last root domain
+//!     extent = ...
+//!     remainder = (extent - shift) % vector_size
+//!
+//!     last_root_domain_index = ...
+//!
+//!     // Vectorize Section
+//!     if ( (last_root_domain_index + shift) < (extent - remainder) ) {
+//!       T1[0] = vectorize_load( T0[index + shift] );
+//!
+//!       for( i : vector_size ) {
+//!         T2[i] = unaryOp(T1[i])
+//!       }
+//!
+//!       T3[index + shift] = vectorize_store( T2[0] );
+//!     }
+//!
+//!     // Initial Section
+//!     if ( last_root_domain_index == 0 ) {
+//!       for( i : shift ) {
+//!         T1[i] = T0[...]
+//!       }
+//!
+//!       for( i : shift ) {
+//!         T2[i] = unaryOp(T1[i])
+//!       }
+//!
+//!       for( i : shift ) {
+//!         T3[...] = T2[i]
+//!       }
+//!     }
+//!
+//!     // Remainder Section
+//!     if ( (last_root_domain_index + shift) >= (extent - remainder) &&
+//!          (last_root_domain_index + shift) < extent) {
+//!
+//!       for( i : remainder ) {
+//!         T1[i] = T0[index + shift]
+//!       }
+//!
+//!       for( i : remainder ) {
+//!         T2[i] = unaryOp(T1[i])
+//!       }
+//!
+//!       for( i : remainder ) {
+//!         T3[index + shift] = T2[i]
+//!       }
+//!     }
+//!   }
+//! }
+//!
+std::vector<Expr*> processMisalignedVectorization(
+    const std::vector<Expr*>& exprs);
+
+bool containsAnyDirectChildMisalignedVectorize(const ForLoop* fl);
+
+} // namespace nvfuser
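The shift/remainder arithmetic in the comment above decides how the extent of the last root domain is split into the three sections. Below is a minimal standalone sketch of that arithmetic (illustrative only, not nvfuser code; the address, extent, and vector width are made-up example values):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Example values; in the generated kernel these come from the tensor
  // metadata and the vectorization factor chosen by the scheduler.
  constexpr int64_t vector_size = 4;           // elements per vectorized access
  constexpr int64_t data_type_size_bytes = 4;  // e.g. float
  constexpr int64_t vector_size_bytes = vector_size * data_type_size_bytes;
  const int64_t extent = 1000;                 // size of the last root domain
  const int64_t address = 0x7f0000000004;      // hypothetical misaligned base address

  // Mirrors the pseudocode: elements until the next aligned address.
  const int64_t offset_size = (address % vector_size_bytes) / data_type_size_bytes;
  const int64_t shift_init = vector_size - offset_size;
  const int64_t shift = (shift_init == vector_size) ? 0 : shift_init;

  // Trailing elements that cannot be covered by a full vector.
  const int64_t remainder = (extent - shift) % vector_size;

  std::printf("initial (scalar) section:   [0, %lld)\n", (long long)shift);
  std::printf("vectorized section:         [%lld, %lld)\n",
              (long long)shift, (long long)(extent - remainder));
  std::printf("remainder (scalar) section: [%lld, %lld)\n",
              (long long)(extent - remainder), (long long)extent);
  return 0;
}
```

With a base address misaligned by one element, the first `shift` elements and the last `remainder` elements fall back to scalar accesses, and everything in between is covered by full vector loads and stores.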
nvfuser/include/nvfuser/device_lower/pass/predicate.h
@@ -0,0 +1,23 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+
+#include <ir/all_nodes.h>
+#include <kernel_ir.h>
+
+#include <vector>
+
+namespace nvfuser {
+
+//! Update predicates with valid bool conditionals
+//!
+std::vector<Expr*> generateConditionalFromPredicate(
+    const std::vector<Expr*>& exprs);
+
+} // namespace nvfuser
nvfuser/include/nvfuser/device_lower/pass/replace_size.h
@@ -0,0 +1,24 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <dispatch.h>
+#include <fusion.h>
+#include <ir/all_nodes.h>
+
+namespace nvfuser {
+
+// TensorViews are all based on symbolic sizes. When we first initialize them
+// we don't know if they're inputs or outputs which would mean that they have
+// runtime shapes. Intermediate tensors (those not going to global memory) do
+// not have this information. Since we need to have the correct information in
+// the kernel being fetched for shapes, we want to replace input and output
+// tensors to reference the runtime structure containing sizes.
+void replaceSymbolicSizes(Fusion*);
+
+} // namespace nvfuser
nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h
@@ -0,0 +1,115 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <ir/all_nodes.h>
+
+#include <list>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+namespace nvfuser {
+
+// Hoisting common subexpressions for scalar expressions, including indices,
+// predicates, tensor factories, etc.
+//
+// Class CommonScalarMap is updated during lowering as new scalars
+// are inserted.
+//
+// Once all scalars are inserted into CommonScalarMap, allocations of the
+// hoisted scalars are inserted by allocateCommonScalars. Note
+// that this assumes that the CUDA code generator does not inline a
+// scalar Val with allocation (PR #1434).
+
+class CommonScalarMap {
+ public:
+  //! For the given scalar, insert the subexpressions in its definition into the
+  //! loop that has the minimum amount of computation. For example, given a loop
+  //!   FOR i1
+  //!     FOR i2
+  //!       FOR i3
+  //!         FOR i4
+  //!           index = ((i1*1 + i2*2) + i3*3) + i4*4
+  //! and `index` is to be hoisted, this function will try to insert i1*1 into
+  //! common_scalar_map_[FOR i1], try to insert i1*1 + i2*2 into
+  //! common_scalar_map_[FOR i2], try to insert ((i1*1 + i2*2) + i3*3) into
+  //! common_scalar_map_[FOR i3], and try to insert ((i1*1 + i2*2) + i3*3) + i4*4
+  //! into common_scalar_map_[FOR i4]. Before insertion, this function recursively
+  //! uses reuseScalarIfAlreadyComputed to find existing
+  //! expressions/subexpressions in common_scalar_map_ that can be reused. If a
+  //! reuse opportunity is found, then this function will modify the definition
+  //! of `value` to use the existing subexpression. This function returns the
+  //! modified value whose definition reuses other expressions in the list.
+  Val* hoistScalar(Val* value, const std::vector<ForLoop*>& loops);
+
+  //! common_scalar_map_ stores all seen indices in a given loop; however, we
+  //! don't want to create a separate allocation for all of them. We are only
+  //! interested in allocating for the indices that are actually hoisted, or used
+  //! more than once. This method returns the Vals that will get their own
+  //! allocation.
+  std::vector<Val*> getHoistedScalars(ForLoop* loop) const;
+
+  //! Initialize the common_scalar_map_ with lowered exprs. If some scalar is
+  //! already computed in these lowered exprs and is recomputed in indexing or
+  //! predicate math, then we should reuse the existing computation.
+  void initialize(const std::vector<Expr*> exprs);
+
+ private:
+  //! This is the underlying implementation of the public hoistScalar, with some
+  //! additional arguments and return values.
+  //! Returns (hoisted value, has tensor index dependency)
+  std::pair<Val*, bool> hoistScalarImpl(
+      Val* value,
+      const std::vector<ForLoop*>& loops,
+      std::vector<Val*>&
+          seen_subexprs, // Stores the subexpressions that have already been seen
+                         // during the recursion. This is used to detect
+                         // self-reuse. For example, if I have
+                         //   i3 = i1 * i2 + i1 * i2
+                         // when visiting the second i1 * i2, I will have the
+                         // first i1 * i2 in this vector, so that we know we can
+                         // reuse that i1 * i2.
+      int64_t position, // if `value` is given to `hoistScalar` (i.e., is_given
+                        // == true), then this is the position of the outer-most
+                        // loop nest that contains all the dependencies of
+                        // `value`. if `value` is a subexpression of the value
+                        // given to `hoistScalar`, then this is the position of
+                        // the outer-most loop nest that contains all the
+                        // dependencies of its parent.
+      bool is_given = false // true for the given scalar from the public
+                            // `hoistScalar`, false otherwise.
+  );
+
+  //! If there is already an expression in common_scalar_map_[loop] which is
+  //! sameAs `value`, then just return that expression. Otherwise, if there is a
+  //! subexpression of an existing expression sameAs `value`, then that
+  //! subexpression will be split out as a separate item in the mapped list, and
+  //! that subexpression will be returned. If nothing is sameAs `value`, then
+  //! return nullptr.
+  Val* reuseScalarIfAlreadyComputed(Val* value, ForLoop* loop);
+
+ private:
+  //! Map to hold hoisted common indices. The order matters and indicates data
+  //! dependency. For example, the list might hold [i1*4, i1*4+2, i1*4/16]
+  std::unordered_map<ForLoop*, std::list<Val*>> common_scalar_map_;
+
+  //! A set to identify whether a val is hoisted (an expression used in the
+  //! inner loop, but its value only depends on outer loop variables, so the
+  //! computation of this expression is hoisted to an outer loop) or reused (one
+  //! expression is used in multiple indices/predicates).
+  std::unordered_set<Val*> hoisted_or_reused_;
+};
+
+//! Insert allocations of hoisted indices. Must be called after
+//! collecting all common indices.
+std::vector<Expr*> allocateCommonScalars(const std::vector<Expr*>& exprs);
+
+} // namespace nvfuser
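The effect `hoistScalar` aims for can be pictured with ordinary nested loops. Here is a minimal sketch (illustrative only, not the nvfuser implementation): each partial sum of the example index `((i1*1 + i2*2) + i3*3) + i4*4` is computed at the outermost loop where it becomes invariant, which is where `CommonScalarMap` would record it.

```cpp
#include <cstdint>

// Global sink so the compiler cannot discard the loops entirely.
int64_t sink = 0;

void hoistedIndexing(int64_t N1, int64_t N2, int64_t N3, int64_t N4) {
  for (int64_t i1 = 0; i1 < N1; ++i1) {
    const int64_t p1 = i1 * 1;               // hoisted to the i1 loop
    for (int64_t i2 = 0; i2 < N2; ++i2) {
      const int64_t p2 = p1 + i2 * 2;        // hoisted to the i2 loop, reuses p1
      for (int64_t i3 = 0; i3 < N3; ++i3) {
        const int64_t p3 = p2 + i3 * 3;      // hoisted to the i3 loop, reuses p2
        for (int64_t i4 = 0; i4 < N4; ++i4) {
          const int64_t index = p3 + i4 * 4; // only the innermost term is left here
          sink += index;
        }
      }
    }
  }
}

int main() {
  hoistedIndexing(2, 3, 4, 5);
  return 0;
}
```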
nvfuser/include/nvfuser/device_lower/pass/unroll.h
@@ -0,0 +1,98 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+
+#include <device_lower/analysis/thread_predicate.h>
+#include <device_lower/utils.h>
+#include <kernel_ir.h>
+#include <kernel_ir_dispatch.h>
+#include <logical_domain_map.h>
+
+#include <bitset>
+#include <unordered_map>
+
+namespace nvfuser {
+
+//! Unroll pass
+//!
+//! A bit deceptively: UnrollPass adds all predicates, so it needs to be run
+//! even if we don't unroll any loops.
+//!
+//! The unrolling pass will get IR that looks something like:
+//! for( i : I0o{ceil(I0/4)} ) {
+//!   for( j : I1o{ceil(I1/128)} ) {
+//!     for( k : I0i{4} )
+//!       for( l : I1i{128} )
+//!         T0[I0o{ceil(I0/4)}, I1o{ceil(I1/128)}, I0iU{4}, I1i{128}] = ...
+//!
+//! And it will return the following:
+//! for( i : I0o{ceil(I0/4)} ) {
+//!   for( j : I1o{ceil(I1/128)} ) {
+//!
+//!     if( i * 4 + 3 < I && j * 128 + 127 < J ){
+//!       for( k : I0i{4} )
+//!         for( l : I1i{128} )
+//!           T0[ ( i * 4 + k ) * J + j * 128 + l ] = ...
+//!     } else {
+//!       for( k : I0i{4} )
+//!         for( l : I1i{128} )
+//!           if( i * 4 + k < I && j * 128 + l < J)
+//!             T0[ ( i * 4 + k ) * J + j * 128 + l ] = ...
+//!     }
+//!
+//!   }
+//! }
+//!
+//! As can be seen, it generates two sets of loops for I0i{4} and I1i{128}. The
+//! first set is protected by a predicate that makes sure there's a full
+//! internal tile we can iterate over. This way we remove the predicate nested
+//! in the innermost loop. There is, of course, a second set of loops, which has
+//! a predicate still in the innermost loop, making sure that we cover edges and
+//! corners.
+//!
+class UnrollPass : kir::ExprMutator {
+ public:
+  // Take the incoming exprs and run loop unrolling, returning the new IR
+  static std::vector<Expr*> runPass(const std::vector<Expr*>& exprs);
+
+  static bool canOmitElseClause(ForLoop* fl);
+
+ private:
+  void registerReplace(Expr* reference, Expr* new_expr);
+
+  // Generate the for Expr replacement map
+  UnrollPass(const std::vector<Expr*>& exprs);
+
+  const std::unordered_map<Expr*, Expr*>& replacementMap() const {
+    return expr_replacement_map_;
+  }
+
+  using kir::ExprMutator::handle;
+
+  void handle(ForLoop* fl) final;
+
+  void dispatch(Expr* expr) final;
+
+ private:
+  // We will track which loops in the incoming IR will be replaced and by what
+  std::unordered_map<Expr*, Expr*> expr_replacement_map_;
+
+  // keep track of whether we're within an unrolled loop
+  bool look_for_unroll_ = true;
+
+  // Indicates if the currently visited expression is inside an
+  // unswitched path
+  bool unswitched_loop_ = false;
+
+  // As we generate inline predicates, check if we actually generated a
+  // non-trivial one.
+  bool non_trivial_pred_found_ = false;
+};
+
+} // namespace nvfuser
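The two loop sets described in the comment amount to unswitching on the tile-level predicate. A plain C++ sketch of that structure (illustrative only, not the IR the pass emits; the 4x128 tile shape follows the example above):

```cpp
#include <vector>

// Writes out[(i*4 + k)*J + j*128 + l] over a logical I x J domain tiled 4 x 128.
void tiledWrite(std::vector<float>& out, long I, long J, float value) {
  const long I0o = (I + 3) / 4;      // ceil(I/4)
  const long I1o = (J + 127) / 128;  // ceil(J/128)
  for (long i = 0; i < I0o; ++i) {
    for (long j = 0; j < I1o; ++j) {
      if (i * 4 + 3 < I && j * 128 + 127 < J) {
        // Full tile: predicate hoisted out, inner loops are unpredicated.
        for (long k = 0; k < 4; ++k)
          for (long l = 0; l < 128; ++l)
            out[(i * 4 + k) * J + j * 128 + l] = value;
      } else {
        // Edge/corner tile: the predicate stays in the innermost loop.
        for (long k = 0; k < 4; ++k)
          for (long l = 0; l < 128; ++l)
            if (i * 4 + k < I && j * 128 + l < J)
              out[(i * 4 + k) * J + j * 128 + l] = value;
      }
    }
  }
}
```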
nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h
@@ -0,0 +1,45 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <vector>
+
+namespace nvfuser {
+
+class Expr;
+
+// Apply loop-invariant code hoisting to serial WelfordOps. For
+// example, when the innermost loop looks like:
+//
+// for () {
+//   welfordCombine(...);
+// }
+//
+// The count input should be invariant when the loop is not a
+// reduction loop, and then this can be transformed as:
+//
+// After:
+//   nvfuser_index_t new_count = outN()[0] + 1;
+//   float reciprocal = 1 / new_count;
+//   for () {
+//     welfordVectorized(..., new_count, reciprocal);
+//   }
+//
+// Here, welfordVectorized does not need to compute the division. This
+// transformation can be applied when the innermost loop is a
+// non-reduction domain and there's no predicate depending on the loop
+// index of the innermost loop. A common case is when the read of a
+// fusion input is vectorized and that input is fed to an outer
+// welford reduction. In this case, the innermost domain is a
+// non-reduction domain and is vectorized, so the predicate should not
+// have any dependency on the loop index, which enables the code
+// motion shown above.
+std::vector<Expr*> vectorizeWelford(const std::vector<Expr*>& exprs);
+
+} // namespace nvfuser
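A scalar sketch of the transformation described above (illustrative only; `Welford`, `welfordCombine`, and `welfordVectorized` here are local stand-ins, not the helpers nvfuser generates). When every iteration of the innermost vectorized loop adds exactly one element to its lane's accumulator, the new count and its reciprocal are loop invariant and can be computed once:

```cpp
#include <cstdio>

struct Welford {
  float avg = 0.f;
  float m2 = 0.f;
  long n = 0;
};

// Baseline: each call recomputes the new count and performs a division.
void welfordCombine(Welford& w, float x) {
  w.n += 1;
  float delta = x - w.avg;
  w.avg += delta / w.n;
  w.m2 += delta * (x - w.avg);
}

// Transformed form: the count update and reciprocal are supplied from outside.
void welfordVectorized(Welford& w, float x, long new_count, float reciprocal) {
  float delta = x - w.avg;
  w.avg += delta * reciprocal;
  w.m2 += delta * (x - w.avg);
  w.n = new_count;
}

int main() {
  constexpr int kVec = 4;
  Welford acc[kVec];  // one running accumulator per vector lane
  float in[kVec] = {1.f, 2.f, 3.f, 4.f};

  // Hoisted once per outer iteration: every lane grows by exactly one element.
  long new_count = acc[0].n + 1;
  float reciprocal = 1.f / (float)new_count;
  for (int i = 0; i < kVec; ++i)
    welfordVectorized(acc[i], in[i], new_count, reciprocal);

  std::printf("lane0: avg=%f m2=%f n=%ld\n", acc[0].avg, acc[0].m2, acc[0].n);
  return 0;
}
```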
nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h
@@ -0,0 +1,23 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <kernel_ir.h>
+
+namespace nvfuser {
+
+struct WarpPaddedParallelInfo {
+  bool is_tidx_padded = false;
+  bool is_tidx_single_warp = false;
+  bool has_warp_reduction = false;
+};
+
+std::vector<Expr*> fuseWarpReduce(const std::vector<Expr*> exprs);
+
+} // namespace nvfuser