nvfuser_cu121_torch25-0.2.25.dev20250201-cp310-cp310-manylinux_2_28_x86_64.whl
- nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/transform_replay.h
@@ -0,0 +1,297 @@
// clang-format off
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
// clang-format on
#pragma once

#include <exceptions.h>
#include <ir/internal_nodes.h>
#include <scheduler/tools/maxinfo_propagator.h>
#include <visibility.h>

#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace nvfuser {

/*
 * compute_at is a relative property between two TensorViews which marks at what
 * iteration domain we're going to generate a tensor to be consumed by another.
 * For example, if we have: T2[I, J, K] = T1[I, J, K] * 2.0 and then we call
 * T2.split(axis = 0, factor = ...): T2[Io, Ii, J, K] = T1[I, J, K] * 2.0, where
 * Io is the outer axis from the split and Ii is the inner axis from the split.
 * If we then call T1.compute_at(T2, axis=1), we would expect to have:
 * T2[Io, Ii, J, K] = T1[Io, Ii, J, K] * 2.0
 * which would produce the following loop nest structure:
 *
 * for(io : Io)
 *  for(ii : Ii)
 *   for(j : J)
 *    for(k : K)
 *     // produce T1:
 *     T1[io, ii, j, k] = ...
 *  for(ii : Ii)
 *   for(j : J)
 *    for(k : K)
 *     // consume T1, produce T2
 *     T2[io, ii, j, k] = T1[io, ii, j, k] * 2.0
 *
 * This file provides the replay function that allows us to construct T1's
 * domain from T2 at a desired level (compute_at_axis) without modifying any
 * unnecessary parts of the domain.
 *
 * EXAMPLES:
 *
 * ANOTHER ITER EXAMPLE:
 *  T2[I, J, K] = T1[I, J, K] * 2.0
 *  T2.split(axis = 0, factor = ...)
 *  T2[Io, Ii, J, K] = T1[I, J, K] * 2.0
 *  T2.split(axis = 2, factor = ...)
 *  T2[Io, Ii, Jo, Ji, K] = T1[I, J, K] * 2.0
 *  T1.compute_at(T2, axis=1)
 *  T2[Io, Ii, Jo, Ji, K] = T1[Io, Ii, J, K] * 2.0
 *
 * Note: compute_at axis:
 * T2[ 0 Io, 1 Ii, 2 Jo, 3 Ji, 4 K 5 ] // 5 is inline, 0 is at "root", which
 * means completely separate loop nests.
 *
 * for(io : Io)
 *  for(ii : Ii)
 *   for(j : J)
 *    for(k : K)
 *     // produce T1, this is the view that replay generates:
 *     T1[io, ii, j, k] = ...
 *  for(ii : Ii)
 *   for(jo : Jo)
 *    for(ji : Ji)
 *     for(k : K)
 *      // consume T1, produce T2
 *      T2[io, ii, jo, ji, k] = T1[io, ii, jo, ji, k] * 2.0
 * // consumer view on T1 will be produced at a later stage.
 *
 * SIMPLE REDUCTION EXAMPLE:
 *  T1[I, J, K] = ...
 *  T2[I, R, K] = T1[I, J, K] // .sum(axis = 1): we reduce on R/J to produce T2[I, K]
 *  T2.split(axis = 0, factor = ...)
 *  T2[Io, Ii, R, K] = T1[I, J, K]
 *  T1.compute_at(T2, axis=3)
 *  T2[Io, Ii, R, K] = T1[Io, Ii, J, K]
 *
 * for(io : Io)
 *  for(ii : Ii)
 *   for(k : K)
 *    T2[io, ii, k] = init
 *   for(r : R)
 *    for(k : K)
 *     // produce T1:
 *     T1[io, ii, r, k] = ...
 *     // consume T1, produce T2:
 *     T2[io, ii, k] += T1[io, ii, r, k]
 *
 * REDUCTION EXAMPLE RESULTING IN AN ERROR:
 *  T1[I, R, K] = ... // R is a reduction domain; we reduce on R to produce T1[I, K]
 *  T2[I, K] = T1[I, K]
 *
 * for(i : I)
 *  for(k : K)
 *   T1[i, k] = init
 *  for(r : R)
 *   for(k : K)
 *    T1[i, k] += ...[i, r, k]
 * for(i : I)
 *  for(k : K)
 *   T2[i, k] = T1[i, k]
 *
 * T1.compute_at(T2, axis=2)
 * This should be an error, or a warning and changed to:
 * T1.compute_at(T2, axis=1)
 * The error is because the kernel would have to be:
 *
 * for(i : I)
 *  T1[i, k] = init
 *  for(r : R)
 *   for(k : K)
 *    T1[i, k] += ...[i, r, k]
 *  for(k : K)
 *   T2[i, k] = T1[i, k]
 *
 * Otherwise we would produce incorrect results.
 */
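
// A minimal sketch of the first example above, written against the C++
// scheduling API (Fusion, FusionGuard, mul, and IrBuilder are the usual
// nvfuser names; makeSymbolicTensor is the common test helper; the exact
// calls are illustrative, not part of this header):
//
//   Fusion fusion;
//   FusionGuard fg(&fusion);
//   TensorView* t1 = makeSymbolicTensor(3);                 // T1[I, J, K]
//   fusion.addInput(t1);
//   TensorView* t2 = mul(t1, IrBuilder::create<Val>(2.0));  // T2 = T1 * 2.0
//   fusion.addOutput(t2);
//   t2->split(0, 128);     // T2[Io, Ii, J, K] with inner extent 128
//   t1->computeAt(t2, 1);  // replay T1 as T2 up to axis 1, as described above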

class TensorDomain;
class TensorView;
class LogicalDomainMap;

struct TransformReplayOptions {
  // In theory, it makes more sense to have skip_target_swizzle = true by
  // default because this is how we index into the producer and how we
  // propagate transformations. However, we are in a very funny situation:
  // BestEffortReplay for swizzle is broken. For example, if we have a
  // producer <=> consumer pair like:
  //       I1          I0
  //      /  \        /  \
  //    I1o   I1i   I0o   I0i
  //     |     |     |     |
  //  swizzle I1i swizzle I0i    <=>    I3  I2
  //     |     |     |     |
  //    I1o'  I1i   I0o'  I0i
  //      \  /        \  /
  //       I1'         I0'
  // where I1o', I0o' = swizzle(I1o, I0o), we never really skipped swizzle to
  // map I1' with I3 and I0' with I2. But even with this error, our swizzle
  // indexing worked due to luck. So effectively we were doing
  // skip_target_swizzle = false. But today, we cannot make this `true` for
  // vectorization validation and indexing, because of another bug in
  // BestEffortReplay: swizzle skip should happen in an all-or-nothing fashion.
  // We cannot just skip X but not skip Y, yet that is not how the skip is
  // implemented. If we make it `true`, this will trigger errors in some
  // schedules. So here, in order to avoid exposing one bug, we explicitly
  // keep the wrong behavior that we have been using, because that wrong
  // behavior has had better luck.
  // For more info, see https://github.com/NVIDIA/Fuser/issues/554
  bool skip_target_swizzle = false;
  bool replay_swizzle = false;
  bool replay_resize = false;
  bool replay_allocation = false;

  TransformReplayOptions& skipTargetSwizzle(bool value = true) {
    skip_target_swizzle = value;
    return *this;
  }

  TransformReplayOptions& replaySwizzle(bool value = true) {
    replay_swizzle = value;
    return *this;
  }

  TransformReplayOptions& replayResize(bool value = true) {
    replay_resize = value;
    return *this;
  }

  TransformReplayOptions& replayAllocation(bool value = true) {
    replay_allocation = value;
    return *this;
  }
};
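
// A usage sketch (illustrative): since every setter returns *this, options
// can be built fluently at the call site of the replay functions declared
// below, e.g.
//
//   TransformReplay::replayPasC(
//       producer, consumer, /*consumer_compute_at_axis=*/-1,
//       TransformReplayOptions().replayResize().skipTargetSwizzle());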

class NVF_API TransformReplay {
 public:
  // Replay producer as consumer, returns {replayed_producer_domain,
  // producer_compute_at_axis}.
  //
  // replay_resize indicates whether resize should be replayed or
  // ignored. It is only replayed when replaying a producer for
  // indexing.
  // replay_allocation indicates whether to replace the producer's allocation
  // domain with the corresponding consumer's allocation domain. By default, we
  // should preserve the producer's current allocation domain, and if that
  // allocation domain is inconsistent with the replay, an error will be
  // raised. This option is used in cacheBefore, cacheAfter, and cacheFork.
  static std::pair<TensorDomain*, int64_t> replayPasC(
      const TensorView* producer,
      const TensorView* consumer,
      int64_t consumer_compute_at_axis,
      TransformReplayOptions opt = {});
  static std::pair<TensorDomain*, int64_t> replayPasC(
      const TensorView* producer,
      const TensorView* consumer,
      int64_t consumer_compute_at_axis,
      const LogicalDomainMap& logical_map,
      TransformReplayOptions opt = {});

  // Replay consumer as producer, returns {replayed_consumer_domain,
  // consumer_compute_at_axis}.
  //
  // Unlike replayPasC, it always ignores resize.
  static std::pair<TensorDomain*, int64_t> replayCasP(
      const TensorView* consumer,
      const TensorView* producer,
      int64_t producer_compute_at_axis,
      TransformReplayOptions opt = {});
  static std::pair<TensorDomain*, int64_t> replayCasP(
      const TensorView* consumer,
      const TensorView* producer,
      int64_t producer_compute_at_axis,
      const LogicalDomainMap& logical_map,
      TransformReplayOptions opt = {});

  // Self replay.
  static TensorDomain* fullSelfReplay(
      const TensorDomain* new_self_root,
      const TensorDomain* self);

  // Returns the loop position in producer that matches with `consumer_pos` in
  // consumer. Returns -1 if matching is impossible. This function can be used
  // to test if replay is needed for getting matching outer dims. This function
  // should be consistent with `replayPasC`: if you pass the tensors just
  // replayed by replayPasC as inputs, it should return exactly the same
  // position as `replayPasC`. However, this function is more tolerant than
  // fully matching `replayPasC`: if there are unmappable dimensions in the
  // consumer, they are simply ignored.
  //
  // When skip_resize is true, mapping is done more permissively by
  // skipping resize ops. For example, that is done when this is used
  // by TransformPropagator, whereas it isn't when used for
  // determining the inlining position by MaxPosCalculator, as inlining
  // isn't allowed with different extents.
  static int64_t getMatchedLeafPosWithoutReplayPasC(
      const TensorView* producer,
      const TensorView* consumer,
      int64_t consumer_pos,
      bool skip_resize = false);

  // Returns the loop position in consumer that matches with `producer_pos` in
  // producer. Behavior similar to getMatchedLeafPosWithoutReplayPasC, except
  // that we are also ignoring reductions in the producer.
  //
  // When skip_resize is true, mapping is done more permissively by
  // skipping resize ops. For example, that is done when this is used
  // by TransformPropagator, whereas it isn't when used for
  // determining the inlining position by MaxPosCalculator, as inlining
  // isn't allowed with different extents.
  static int64_t getMatchedLeafPosWithoutReplayCasP(
      const TensorView* consumer,
      const TensorView* producer,
      int64_t producer_pos,
      bool skip_resize = false);

  // Tests if two tensors have fully matching transformations.
  static bool fullSelfMatching(
      const TensorView* replay,
      const TensorView* target);
};
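
// An illustrative pattern (an assumption, not from this header): query the
// matched position first to skip the replay when the producer already lines
// up with the consumer:
//
//   int64_t matched = TransformReplay::getMatchedLeafPosWithoutReplayPasC(
//       producer, consumer, consumer_pos);
//   if (matched < 0) {
//     auto [new_domain, new_pos] =
//         TransformReplay::replayPasC(producer, consumer, consumer_pos);
//     // ... adopt new_domain on the producer ...
//   }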

class NVF_API TransformPropagator
    : public MaxLogicalDomainInfoSpanningTree::Propagator {
 protected:
  std::unordered_map<TensorView*, int64_t> replayed_pos_;

 public:
  void propagateC2P(TensorView* from, TensorView* to) override;
  void propagateP2C(TensorView* from, TensorView* to) override;
  void propagateSibling(TensorView* from, TensorView* to) override;
  TransformPropagator(TensorView* from, int64_t pos = -1);
};
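
// Typical propagation sketch (illustrative): pair the propagator with the
// spanning tree declared in scheduler/tools/maxinfo_propagator.h, rooted at
// a reference TensorView:
//
//   TransformPropagator propagator(reference_tv);
//   MaxLogicalDomainInfoSpanningTree(reference_tv).traverse(&propagator);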

struct MostInlinedTransformPropagator
    : public MaxLogicalDomainInfoSpanningTree::Propagator {
  void propagateC2P(TensorView* from, TensorView* to) override;
  void propagateP2C(TensorView* from, TensorView* to) override;
  void propagateSibling(TensorView* from, TensorView* to) override;
};

// Replays an `Expr` with the new input, `new_in`. This function currently has
// the following limitations:
//  1. It requires `e` to be a unary op, and therefore takes a single new
//     input.
//  2. It requires `e` to be a TensorView op, which takes and produces only
//     TensorViews.
Expr* replayExprWithNewInput(Expr* e, Val* new_in);
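// For example (illustrative): if `e` is the UnaryOp that defines some
// TensorView `tv_out` from `tv_in`, replayExprWithNewInput(e, tv_other)
// creates an equivalent UnaryOp consuming `tv_other` instead of `tv_in`.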

} // namespace nvfuser
nvfuser/include/nvfuser/transform_rfactor.h
@@ -0,0 +1,33 @@
// clang-format off
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
// clang-format on
#pragma once

#include <exceptions.h>

#include <ir/all_nodes.h>
#include <transform_iter.h>

#include <algorithm>
#include <vector>

namespace nvfuser {

// TODO: Only replay dispatch is really borrowed from TransformIter; we should
// reevaluate the reuse of dispatch for classes that inherit TransformIter.
class TransformRFactor {
 public:
  // Transform the provided tensor domain into two domains, a producer and a
  // consumer domain. These domains are created by taking the given axes and
  // reducing them in the producer domain, and taking the remaining reduction
  // axes and reducing them in the consumer domain.
  static std::pair<TensorDomain*, TensorDomain*> runReplay(
      TensorDomain*,
      std::vector<int64_t> axes);
};
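
// An rfactor sketch (illustrative; TensorView::rFactor is the usual entry
// point into this): split the reduction of `tv` into a partial reduction
// over axis 1 in the producer and a final reduction in the consumer:
//
//   auto [producer_domain, consumer_domain] =
//       TransformRFactor::runReplay(tv->domain(), {1});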

} // namespace nvfuser
nvfuser/include/nvfuser/transform_view.h
@@ -0,0 +1,136 @@
// clang-format off
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
// clang-format on
#pragma once

#include <exceptions.h>
#include <visibility.h>

#include <ir/all_nodes.h>

#include <memory>
#include <vector>

namespace nvfuser {

class ViewTransform;

//!
//! The goal of analyzeView is to find the minimum number of transformations
//! to convert from the original size to the new size. A naive view algorithm
//! would merge all axes together and then split according to the new sizes.
//!
//! This implementation will keep the original domains if the domains are the
//! same size in the original and new shapes. If an original domain is not
//! evenly divisible by the new domain, we will merge the minimum number of
//! adjacent original domains.
//!
//! The view transformations are processed in the following order:
//!   1. Squeeze - Removes size-1 broadcast dimensions
//!   2. Keep, Merge, Split - Used to create the new logical domain
//!   3. Broadcast - Inserts size-1 dimensions
//!
//! Broadcast is handled last because a size-1 dimension can be inserted
//! anywhere in the new shape.
//!
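//! For example (illustrative), reshaping [1, 4, 6] -> [8, 3, 1] becomes:
//!   1. Squeeze the size-1 broadcast axis 0:        [4, 6]
//!   2. Merge axes 0 and 1 (4 * 6 = 24), then
//!      split by factor 3:                          [8, 3]
//!   3. Broadcast a new size-1 axis at the end:     [8, 3, 1]
//!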

struct AnalyzeViewResult {
  std::vector<bool> broadcast_axes;
  std::vector<bool> squeeze_axes;
  std::vector<std::shared_ptr<ViewTransform>> transforms;

  std::string toString() const;

  bool operator==(const AnalyzeViewResult& other) const;

  bool operator!=(const AnalyzeViewResult& other) const {
    return !(*this == other);
  }

  size_t hash() const;
};

struct AnalyzeViewConstraint {
  // 1 if size-1 dimension, otherwise 0
  std::vector<int64_t> original_constraint;
  std::vector<int64_t> new_constraint;
  // Just the positions of true in AnalyzeViewResult::squeeze_axes
  std::vector<int64_t> squeeze_string;
  // Just the positions of true in AnalyzeViewResult::broadcast_axes
  std::vector<int64_t> broadcast_string;
  // A stringified version of the transformations:
  std::vector<int64_t> split_merge_string;

  std::vector<int64_t> conglomerateString() const {
    // Don't think this is necessary, but just being safe. Using
    // -3 as a delimiter between value groups.
    std::vector<int64_t> conglomerate = {
        (int64_t)original_constraint.size(),
        (int64_t)new_constraint.size(),
        -3};
    auto add_vec = [&conglomerate](const std::vector<int64_t>& vec) {
      conglomerate.insert(conglomerate.end(), vec.begin(), vec.end());
      conglomerate.push_back(-3);
    };
    add_vec(original_constraint);
    add_vec(new_constraint);
    add_vec(squeeze_string);
    add_vec(broadcast_string);
    add_vec(split_merge_string);
    return conglomerate;
  }
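
  // For example (illustrative): original_constraint = {0, 1},
  // new_constraint = {1, 0}, and empty squeeze/broadcast/split-merge
  // strings encode as {2, 2, -3, 0, 1, -3, 1, 0, -3, -3, -3, -3}.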

  bool operator==(const AnalyzeViewConstraint& other) const {
    return other.conglomerateString() == this->conglomerateString();
  }

  // Naive hashing function, likely has a lot of collisions, but may not matter
  // too much if we don't expect many types of views.
  size_t hash() const {
    size_t hash_value = 0;
    for (auto val : conglomerateString()) {
      if (val == std::numeric_limits<int64_t>::max()) {
        continue;
      }
      hash_value += val;
    }
    return hash_value;
  }
};

//! Infer -1 value in new view std::vector<int64_t> based on original view
//! std::vector<int64_t>. This shouldn't generally be used directly but is
//! useful for testing.
NVF_API std::pair<std::vector<int64_t>, std::vector<int64_t>> inferViewShapes(
    const std::vector<int64_t>& original_sizes,
    const std::vector<int64_t>& new_sizes);
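// For example (illustrative): original_sizes = {2, 12} with new_sizes =
// {4, -1} resolves the -1 to 6, since both shapes must cover 2 * 12 = 24
// elements.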

// Find the transformations necessary to convert TensorView
// from original size to new size.
AnalyzeViewResult analyzeView(
    const TensorView* tv,
    const std::vector<int64_t>& original_sizes,
    const std::vector<int64_t>& new_sizes);

// Find the constraints derived from the view transformations
NVF_API AnalyzeViewConstraint analyzeViewConstraint(
    const std::vector<int64_t>& original_sizes,
    const std::vector<int64_t>& new_sizes);

// Generate a new TensorDomain from the given view transformations.
// The original root domain is kept in the new TensorDomain,
// but a new logical domain is created from the view transformations.
TensorDomain* transformView(
    TensorDomain* original_domain,
    const AnalyzeViewResult& view_analysis);

//! Apply the reshape transformations of view_analysis to inp_tv
NVF_API TensorView* reshape(
    TensorView* inp_tv,
    const AnalyzeViewResult& view_analysis);
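
// The typical pipeline these helpers form (illustrative):
//
//   AnalyzeViewResult view_analysis =
//       analyzeView(tv, original_sizes, new_sizes);
//   TensorView* reshaped = reshape(tv, view_analysis);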

} // namespace nvfuser