nvfuser_cu121_torch25-0.2.25.dev20250201-cp312-cp312-manylinux_2_28_x86_64.whl
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
nvfuser/include/nvfuser/serde/fusion_record.h
@@ -0,0 +1,124 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+#include <python_frontend/fusion_record.h>
+#include <serde/factory.h>
+
+namespace nvfuser::serde {
+
+// Forward declaration of RecordFunctor
+struct RecordFunctor;
+
+// OpRecord Function Signatures
+// ========================================================================
+// Unary Functions
+typedef std::function<TensorView*(TensorView*)> unary_tv_fn;
+typedef std::function<Val*(Val*)> unary_val_fn;
+
+// ========================================================================
+// Binary Functions
+typedef std::function<TensorView*(TensorView*, TensorView*)> binary_tv_fn;
+typedef std::function<Val*(Val*, Val*)> binary_val_fn;
+typedef std::function<TensorView*(TensorView*, Val*)> binary_tv_val_fn;
+typedef std::function<TensorView*(Val*, TensorView*)> binary_val_tv_fn;
+
+// ========================================================================
+// Ternary Functions
+// Binary with Alpha Functions
+typedef std::function<TensorView*(TensorView*, TensorView*, TensorView*)>
+    ternary_tv_fn;
+typedef std::function<Val*(Val*, Val*, Val*)> ternary_val_fn;
+typedef std::function<TensorView*(TensorView*, TensorView*, Val*)>
+    ternary_tv_tv_val_fn;
+typedef std::function<TensorView*(TensorView*, Val*, TensorView*)>
+    ternary_tv_val_tv_fn;
+typedef std::function<TensorView*(Val*, TensorView*, TensorView*)>
+    ternary_val_tv_tv_fn;
+typedef std::function<TensorView*(Val*, Val*, TensorView*)>
+    ternary_val_val_tv_fn;
+typedef std::function<TensorView*(TensorView*, Val*, Val*)>
+    ternary_tv_val_val_fn;
+typedef std::function<TensorView*(Val*, TensorView*, Val*)>
+    ternary_val_tv_val_fn;
+
+// ========================================================================
+// Ternary with Alpha Functions
+typedef std::function<TensorView*(TensorView*, TensorView*, TensorView*, Val*)>
+    ternary_alpha_tv_fn;
+typedef std::function<Val*(Val*, Val*, Val*, Val*)> ternary_alpha_val_fn;
+typedef std::function<TensorView*(TensorView*, TensorView*, Val*, Val*)>
+    ternary_alpha_tv_tv_val_fn;
+typedef std::function<TensorView*(TensorView*, Val*, TensorView*, Val*)>
+    ternary_alpha_tv_val_tv_fn;
+typedef std::function<TensorView*(Val*, TensorView*, TensorView*, Val*)>
+    ternary_alpha_val_tv_tv_fn;
+typedef std::function<TensorView*(Val*, Val*, TensorView*, Val*)>
+    ternary_alpha_val_val_tv_fn;
+typedef std::function<TensorView*(TensorView*, Val*, Val*, Val*)>
+    ternary_alpha_tv_val_val_fn;
+typedef std::function<TensorView*(Val*, TensorView*, Val*, Val*)>
+    ternary_alpha_val_tv_val_fn;
+// ========================================================================
+
+//! The RecordFunctorFactory class is used to deserialize the flatbuffer
+//! RecordFunctor table. We create an enum type for each RecordFunctor class.
+//! Each template specialization has a unique RecordType and parser function.
+class RecordFunctorFactory
+    : public Factory<RecordFunctor, python_frontend::RecordFunctor*> {
+ public:
+  RecordFunctorFactory()
+      : Factory((nvfuser::toUnderlying(RecordType::MAX) + 1)) {
+    setupFunctionMaps();
+    registerAllParsers();
+  }
+
+ private:
+  void registerAllParsers();
+  void setupFunctionMaps();
+
+  // String to Operation maps
+  // Unary Functions
+  std::unordered_map<std::string, unary_tv_fn> unary_tv;
+  std::unordered_map<std::string, unary_val_fn> unary_val;
+
+  // Binary Functions
+  std::unordered_map<std::string, binary_tv_fn> binary_tv;
+  std::unordered_map<std::string, binary_val_fn> binary_val;
+  std::unordered_map<std::string, binary_tv_val_fn> binary_tv_val;
+  std::unordered_map<std::string, binary_val_tv_fn> binary_val_tv;
+
+  // Ternary Functions
+  // Binary with Alpha Functions
+  std::unordered_map<std::string, ternary_tv_fn> ternary_tv;
+  std::unordered_map<std::string, ternary_val_fn> ternary_val;
+  std::unordered_map<std::string, ternary_tv_tv_val_fn> ternary_tv_tv_val;
+  std::unordered_map<std::string, ternary_tv_val_tv_fn> ternary_tv_val_tv;
+  std::unordered_map<std::string, ternary_val_tv_tv_fn> ternary_val_tv_tv;
+  std::unordered_map<std::string, ternary_val_val_tv_fn> ternary_val_val_tv;
+  std::unordered_map<std::string, ternary_tv_val_val_fn> ternary_tv_val_val;
+  std::unordered_map<std::string, ternary_val_tv_val_fn> ternary_val_tv_val;
+
+  // Ternary with Alpha Functions
+  std::unordered_map<std::string, ternary_alpha_tv_fn> ternary_alpha_tv;
+  std::unordered_map<std::string, ternary_alpha_val_fn> ternary_alpha_val;
+  std::unordered_map<std::string, ternary_alpha_tv_tv_val_fn>
+      ternary_alpha_tv_tv_val;
+  std::unordered_map<std::string, ternary_alpha_tv_val_tv_fn>
+      ternary_alpha_tv_val_tv;
+  std::unordered_map<std::string, ternary_alpha_val_tv_tv_fn>
+      ternary_alpha_val_tv_tv;
+  std::unordered_map<std::string, ternary_alpha_val_val_tv_fn>
+      ternary_alpha_val_val_tv;
+  std::unordered_map<std::string, ternary_alpha_tv_val_val_fn>
+      ternary_alpha_tv_val_val;
+  std::unordered_map<std::string, ternary_alpha_val_tv_val_fn>
+      ternary_alpha_val_tv_val;
+};
+
+} // namespace nvfuser::serde
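The factory above keeps one parser per RecordType value, indexed by the enum's underlying integer. Below is a minimal self-contained sketch of that enum-indexed registry pattern; Record, SerializedRecord, and RecordType here are hypothetical stand-ins, not the real serde types.

    #include <cstdint>
    #include <functional>
    #include <stdexcept>
    #include <vector>

    enum class RecordType : int64_t { Unary, Binary, MAX = Binary };

    struct SerializedRecord { RecordType type; };  // stand-in for the flatbuffer table
    struct Record { virtual ~Record() = default; };

    class RecordFactory {
     public:
      using Parser = std::function<Record*(const SerializedRecord&)>;

      // Size the table once, mirroring Factory(toUnderlying(RecordType::MAX) + 1).
      RecordFactory() : parsers_(static_cast<size_t>(RecordType::MAX) + 1) {}

      void registerParser(RecordType t, Parser p) {
        parsers_.at(static_cast<size_t>(t)) = std::move(p);
      }

      // Dispatch on the serialized enum tag, as registerAllParsers() sets up.
      Record* parse(const SerializedRecord& buffer) const {
        const Parser& p = parsers_.at(static_cast<size_t>(buffer.type));
        if (!p) {
          throw std::runtime_error("no parser registered for this RecordType");
        }
        return p(buffer);
      }

     private:
      std::vector<Parser> parsers_;  // indexed by the enum's underlying value
    };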
nvfuser/include/nvfuser/serde/polymorphic_value.h
@@ -0,0 +1,52 @@
+
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+#include <runtime/executor_kernel_arg.h>
+#include <serde/factory.h>
+#include <serde/fusion_cache_generated.h>
+#include <visibility.h>
+#include <functional>
+#include <memory>
+
+namespace nvfuser::serde {
+
+//! The PolymorphicValueFactory class is used to deserialize the flatbuffer
+//! PolymorphicValue table. This factory creates Bool, ComplexDouble, Double,
+//! Long, CPU Scalar, and CUDA Tensor objects. These arguments are stored in
+//! KernelArgumentHolder, which is used to schedule the fusion in
+//! FusionKernelRuntime and to run a kernel in KernelExecutor.
+class PolymorphicValueFactory
+    : public Factory<PolymorphicValue, nvfuser::PolymorphicValue> {
+ public:
+  PolymorphicValueFactory()
+      : Factory((nvfuser::toUnderlying(PolymorphicValueData::MAX) + 1)) {
+    registerAllParsers();
+  }
+
+ private:
+  void registerAllParsers();
+};
+
+nvfuser::PolymorphicValue deserializePolymorphicValue(const Scalar* c);
+
+flatbuffers::Offset<PolymorphicValue> serializePolymorphicValue(
+    flatbuffers::FlatBufferBuilder& builder,
+    std::shared_ptr<nvfuser::PolymorphicValue> v);
+
+flatbuffers::Offset<Scalar> serializeScalarCpu(
+    flatbuffers::FlatBufferBuilder& builder,
+    const at::Tensor& tensor);
+
+NVF_API flatbuffers::Offset<Scalar> serializeScalar(
+    flatbuffers::FlatBufferBuilder& builder,
+    const nvfuser::PolymorphicValue& v,
+    nvfuser::DataType t);
+
+} // namespace nvfuser::serde
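A hedged round-trip sketch of the entry points declared above. The builder plumbing (Finish, GetRoot, GetBufferPointer) is standard flatbuffers API; treat the exact interplay with the generated Scalar table as an assumption.

    flatbuffers::FlatBufferBuilder builder;
    // Serialize a double scalar into the buffer.
    auto offset = serializeScalar(
        builder, nvfuser::PolymorphicValue(3.0), nvfuser::DataType::Double);
    builder.Finish(offset);
    // Reinterpret the finished buffer as the generated Scalar table and decode.
    const Scalar* scalar =
        flatbuffers::GetRoot<Scalar>(builder.GetBufferPointer());
    nvfuser::PolymorphicValue round_tripped = deserializePolymorphicValue(scalar);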
nvfuser/include/nvfuser/serde/utils.h
@@ -0,0 +1,34 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+#include <exceptions.h>
+#include <serde/fusion_cache_generated.h>
+#include <type.h>
+
+namespace nvfuser::serde {
+
+//! A function to map the serde dtype to its corresponding nvfuser prim dtype
+PrimDataType mapToNvfuserDtype(long data_type);
+
+//! A function to map the serde dtype to its corresponding nvfuser datatype
+nvfuser::DataType mapToDtypeStruct(long data_type);
+
+//! A function to map the serde dtype to its corresponding aten dtype
+at::ScalarType mapToAtenDtype(long data_type);
+
+template <typename T>
+std::vector<T> parseVector(const flatbuffers::Vector<T>* fb_vector) {
+  std::vector<T> result(fb_vector->begin(), fb_vector->end());
+  return result;
+}
+
+// Flatbuffer stores bool values as uint8_t.
+std::vector<bool> parseBoolVector(
+    const flatbuffers::Vector<uint8_t>* fb_vector);
+
+} // namespace nvfuser::serde
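parseVector cannot simply be reused for booleans: instantiating it with T = uint8_t yields std::vector<uint8_t>, and std::vector<bool> is a bit-packed specialization. A plausible sketch of the out-of-line definition (the real one lives in the corresponding .cpp):

    #include <algorithm>

    std::vector<bool> parseBoolVector(
        const flatbuffers::Vector<uint8_t>* fb_vector) {
      // Convert the widened uint8_t storage back element by element.
      std::vector<bool> result(fb_vector->size());
      std::transform(
          fb_vector->begin(), fb_vector->end(), result.begin(),
          [](uint8_t v) { return v != 0; });
      return result;
    }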
nvfuser/include/nvfuser/struct.inl
@@ -0,0 +1,127 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <visibility.h>
+#include <functional>
+#include <memory>
+#include <string>
+
+namespace nvfuser {
+
+// Note [Struct Support in PolymorphicValue]
+//
+// PolymorphicValue supports structs, which are just lists of named fields. The
+// most straightforward way to support structs is to use a map from field name
+// to value, something like:
+//   template <typename T>
+//   using Struct = std::unordered_map<std::string, T>;
+//   using PolymorphicValue = DynamicType<Containers<Struct>, ...>;
+// However, the performance of this approach is not ideal. So instead of making
+// the struct support truly dynamic fields by using a map, we decided to make it
+// semi-dynamic: each struct type in nvFuser must be backed by a real struct in
+// C++, which means the fields have static storage types. On the other hand,
+// struct fields can also be accessed dynamically, that is, you can get or
+// set a struct field without knowing the actual C++ struct and the type of the
+// field. Instead, using solely the string name of the field, you are able to
+// access fields as a PolymorphicValue. For example, if your struct is
+// defined as:
+//   struct A { int64_t x; double y; };
+//   PolymorphicValue v = some struct of type A;
+// then you can access the fields statically like:
+//   const int64_t& x = v->*&A::x;
+//   v->*&A::x = 1;
+// Static accesses should be very efficient, as fast as dynamic casts + pointer
+// dereferences. However, if you don't have access to the definition of `A`, you
+// can still access the fields dynamically:
+//   PolymorphicValue x = v->*"x";
+//   v->*"x" = 1;
+// Dynamic accesses are slower than static accesses, because you need to do
+// string comparisons to find the field, and do casts between the actual field
+// type and PolymorphicValue. This can be slow especially when the struct has
+// some fields of containers like std::vector<int64_t>, because you need to do
+// the conversion between std::vector<PolymorphicValue> and std::vector<int64_t>
+// every time you get or set a field.
+//
+// The implementation of this feature requires a few components working
+// together:
+// 1. StructType: a data type that describes the name and fields of a struct.
+//    More importantly, it stores a function that can create an instance of a
+//    struct without requiring the caller to know the actual struct type.
+// 2. Struct: a base class for all structs, which provides the virtual interface
+//    for accessing fields dynamically, as well as an interface for getting the
+//    StructType of the struct.
+// 3. StructHandle: a wrapper around Struct, which maintains the ownership of
+//    struct objects and provides the overloaded ->* operator for accessing
+//    fields statically and dynamically. StructHandle is a candidate type for
+//    PolymorphicValue.
+// 4. Accessor: a helper class returned by the dynamic ->* operator, which
+//    provides the overloaded cast to PolymorphicValue and the = operator for
+//    getting and setting fields dynamically.
+//
+// With the above components, defining a struct type that supports dynamic
+// access to fields is basically subclassing Struct and implementing the
+// virtual methods. Please check the test PolymorphicValueTest.Struct for an
+// example.
+
+struct Struct {
+  virtual ~Struct() = default;
+
+  virtual StructType type() const = 0;
+  virtual std::function<PolymorphicValue()> getter(
+      const std::string& key) const = 0;
+  virtual std::function<void(const PolymorphicValue&)> setter(
+      const std::string& key) = 0;
+};
+
+class Accessor {
+  std::function<PolymorphicValue()> getter_;
+  std::function<void(const PolymorphicValue&)> setter_;
+
+ public:
+  Accessor(
+      std::function<PolymorphicValue()> getter,
+      std::function<void(const PolymorphicValue&)> setter)
+      : getter_(std::move(getter)), setter_(std::move(setter)) {}
+  Accessor(const Accessor& value) = default;
+  Accessor(Accessor&& value) = default;
+  Accessor& operator=(const Accessor& value) = default;
+  Accessor& operator=(Accessor&& value) = default;
+
+  inline const Accessor& operator=(const PolymorphicValue& value) const {
+    setter_(value);
+    return *this;
+  }
+
+  inline operator PolymorphicValue() const {
+    return getter_();
+  }
+};
+
+inline Accessor StructHandle::operator->*(const std::string& key) const {
+  return Accessor(struct_ptr_->getter(key), struct_ptr_->setter(key));
+}
+
+// If a struct type is only used in kernels and we will never create an
+// instance on the host, we can just use this dummy struct as a placeholder
+// for convenience.
+struct NVF_API NotImplementedStruct : public Struct {
+  StructType type() const override;
+
+  std::function<PolymorphicValue()> getter(
+      const std::string& key) const override {
+    NVF_THROW("Not implemented");
+  }
+
+  std::function<void(const PolymorphicValue&)> setter(
+      const std::string& key) override {
+    NVF_THROW("Not implemented");
+  }
+};
+
+} // namespace nvfuser
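Following the note above, a hedged sketch of a Struct subclass with dynamically accessible fields. It is illustrative only: it leans on the declarations in this header, and type() is left declared-only because the StructType constructors are not shown here.

    // Hypothetical example type; mirrors the getter/setter dispatch pattern
    // used by TensorMetaData elsewhere in this package.
    struct A : public Struct {
      int64_t x = 0;
      double y = 0.0;

      // Would return a StructType describing fields "x" and "y".
      StructType type() const override;

      std::function<PolymorphicValue()> getter(
          const std::string& key) const override {
        if (key == "x") {
          return [this]() { return PolymorphicValue(x); };
        } else if (key == "y") {
          return [this]() { return PolymorphicValue(y); };
        }
        NVF_THROW("Unknown key ", key);
      }

      std::function<void(const PolymorphicValue&)> setter(
          const std::string& key) override {
        if (key == "x") {
          return [this](const PolymorphicValue& v) { x = (int64_t)v; };
        } else if (key == "y") {
          return [this](const PolymorphicValue& v) { y = (double)v; };
        }
        NVF_THROW("Unknown key ", key);
      }
    };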
nvfuser/include/nvfuser/swizzle.h
@@ -0,0 +1,54 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <visibility.h>
+
+#include <ir/interface_nodes.h>
+#include <type.h>
+
+#include <utility>
+
+//
+// The operations defined in this header are intended as user-facing
+// functions. The user will provide the necessary input Vals and the function
+// will create the correct intermediate nodes and return the output Vals.
+//
+
+namespace nvfuser {
+
+NVF_API std::pair<Val*, Val*> dispatchSwizzle(
+    Swizzle2DType type,
+    Val* x,
+    Val* y,
+    Val* maybe_size_x,
+    Val* maybe_size_y);
+
+NVF_API std::pair<Val*, Val*> dispatchSwizzle(
+    SwizzleType type,
+    Val* x,
+    Val* y,
+    Val* maybe_size_x,
+    Val* maybe_size_y);
+
+NVF_API std::pair<Val*, Val*> dispatchUnSwizzle(
+    Swizzle2DType type,
+    Val* x,
+    Val* y,
+    Val* maybe_size_x,
+    Val* maybe_size_y);
+
+NVF_API std::pair<Val*, Val*> dispatchUnSwizzle(
+    SwizzleType type,
+    Val* x,
+    Val* y,
+    Val* maybe_size_x,
+    Val* maybe_size_y);
+
+} // namespace nvfuser
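For intuition only, not nvFuser's actual formula: a plain-integer sketch of the kind of 2D XOR swizzle these dispatch functions build IR for. XOR-ing the column with the row index (reduced into the column range) permutes columns within each row, spreading same-column accesses across shared-memory banks; since XOR is an involution, applying the same map twice restores the original coordinate, which is what the unswizzle direction relies on.

    #include <cassert>
    #include <cstdint>
    #include <utility>

    // Assumes size_y is a power of two and y < size_y.
    std::pair<int64_t, int64_t> xorSwizzle(int64_t x, int64_t y, int64_t size_y) {
      int64_t mask = x % size_y;  // reduce the row index into the column range
      return {x, y ^ mask};       // XOR is its own inverse: swizzle == unswizzle
    }

    int main() {
      auto [sx, sy] = xorSwizzle(3, 5, 8);        // (3, 5) -> (3, 6)
      assert(xorSwizzle(sx, sy, 8).second == 5);  // round-trips
      return 0;
    }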
nvfuser/include/nvfuser/sys_utils.h
@@ -0,0 +1,40 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <utils.h>
+
+#include <string>
+
+namespace nvfuser {
+
+class LibraryLoader : public NonCopyable {
+ public:
+  LibraryLoader() = default;
+
+  ~LibraryLoader();
+
+  void* getSymbol(const char* symbol_name);
+
+  std::string filename() const {
+    return filename_;
+  }
+
+  void setFilename(std::string filename) {
+    filename_ = filename;
+  }
+
+ private:
+  std::string filename_ = "";
+  void* handle_ = nullptr;
+};
+
+// Return true if compute-sanitizer is attached
+bool detectComputeSanitizer();
+
+} // namespace nvfuser
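A minimal POSIX sketch of what getSymbol plausibly does under the hood (the real definition lives in the corresponding .cpp; dlopen/dlsym are the standard mechanism, but the details here are assumptions): lazily open the configured library on first use, then look up the requested symbol.

    #include <dlfcn.h>
    #include <stdexcept>
    #include <string>

    void* getSymbolImpl(
        void*& handle, const std::string& filename, const char* symbol_name) {
      // Lazily open the configured library on first use.
      if (handle == nullptr) {
        handle = dlopen(filename.c_str(), RTLD_LAZY);
        if (handle == nullptr) {
          throw std::runtime_error(std::string("dlopen failed: ") + dlerror());
        }
      }
      void* symbol = dlsym(handle, symbol_name);
      if (symbol == nullptr) {
        throw std::runtime_error(std::string("dlsym failed: ") + dlerror());
      }
      return symbol;
    }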
nvfuser/include/nvfuser/tensor_metadata.h
@@ -0,0 +1,118 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+#pragma once
+
+#include <exceptions.h>
+#include <expr_evaluator.h>
+#include <ir/interface_nodes.h>
+#include <polymorphic_value.h>
+#include <type.h>
+
+namespace nvfuser {
+
+struct TensorMetaData : public Struct {
+  PrimDataType dtype;
+  void* data;
+  // References to the data fields. Does not own the data. The ownership might
+  // belong to at::Tensor or the *_data fields.
+  c10::IntArrayRef logical_size;
+  c10::IntArrayRef logical_stride;
+  c10::IntArrayRef alloc_size;
+  c10::IntArrayRef alloc_stride;
+  // The actual data for the above fields. May be empty if the fields are not
+  // owned by this object.
+  std::vector<int64_t> logical_size_data;
+  std::vector<int64_t> logical_stride_data;
+  std::vector<int64_t> alloc_size_data;
+  std::vector<int64_t> alloc_stride_data;
+
+  std::function<PolymorphicValue()> getter(
+      const std::string& key) const override {
+    if (key == "data") {
+      return [this]() { return PolymorphicValue(Pointer(data, dtype)); };
+    } else if (key == "logical_size") {
+      if (!logical_size_data.empty()) {
+        return [this]() { return PolymorphicValue(logical_size_data); };
+      } else {
+        return [this]() { return PolymorphicValue(logical_size.vec()); };
+      }
+    } else if (key == "logical_stride") {
+      if (!logical_stride_data.empty()) {
+        return [this]() { return PolymorphicValue(logical_stride_data); };
+      } else {
+        return [this]() { return PolymorphicValue(logical_stride.vec()); };
+      }
+    } else if (key == "alloc_size") {
+      if (!alloc_size_data.empty()) {
+        return [this]() { return PolymorphicValue(alloc_size_data); };
+      } else {
+        return [this]() { return PolymorphicValue(alloc_size.vec()); };
+      }
+    } else if (key == "alloc_stride") {
+      if (!alloc_stride_data.empty()) {
+        return [this]() { return PolymorphicValue(alloc_stride_data); };
+      } else {
+        return [this]() { return PolymorphicValue(alloc_stride.vec()); };
+      }
+    } else {
+      NVF_THROW("Unknown key ", key);
+    }
+  }
+
+  std::function<void(const PolymorphicValue&)> setter(
+      const std::string& key) override {
+    if (key == "data") {
+      return [this](const PolymorphicValue& value) { data = (void*)value; };
+    } else if (key == "logical_size") {
+      return [this](const PolymorphicValue& value) {
+        logical_size_data = (std::vector<int64_t>)value;
+        logical_size = c10::makeArrayRef(logical_size_data);
+      };
+    } else if (key == "logical_stride") {
+      return [this](const PolymorphicValue& value) {
+        logical_stride_data = (std::vector<int64_t>)value;
+        logical_stride = c10::makeArrayRef(logical_stride_data);
+      };
+    } else if (key == "alloc_size") {
+      return [this](const PolymorphicValue& value) {
+        alloc_size_data = (std::vector<int64_t>)value;
+        alloc_size = c10::makeArrayRef(alloc_size_data);
+      };
+    } else if (key == "alloc_stride") {
+      return [this](const PolymorphicValue& value) {
+        alloc_stride_data = (std::vector<int64_t>)value;
+        alloc_stride = c10::makeArrayRef(alloc_stride_data);
+      };
+    } else {
+      NVF_THROW("Unknown key ", key);
+    }
+  }
+
+  StructType type() const override {
+    NVF_ERROR(logical_size.size() == logical_stride.size());
+    NVF_ERROR(alloc_size.size() == alloc_stride.size());
+    return globalTensorMetaData(dtype, logical_size.size(), alloc_size.size());
+  }
+};
+
+// Given an ATen tensor, whose sizes and strides are w.r.t. the logical domain
+// of its corresponding TensorView, compute the sizes and strides of the tensor
+// with respect to its allocation domain.
+// For example, if the logical domain is [I1, I2], the allocation domain is
+// [I2*I1], and the tensor's size is [5, 3] and stride is [2, 10], then the
+// resulting size will be [15] and stride will be [2].
+// Another example: if the logical domain is [I1*I2], the allocation domain is
+// [I1, I2], the tensor's size is [15] and stride is [7], and the extent of I2
+// is 5, then the resulting size will be [3, 5] and stride will be [35, 7].
+std::pair<std::vector<int64_t>, std::vector<int64_t>>
+inferAndValidateAllocationSizesAndStrides(
+    const at::Tensor& tensor,
+    TensorView* tv,
+    ExpressionEvaluator ee);
+
+} // namespace nvfuser
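The first example above, worked through as plain arithmetic (a sketch of the reasoning, not the nvFuser traversal): reorder the logical sizes and strides to match the allocation order, validate that the reordered layout is dense enough to merge, then merge.

    #include <cassert>
    #include <cstdint>

    int main() {
      // Logical domain [I1, I2]: size = [5, 3], stride = [2, 10].
      const int64_t size_i1 = 5, stride_i1 = 2;
      const int64_t size_i2 = 3, stride_i2 = 10;
      // Allocation domain [I2*I1]: reorder to [I2, I1], then merge. The merge
      // is valid because stride(I2) == stride(I1) * extent(I1), i.e. 10 == 2 * 5.
      assert(stride_i2 == stride_i1 * size_i1);
      const int64_t merged_size = size_i2 * size_i1;  // 15
      const int64_t merged_stride = stride_i1;        // 2, the inner stride
      assert(merged_size == 15 && merged_stride == 2);
      return 0;
    }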
nvfuser/include/nvfuser/tma.h
@@ -0,0 +1,124 @@
+// clang-format off
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+// clang-format on
+
+#pragma once
+
+#include <ostream>
+
+#include <mma_type.h>
+#include <type.h>
+
+// Note: [TMA support in nvFuser]
+//
+// Recommended reading:
+// https://developer.nvidia.com/blog/nvidia-hopper-architecture-in-depth/
+// https://docs.nvidia.com/cuda/hopper-tuning-guide/index.html#tensor-memory-accelerator
+//
+// TMA (Tensor Memory Accelerator) is a hardware accelerator for transferring
+// tensors up to 5D between global memory and shared memory. It supports tiled
+// data and im2col data. nvFuser currently only supports tiled data.
+//
+// The tiled data transfer allows users to transfer a tile of a tensor between
+// global memory and shared memory. It is helpful to think of the tile as a
+// slice of the tensor. For example, if you have a 2D tensor, you can do
+// something like:
+//   smem_tensor = gmem_tensor[i:i+16:2, j:j+32:1]
+// Or in the language of affine transformations, the gmem_tensor must be
+// transformed as:
+//   root domain: [I1, I2]
+//   split: [I1/16, 16, I2]
+//   split: [I1/16, 8, 2, I2]
+//   split: [I1/16, 8, 2, I2/32, 32]
+//   loop domain: [I1/16, 8, 2, I2/32, 32]
+//
+// Because TMA does bulk transfer, there is a dedicated parallel type `Bulk`
+// for it. In the above example, the gmem_tensor must be parallelized as
+//   [I1/16, Bulk{8}, 2, I2/32, Bulk{32}]
+// `Bulk` is a bit similar to `Vectorize` in some aspects; for example, both
+// say that we are copying a batch of data. Indeed, while `Bulk` represents a
+// general N-dimensional slice that can have flexible extent and step, we can
+// consider `Vectorize` a limited version of `Bulk` that must represent a
+// one-dimensional slice in the innermost dimension, whose step must be 1 and
+// whose extent must be a power of 2. Like vectorize, a loop parallelized as
+// `Bulk` is a trivial loop. Currently, we only support whole-tensor copy, so
+// the consumer tensor of a TMA store cannot be transformed, and all its
+// `IterDomain`s must be parallelized as `Bulk`.
+//
+// To use TMA, we need to encode a tensor map of the global memory tensor we
+// want to transfer. The tensor map is a set of parameters that describes the
+// address and layout of the tensor in global memory, as well as the extent and
+// step of our slice. There are also other features configured in this tensor
+// map, such as: Does this tensor have overlap? How do we want to swizzle the
+// shared memory to avoid bank conflicts? How do we want the L2 cache to be
+// used? Do we want to automatically fill out-of-bound data? The tensor map
+// must reside in constant memory, and nvFuser implements this as a kernel
+// argument declared with the __grid_constant__ qualifier. The tensor map is an
+// opaque type to the user. It must be created by the driver API
+// cuTensorMapEncodeTiled, see:
+// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html
+// In nvFuser, the task of calling `cuTensorMapEncodeTiled` to get the tensor
+// map is on the executor, implemented at: `kir::EncodeTensorMapTiled::evaluate`
+//
+// TMA is supported in nvFuser by inline PTX in memory.cu. Currently, we do a
+// sync immediately after the TMA PTX, which makes TMA synchronous. In the
+// future, we will need more advanced sync analysis and insert the correct
+// syncs at the correct points.
+//
+// During lowering, the index of the global tensor of the TMA expr must be
+// lowered as a kir::TensorIndex whose index has dtype `struct` with name
+// `Hopper::CpAsyncBulkTensorTileIndex`. The first field of this struct is the
+// pointer to the tensor map in constant memory, and the second field is an
+// array for the N-dimensional coordinate. The tensor map will be defined by an
+// expression of `kir::EncodeTensorMapTiled`. The evaluation of this expression
+// will be hoisted to the host, and will not be generated in the kernel.
+// Because we currently only support whole-tensor copy, the N-dimensional
+// coordinate is always just zeros.
+//
+// Currently, because we only support a very limited set of schedules,
+// predicates for TMA exprs are not needed, therefore not generated.
+
+namespace nvfuser {
+namespace tma {
+
+enum class TensorMapInterleave { NoInterleave, B16, B32 };
+enum class TensorMapL2Promotion { NoL2Promotion, B64, B128, B256 };
+enum class TensorMapFloatOOBFill { NoOOBFill, NaN_Request_Zero_FMA };
+
+std::ostream& operator<<(std::ostream& os, TensorMapInterleave interleave);
+std::ostream& operator<<(std::ostream& os, TensorMapL2Promotion l2_promotion);
+std::ostream& operator<<(std::ostream& os, TensorMapFloatOOBFill oob_fill);
+
+// Wrapper for:
+//
+//   CUresult cuTensorMapEncodeTiled(
+//       CUtensorMap* tensorMap,
+//       CUtensorMapDataType tensorDataType,
+//       cuuint32_t tensorRank,
+//       void* globalAddress,
+//       const cuuint64_t* globalDim,
+//       const cuuint64_t* globalStrides,
+//       const cuuint32_t* boxDim,
+//       const cuuint32_t* elementStrides,
+//       CUtensorMapInterleave interleave,
+//       CUtensorMapSwizzle swizzle,
+//       CUtensorMapL2promotion l2Promotion,
+//       CUtensorMapFloatOOBfill oobFill);
+
+Val* encodeTensorMapTiled(
+    DataType data_type,
+    Val* global_address,
+    Val* global_dim,
+    Val* global_strides,
+    Val* box_dim,
+    Val* element_strides,
+    TensorMapInterleave interleave,
+    MmaInputSmemSwizzle swizzle,
+    TensorMapL2Promotion l2_promotion,
+    TensorMapFloatOOBFill oob_fill);
+
+} // namespace tma
+} // namespace nvfuser
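A hedged host-side sketch of the driver call that the wrapper above mirrors, for a 2D float32 tensor of shape [M, N] (row-major) with a 16x32 box. The enum values and the rank-minus-one globalStrides convention follow the CUDA driver API documentation linked above; alignment requirements and error handling are glossed over.

    #include <cuda.h>

    CUtensorMap encode2dTiledMap(void* global_address, cuuint64_t M, cuuint64_t N) {
      CUtensorMap map;
      cuuint64_t global_dim[2] = {N, M};  // innermost dimension first
      // Strides in bytes for all but the innermost dimension (rank - 1 entries).
      cuuint64_t global_strides[1] = {N * sizeof(float)};
      cuuint32_t box_dim[2] = {32, 16};        // extent of the transferred tile
      cuuint32_t element_strides[2] = {1, 1};  // step of the slice
      cuTensorMapEncodeTiled(
          &map,
          CU_TENSOR_MAP_DATA_TYPE_FLOAT32,
          /*tensorRank=*/2,
          global_address,  // must satisfy TMA alignment requirements
          global_dim,
          global_strides,
          box_dim,
          element_strides,
          CU_TENSOR_MAP_INTERLEAVE_NONE,
          CU_TENSOR_MAP_SWIZZLE_NONE,
          CU_TENSOR_MAP_L2_PROMOTION_NONE,
          CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
      return map;
    }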