nvfuser-cu121-torch25 0.2.25.dev20250201__cp310-cp310-manylinux_2_28_x86_64.whl
Sign up to get free protection for your applications and to get access to all the features.
- nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,129 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <dynamic_type/type_traits.h>
|
11
|
+
|
12
|
+
#include <any>
|
13
|
+
#include <cstddef>
|
14
|
+
#include <cstring>
|
15
|
+
#include <functional>
|
16
|
+
#include <ostream>
|
17
|
+
|
18
|
+
namespace nvfuser {
|
19
|
+
|
20
|
+
// Forward declaration: Opaque is a type-erased value holder defined below.
class Opaque;

//! Equality functor captured when an Opaque is constructed from a T.
//! The call operator assumes both arguments actually hold a T; the caller
//! (Opaque::operator==) guarantees this by checking the erased types first.
//! Defined out-of-line below, after Opaque's full definition.
template <typename T>
struct OpaqueEquals {
  bool operator()(const Opaque& a, const Opaque& b) const;
};

//! Serialization functor captured when an Opaque is constructed from a T.
//! Produces the raw object representation of the stored value as bytes.
//! Defined out-of-line below, after Opaque's full definition.
template <typename T>
struct OpaqueToBytes {
  std::vector<std::byte> operator()(const Opaque& a) const;
};
|
31
|
+
|
32
|
+
class Opaque {
|
33
|
+
std::any value_;
|
34
|
+
std::function<bool(const Opaque&, const Opaque&)> equals_;
|
35
|
+
std::function<std::vector<std::byte>(const Opaque&)> to_bytes_;
|
36
|
+
size_t size_;
|
37
|
+
|
38
|
+
public:
|
39
|
+
template <typename T>
|
40
|
+
explicit Opaque(T value)
|
41
|
+
: value_(std::move(value)),
|
42
|
+
equals_(OpaqueEquals<T>{}),
|
43
|
+
to_bytes_(OpaqueToBytes<T>{}),
|
44
|
+
size_(sizeof(T)) {}
|
45
|
+
|
46
|
+
bool operator==(const Opaque& other) const {
|
47
|
+
if (this == &other) {
|
48
|
+
return true;
|
49
|
+
}
|
50
|
+
if (value_.type() != other.value_.type()) {
|
51
|
+
// Note that because C++ is a statically typed language, there is no way
|
52
|
+
// to completely accurately compare equality of opaque values. The
|
53
|
+
// behavior here is just an approximation. For example 1 == 1.0 but
|
54
|
+
// Opaque(1) != Opaque(1.0).
|
55
|
+
return false;
|
56
|
+
}
|
57
|
+
return equals_(*this, other);
|
58
|
+
}
|
59
|
+
|
60
|
+
bool operator!=(const Opaque& other) const {
|
61
|
+
return !(*this == other);
|
62
|
+
}
|
63
|
+
|
64
|
+
const std::any& any() const {
|
65
|
+
return value_;
|
66
|
+
}
|
67
|
+
|
68
|
+
template <typename T>
|
69
|
+
const T& as() const {
|
70
|
+
return std::any_cast<const T&>(value_);
|
71
|
+
}
|
72
|
+
|
73
|
+
template <typename T>
|
74
|
+
T& as() {
|
75
|
+
return std::any_cast<T&>(value_);
|
76
|
+
}
|
77
|
+
|
78
|
+
std::vector<std::byte> bytes() const {
|
79
|
+
return to_bytes_(*this);
|
80
|
+
}
|
81
|
+
|
82
|
+
size_t size() const {
|
83
|
+
return size_;
|
84
|
+
}
|
85
|
+
};
|
86
|
+
|
87
|
+
template <typename T>
|
88
|
+
bool OpaqueEquals<T>::operator()(const Opaque& a, const Opaque& b) const {
|
89
|
+
if constexpr (dynamic_type::opcheck<T> == dynamic_type::opcheck<T>) {
|
90
|
+
// If T == T exists, use it
|
91
|
+
return a.as<T>() == b.as<T>();
|
92
|
+
} else {
|
93
|
+
// Otherwise, do bitwise compare. Note that bitwise comparison is not always
|
94
|
+
// correct. So this is only an approximation. For example:
|
95
|
+
// struct A {
|
96
|
+
// int64_t x;
|
97
|
+
// std::vector<float> y;
|
98
|
+
// };
|
99
|
+
// Opaque(A{1, {2.0}}) != Opaque(A{1, {2.0}});
|
100
|
+
// Another example:
|
101
|
+
// struct A {
|
102
|
+
// int32_t i;
|
103
|
+
// double d;
|
104
|
+
// };
|
105
|
+
// /*maybe:*/ Opaque(A{1, 2.0}) == Opaque(A{1, 2.0});
|
106
|
+
// /*maybe:*/ Opaque(A{1, 2.0}) != Opaque(A{1, 2.0});
|
107
|
+
// Because the struct is not packed, usually C++ compiler will allocate A as
|
108
|
+
// something like below:
|
109
|
+
// [=== i (32bits) ===][=== empty (32bits) ===][====== d (64bits) ======]
|
110
|
+
// The padding bits are not initialized and can be different between two
|
111
|
+
// instances of A. So the comparison result is not even deterministic.
|
112
|
+
// This path should only be used for packed POD structs. For other types,
|
113
|
+
// the user should provide an overloaded operator==.
|
114
|
+
return std::memcmp(&a.as<T>(), &b.as<T>(), sizeof(T)) == 0;
|
115
|
+
}
|
116
|
+
}
|
117
|
+
|
118
|
+
template <typename T>
|
119
|
+
std::vector<std::byte> OpaqueToBytes<T>::operator()(const Opaque& a) const {
|
120
|
+
return std::vector<std::byte>(
|
121
|
+
(const std::byte*)&a.as<T>(), (const std::byte*)(&a.as<T>() + 1));
|
122
|
+
}
|
123
|
+
|
124
|
+
inline std::ostream& operator<<(std::ostream& os, const Opaque& opaque) {
|
125
|
+
os << "Opaque<" << opaque.any().type().name() << ">";
|
126
|
+
return os;
|
127
|
+
}
|
128
|
+
|
129
|
+
} // namespace nvfuser
|
@@ -0,0 +1,192 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <exceptions.h>
|
11
|
+
#include <visibility.h>
|
12
|
+
|
13
|
+
#include <ir/interface_nodes.h>
|
14
|
+
#include <type.h>
|
15
|
+
|
16
|
+
//
|
17
|
+
// The operations defined in this header is intended as user facing functions.
|
18
|
+
// The user will provide the necessary input TensorViews and the function will
|
19
|
+
// create the correct intermediate nodes and return the output TensorViews.
|
20
|
+
//
|
21
|
+
|
22
|
+
namespace nvfuser {
|
23
|
+
|
24
|
+
NVF_API Val* set(Val*);
|
25
|
+
NVF_API TensorView* set(TensorView*);
|
26
|
+
|
27
|
+
// segment_set hints segmenter to break kernel
|
28
|
+
NVF_API Val* segment_set(Val*);
|
29
|
+
NVF_API TensorView* segment_set(TensorView*);
|
30
|
+
|
31
|
+
NVF_API TensorView* view(TensorView* x, DataType dtype);
|
32
|
+
|
33
|
+
NVF_API TensorView* reshape(
|
34
|
+
TensorView* x,
|
35
|
+
const std::vector<int64_t>& original_sizes,
|
36
|
+
const std::vector<int64_t>& new_sizes);
|
37
|
+
|
38
|
+
//! Dynamic version of reshape. The number of dimensions is statically
|
39
|
+
//! fixed as the length of the new_sizes vector, but the size Vals can be
|
40
|
+
//! symbolic, which are then concretized at run time with actual
|
41
|
+
//! fusion inputs.
|
42
|
+
NVF_API TensorView* reshape(TensorView* x, const std::vector<Val*>& new_sizes);
|
43
|
+
|
44
|
+
NVF_API TensorView* flatten(
|
45
|
+
TensorView* x,
|
46
|
+
int64_t start_dim = 0,
|
47
|
+
int64_t end_dim = -1);
|
48
|
+
|
49
|
+
//! Squeeze the selected dimensions.
|
50
|
+
//!
|
51
|
+
//! NOTE: This function throws an error when encountering an unsqueezable
|
52
|
+
//! dimension. This behavior differs from PyTorch.
|
53
|
+
NVF_API TensorView* squeeze(
|
54
|
+
TensorView* x,
|
55
|
+
const std::vector<int64_t>& dims,
|
56
|
+
bool squeeze_expanded = false);
|
57
|
+
|
58
|
+
TensorView* squeeze(TensorView* x, std::initializer_list<int64_t> dims);
|
59
|
+
|
60
|
+
//! Squeeze the dimensions corresponding to "true" in to_squeeze, i.e. remove
|
61
|
+
//! those broadcasted dimensions.
|
62
|
+
//!
|
63
|
+
//! NOTE: This function throws an error when encountering an unsqueezable
|
64
|
+
//! dimension. This behavior differs from PyTorch.
|
65
|
+
//!
|
66
|
+
//! If squeeze_expanded is true, then expanded Broadcasts will be removed just
|
67
|
+
//! as if they were not expanded. If squeeze_expanded is false, then it is an
|
68
|
+
//! error for an expanded broadcast to have a corresponding "true" value in
|
69
|
+
//! to_squeeze.
|
70
|
+
NVF_API TensorView* squeeze(
|
71
|
+
TensorView* x,
|
72
|
+
const std::vector<bool>& to_squeeze,
|
73
|
+
bool squeeze_expanded = false);
|
74
|
+
|
75
|
+
NVF_API TensorView* unsqueeze(TensorView* x, int64_t dim);
|
76
|
+
|
77
|
+
//! Permute a tensor as specified by axis mappings.
|
78
|
+
//!
|
79
|
+
//! The transposition mapping is specified with a list of pairs from
|
80
|
+
//! new to old positions. Positions are relative to the noReduction
|
81
|
+
//! domain.
|
82
|
+
//!
|
83
|
+
//! \param x Tensor to transpose
|
84
|
+
//! \param new2old vector mapping from new to old positions.
|
85
|
+
NVF_API TensorView* permute(TensorView* x, const std::vector<int64_t>& new2old);
|
86
|
+
NVF_API TensorView* permute(
|
87
|
+
TensorView* x,
|
88
|
+
const std::initializer_list<int64_t>& new2old);
|
89
|
+
|
90
|
+
//! Same as above, but with the TensorView::reorder-like API.
|
91
|
+
NVF_API TensorView* permute(
|
92
|
+
TensorView* x,
|
93
|
+
const std::unordered_map<int64_t, int64_t>& old2new);
|
94
|
+
NVF_API TensorView* permute(
|
95
|
+
TensorView* x,
|
96
|
+
const std::initializer_list<std::pair<const int64_t, int64_t>>& old2new);
|
97
|
+
|
98
|
+
//! Transpose a tensor by swapping the two dimensions.
|
99
|
+
NVF_API TensorView* transpose(TensorView* x, int64_t dim0, int64_t dim1);
|
100
|
+
|
101
|
+
//! Transpose a 2D tensor.
|
102
|
+
NVF_API TensorView* transpose(TensorView* x);
|
103
|
+
|
104
|
+
//! Pad a tensor by given widths by specified value. Similar to torch.pad, the
|
105
|
+
//! pad_widths vector specifies the padding widths of the innermost N
|
106
|
+
//! dimensions, where N is half the size of the width vector. If value is
|
107
|
+
//! omitted, a default value of zero is assumed. The provied value will be cast
|
108
|
+
//! to the dtype of the argument x.
|
109
|
+
//! TODO: Support other padding types
|
110
|
+
NVF_API TensorView* pad(
|
111
|
+
TensorView* x,
|
112
|
+
const std::vector<Val*>& pad_widths,
|
113
|
+
Val* value = nullptr,
|
114
|
+
std::optional<IterType> iter_type_opt = std::nullopt);
|
115
|
+
|
116
|
+
//! Concatenate tensors in the given dimension
|
117
|
+
//!
|
118
|
+
//! * manual_padding is a flag to skip the pad operation in the cat composite
|
119
|
+
//! operation.
|
120
|
+
NVF_API TensorView* cat(
|
121
|
+
const std::vector<TensorView*>& inputs,
|
122
|
+
int64_t dim,
|
123
|
+
std::optional<IterType> iter_type_opt = std::nullopt,
|
124
|
+
bool manual_padding = false);
|
125
|
+
|
126
|
+
//! Return a tensor where each dimension is sliced as specified by the
|
127
|
+
//! ranges parameter. Stepping must be one at this moment. The semantics of
|
128
|
+
//! slicing with negative values and values >= extent follow those of numpy and
|
129
|
+
//! PyTorch.
|
130
|
+
//!
|
131
|
+
//! * manual_normalization is a flag to skip using the normalize_slice_range
|
132
|
+
//! lambda to normalize the ranges arguments for each tensor dimension.
|
133
|
+
NVF_API TensorView* slice(
|
134
|
+
TensorView* inp,
|
135
|
+
const std::vector<Slice>& ranges,
|
136
|
+
bool manual_normalization = false);
|
137
|
+
|
138
|
+
//! A variant of the above `slice` function. This is closer to the Python API.
|
139
|
+
NVF_API TensorView* slice(
|
140
|
+
TensorView* inp,
|
141
|
+
const std::vector<int64_t>& starts,
|
142
|
+
const std::vector<int64_t>& stops,
|
143
|
+
const std::vector<int64_t>& steps);
|
144
|
+
|
145
|
+
//! Same as above except that `steps` are all 1.
|
146
|
+
NVF_API TensorView* slice(
|
147
|
+
TensorView* inp,
|
148
|
+
const std::vector<int64_t>& starts,
|
149
|
+
const std::vector<int64_t>& stops);
|
150
|
+
|
151
|
+
// Splits `in`'s dimension `dim` into `chunks` chunks. All but the last chunk
|
152
|
+
// will be of size `ceil(dim_size/chunks)`. Unlike `torch.chunk` which returns
|
153
|
+
// only positive-size chunks and therefore may return fewer than `chunks` of
|
154
|
+
// them, this function returns exactly `chunks` chunks and a chunk of negative
|
155
|
+
// size will lead to a concretization error. This difference is because that we
|
156
|
+
// can't precompute the number of positive-size chunks when the dimension size
|
157
|
+
// is symbolic.
|
158
|
+
std::vector<TensorView*> chunk(TensorView* in, int64_t chunks, int64_t dim);
|
159
|
+
|
160
|
+
// Broadcasts inp based on bool vector. Size of broadcast bool vector should be
|
161
|
+
// the number of dims desired in the broadcasted tensor. This vector should be
|
162
|
+
// true if output dim should be a broadcasted dim, and false if it is not a
|
163
|
+
// broadcasted dim. Number of false entires must match the number of input dims.
|
164
|
+
NVF_API TensorView* broadcast(
|
165
|
+
TensorView* inp,
|
166
|
+
const std::vector<bool>& is_broadcast_dim);
|
167
|
+
|
168
|
+
// Expands input based on provided sizes. expand_sizes should be larger than
|
169
|
+
// the input's root domain (really rfactor) and will broadcast on inner
|
170
|
+
// dimensions. expand_sizes should be -1 for any dimension that should remain a
|
171
|
+
// symbolic size. For dimensions that remain broadcast after the expand should
|
172
|
+
// be set to 1, any dimension being expanded must be marked as a broadcast in
|
173
|
+
// the input and will be expanded to the provided constant size. Any dimension
|
174
|
+
// that's symbolic in the input but specified as a non -1 value will be set to
|
175
|
+
// that constant value.
|
176
|
+
NVF_API TensorView* expand(
|
177
|
+
TensorView* inp,
|
178
|
+
const std::vector<Val*>& expanded_sizes);
|
179
|
+
|
180
|
+
// Expands input based on other. For dimensions in inp that are broadcast with a
|
181
|
+
// matching entry in other that's either a broadcast with expanded extent or a
|
182
|
+
// non broadcasted iter domain, inp will be expanded to other's size.
|
183
|
+
NVF_API TensorView* expand_as(TensorView* inp, TensorView* other);
|
184
|
+
|
185
|
+
// Repeat each dimension for a given time. The repeat_times parameter
|
186
|
+
// must have the same number of elements as the dimensionality of the
|
187
|
+
// input tensor (excluding reduction IDs).
|
188
|
+
NVF_API TensorView* repeat(
|
189
|
+
TensorView* inp,
|
190
|
+
const std::vector<int64_t>& repeat_times);
|
191
|
+
|
192
|
+
} // namespace nvfuser
|
@@ -0,0 +1,13 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
#include <ops/alias.h>
|
10
|
+
#include <ops/arith.h>
|
11
|
+
#include <ops/composite.h>
|
12
|
+
#include <ops/indexing.h>
|
13
|
+
#include <ops/normalization.h>
|