nvfuser-cu121-torch25 0.2.25.dev20250201__cp310-cp310-manylinux_2_28_x86_64.whl
Sign up to get free protection for your applications and to get access to all the features.
- nvfuser/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +20 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,183 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <ATen/core/ivalue.h>
|
11
|
+
#include <exceptions.h>
|
12
|
+
#include <fusion.h>
|
13
|
+
#include <scheduler/pointwise_heuristic.h>
|
14
|
+
#include <scheduler/registry.h>
|
15
|
+
#include <visibility.h>
|
16
|
+
|
17
|
+
namespace nvfuser {
|
18
|
+
|
19
|
+
/*
|
20
|
+
* The 2D pointwise scheduling logic is a bit interesting. We'll start by giving
|
21
|
+
* motivation for what the scheduling is attempting to do. What we're going to
|
22
|
+
* do with the scheduling is attempt to make it two dimensional in a way that
|
23
|
+
* minimizes the refetching of broadcasted dimensions. If we think of the
|
24
|
+
* trivial case:
|
25
|
+
* T0[i0, b1]
|
26
|
+
* T1[b0, i1]
|
27
|
+
* T2[i0, i1] = T0 + T1
|
28
|
+
* If we scheduled T2 as 1-dimensional we would do something along the lines of
|
29
|
+
* merging i0 and i1 then splitting out a block and thread dimension. If i1 is
|
30
|
+
* greater than the thread dimension, then all threads would pull the same value
|
31
|
+
* from T0. However, they would all be pulling different values from T1. In this
|
32
|
+
* case we have perfect reuse of the broadcast dimension T0 but potentially no
|
33
|
+
* reuse of the broadcast dimension of T1. "Potentially" because if i1 isn't too
|
34
|
+
* big it should be efficiently cached in L2. If i1 is big, then by the time we
|
35
|
+
* increment the i0 dimension the i1 dimension will be pushed out of cache.
|
36
|
+
*
|
37
|
+
* Instead what we do is we map this to a two dimensional problem. Instead of
|
38
|
+
* having the schedule that merges the two dimensions, we'll actually leave the
|
39
|
+
* dimensions separate and we'll take i0, split it to BIDy, TIDy, and take i1
|
40
|
+
* and split it to BIDx and TIDx. Therefore we'll have a parallelization on T2
|
41
|
+
* like [BIDy, TIDy | BIDx, TIDx], where | denotes the separation of the
|
42
|
+
* original i0 and i1. This helps because all threads in the TIDx dimension will
|
43
|
+
* reuse the same value in the i0 dimension (holding BIDy and TIDy constant),
|
44
|
+
* all the threads in the TIDy dimension (holding BIDx, and TIDx constant) will
|
45
|
+
* reuse the same value in the i1 dimension. This reuse of values reduces the
|
46
|
+
* number of redundant values pulled from T0 and T1. The same thing can be said
|
47
|
+
* for when incrementing BIDy, but since BIDy is strided on BIDx there's no
|
48
|
+
* effective increment of BIDy without incrementing BIDx. Since all threads are
|
49
|
+
* executed within a block we can effectively consider the block incrementing
|
50
|
+
* TIDx BDIMx times while holding TIDy constant and incrementing TIDy BDIMy
|
51
|
+
* times while holding TIDx constant. Since multiple BIDx's are running at the
|
52
|
+
* same time on the device we can consider a wave on the GPU of incrementing
|
53
|
+
* BIDx (wave number of times), while holding TIDy constant BDIMy * wave number
|
54
|
+
* of times.
|
55
|
+
*
|
56
|
+
* If instead we have a situation like:
|
57
|
+
* T0[i0, i1, b2]
|
58
|
+
* T1[i0, b1, i2]
|
59
|
+
* T2[i0, i1, i2] = T0 + T1
|
60
|
+
* It makes sense that the break point would be in position 2, between i1 and
|
61
|
+
* i2. This is because when we map [i0, i1 | i2] to [BIDy, TIDy| BIDx, TIDx]
|
62
|
+
* BIDx, and TIDx will access the same elements of T0 on b2, and TIDy will
|
63
|
+
* likely access the same elements of T1 (as long as i1 > BDIMy). Even if i1 on
|
64
|
+
* the order of BDIMy we'll only access ~two unique elements per increment of
|
65
|
+
* BIDx or TIDx. This means we'll still reuse many of the same values and limit
|
66
|
+
* the amount we need to read duplicate values in T0 and T1.
|
67
|
+
*
|
68
|
+
* If instead we have:
|
69
|
+
* T0[i0, b1, i2]
|
70
|
+
* T1[b0, i1, i2]
|
71
|
+
* T2[i0, i1, i2] = T0 + T1
|
72
|
+
* The analysis gets a bit more complicated. First if i2 is very large and i0
|
73
|
+
* and i1 are relatively small it would make sense to have [i0, i1 | i2]. If b0
|
74
|
+
* is very small it's unlikely beneficial to have [i0 | i1, i2] as there would
|
75
|
+
* be small reuse on b0, and potentially no reuse on b1. If i2 is very small it
|
76
|
+
* may be worthwhile to have [i0 | i1, i2]. If i1 and i2 are not small, and
|
77
|
+
* their product is relatively large (i.e. you can't fit T2[i, :, :] in L2) then
|
78
|
+
* it's unlikely we'll get any significant reuse across i0.
|
79
|
+
*
|
80
|
+
* What we should (but don't due to complexity) assume then, is that we will get
|
81
|
+
* strong reuse across TIDx and TIDy for dimensions that are on the inner
|
82
|
+
* portion of the 2D tile.
|
83
|
+
*
|
84
|
+
* For example if we have:
|
85
|
+
* T0[i0, b1, i2]
|
86
|
+
* T1[b0, b1, i2]
|
87
|
+
* T2[b0, i1, i2]
|
88
|
+
* T3[i0, i1, i2] = T0 + T1 + T2
|
89
|
+
* We may want to break point at position 1 or position 2 (i.e. [i0 | i1, i2] or
|
90
|
+
* [i0, i1 | i2]). We can't immediately tell from the structure.
|
91
|
+
*
|
92
|
+
* If we choose [i0, i1 | i2] then we'll get:
|
93
|
+
* Strong reuse of T0 on TIDy (b1 dim)
|
94
|
+
* Perfect reuse across T1 on TIDy (b0 and b1)
|
95
|
+
* If BIDx is bound to the LHS of the tile we'll get:
|
96
|
+
* Maybe strong reuse of T0 on BIDx (b1 dim if it's large)
|
97
|
+
* Perfect reuse across T1 on BIDx
|
98
|
+
* Potentially no reuse on T2 if i1 is very large
|
99
|
+
*
|
100
|
+
* If we pick [i0 | i1, i2], then we'll get:
|
101
|
+
* We'll perfect reuse across TIDy on T1 and T2 on b0
|
102
|
+
* Some reuse on T0 and T1 on b1 across BIDx if i2 is relatively small and BIDx
|
103
|
+
* is bound to the RHS of the 2D schedule Perfect reuse on T1 and T2 on b0
|
104
|
+
* across BIDx if BIDx is bound to the LHS of the 2D schedule
|
105
|
+
*
|
106
|
+
* Materializing these benefits is dependent on the decisions the scheduler
|
107
|
+
* makes when parallelizing the problem. The heuristics logic at the moment is
|
108
|
+
* fairly simplistic where it assumes that there's only reuse across the break
|
109
|
+
* points for tensors that have no iteration domain on the entire side of the
|
110
|
+
* breakpoint. This is not optimal but for the time being it seems sufficient.
|
111
|
+
* We would ideally take into consideration the parallelization scheme and
|
112
|
+
* partial broadcasting on the lhs or rhs.
|
113
|
+
*
|
114
|
+
* An example of how this analysis is done is given the DAG:
|
115
|
+
* T0[i0, i1, b2] float
|
116
|
+
* T1[i0, b1, i2] half
|
117
|
+
* T2[i0, b1, i2] = cast(T1, float)
|
118
|
+
* T4[i0, i1, i2] float = T0 + T2
|
119
|
+
* With values of 10, 100, 1000 as [i0, i1, i2]
|
120
|
+
* Our break point analysis for positions 0, 1, 2, 3 will be:
|
121
|
+
*
|
122
|
+
* 0: 10*10 * 100*10 * 1000*10 = 1e9
|
123
|
+
* 1: 10*10 * 100*10 * 1000*10 = 1e9
|
124
|
+
* 2: 10*10 * 100*10 * 1000*6 = 6e8
|
125
|
+
* 3: 10*10 * 100*10 * 1000*10 = 1e9
|
126
|
+
*
|
127
|
+
* Where for each computation the LHS of the * pairs is the number of elements
|
128
|
+
* in that dimension on the reference and the RHS of the * pairs is the
|
129
|
+
* broadcast multiple where any tensor that has all broadcasts on the rhs or lhs
|
130
|
+
* of the break point doesn't contribute to the broadcast multiple of the rhs or
|
131
|
+
* lhs.
|
132
|
+
*
|
133
|
+
* So we'll pick position 2 since we're confident we can get broadcast reuse on
|
134
|
+
* the rhs of tensor 0. As already mentioned this is a pretty big
|
135
|
+
* simplification/assumption and in reality it may be harder/easier to take
|
136
|
+
* advantage of broadcast on the inner or outer dimension. This is a reasonable
|
137
|
+
* way to make relative decisions on break points, however, this computation is
|
138
|
+
* not doing an effective estimate of actual DRAM transfers which it should be
|
139
|
+
* modified to do so.
|
140
|
+
*
|
141
|
+
* For view schedules there can be some incoherent break points for example:
|
142
|
+
* T1[i0, i1*i2] = view(T0[i0, i1, i2])
|
143
|
+
* would make the position 2 "incoherent". In other words we cannot replay
|
144
|
+
* through the view a schedule that tries to merge i0 and i1, without i2. So for
|
145
|
+
* positions that are incoherent we won't consider break point positions there.
|
146
|
+
*
|
147
|
+
* See FusionBroadcastViewMultiples_CUDA for what we expect with view handling.
|
148
|
+
* Shortly any dimensions that are inputs or outputs of view transformations are
|
149
|
+
* considered together, since it's hard to account for partial dimensions that
|
150
|
+
* are being broadcasted. So for view it's primarily an all or nothing situation
|
151
|
+
* when it comes to the 2D pointwise scheduler.
|
152
|
+
*
|
153
|
+
* DID axes, which are not allocated, are ignored in the analysis.
|
154
|
+
* Specifically, two fusions that only differ by DID axes result in
|
155
|
+
* the same scheduling decisions.
|
156
|
+
*/
|
157
|
+
|
158
|
+
class SchedulerRuntimeInfo;
|
159
|
+
class HeuristicDataCache;
|
160
|
+
|
161
|
+
// Scheduler entry for elementwise (pointwise) fusions. Heuristic selection
// follows the 2D break-point analysis described in the file-level comment
// above.
class PointWiseScheduler : public SchedulerEntry {
 public:
  // Compile-time structural check: can this fusion be handled by the
  // pointwise scheduler at all? (Defined out of line.)
  bool canScheduleCompileTime(Fusion* fusion) override;

  // Runtime check; unconditionally true — any fusion that passes the
  // compile-time check is schedulable regardless of runtime input sizes.
  bool canScheduleRunTime(
      Fusion* fusion,
      SchedulerRuntimeInfo& runtime_info,
      HeuristicDataCache* data_cache = nullptr) override {
    return true;
  }

  // Compute the pointwise heuristic parameters (break point, vectorization,
  // unroll factors, grid binding) for this fusion and its runtime sizes.
  std::unique_ptr<HeuristicParams> computeHeuristics(
      Fusion* fusion,
      SchedulerRuntimeInfo& runtime_info,
      HeuristicDataCache* data_cache) override;

  // Apply the given heuristic parameters, transforming the fusion in place.
  void schedule(Fusion* fusion, const HeuristicParams* params) override;

  // Tag used by the scheduler registry to identify this entry.
  constexpr static SchedulerType schedulerType() {
    return SchedulerType::PointWise;
  }
};
|
182
|
+
|
183
|
+
} // namespace nvfuser
|
@@ -0,0 +1,118 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <scheduler/heuristic.h>
|
11
|
+
|
12
|
+
#include <sstream>
|
13
|
+
|
14
|
+
namespace nvfuser {
|
15
|
+
|
16
|
+
// Parameters of the pointwise heuristic to describe the optimal schedule.
|
17
|
+
// Warning: equal operator is intended for use in caching the kernel associated
|
18
|
+
// with these pointwise parameters. It does not check if the launch parameters
|
19
|
+
// are equivalent!
|
20
|
+
class PointwiseParams : public HeuristicParams {
|
21
|
+
public:
|
22
|
+
PointwiseParams() : HeuristicParams(SchedulerType::PointWise) {};
|
23
|
+
|
24
|
+
// Treat pointwise operation as 2-Dimensional, this is the location where we
|
25
|
+
// split from left side of the domain to right. i.e. 0 means problem is
|
26
|
+
// treated as 1-D, 1 of 3 would mean we treat the first dimension as the outer
|
27
|
+
// dimension, and all the others as an inner dimension.
|
28
|
+
int64_t break_point = 0;
|
29
|
+
|
30
|
+
// Split block across left and right dimension
|
31
|
+
bool split_block = false;
|
32
|
+
|
33
|
+
// Split grid y dimension, if otherwise it would be too large
|
34
|
+
bool split_grid_y_dim = false;
|
35
|
+
|
36
|
+
// For many instances having BIDx on the inner most dimension is the most
|
37
|
+
// performant parallel binding. However, if we're broadcasting the outer
|
38
|
+
// dimension with a large inner dimension, it can be more performant to bind
|
39
|
+
// BIDy on the inner most dimension.
|
40
|
+
bool flip_grid_binding = false;
|
41
|
+
|
42
|
+
// vectorization factor
|
43
|
+
int64_t vectorization_factor = 1;
|
44
|
+
|
45
|
+
// Unroll on top of vectorization
|
46
|
+
// In the 2D scheduler, unroll the outer dimension to reuse loaded data across
|
47
|
+
// rows, reducing loaded bytes by the unroll factor.
|
48
|
+
// Always equals 1 for 1D scheduler.
|
49
|
+
int64_t unroll_factor_outer = 1;
|
50
|
+
|
51
|
+
// In the 2D scheduler, unroll the inner dimension to reuse loaded data across
|
52
|
+
// cols, reducing loaded bytes by the unroll factor.
|
53
|
+
// Also used in 1D scheduler.
|
54
|
+
int64_t unroll_factor_inner = 1;
|
55
|
+
|
56
|
+
using HeuristicParams::HeuristicParams;
|
57
|
+
|
58
|
+
// Warning: Does not check launch parameters!
|
59
|
+
bool sameAs(const HeuristicParams* other_base) const override {
|
60
|
+
auto other = dynamic_cast<const PointwiseParams*>(other_base);
|
61
|
+
if (other == nullptr) {
|
62
|
+
return false;
|
63
|
+
}
|
64
|
+
bool attr_equal = other->cparams == cparams &&
|
65
|
+
other->vectorization_factor == vectorization_factor &&
|
66
|
+
other->break_point == break_point &&
|
67
|
+
other->split_block == split_block &&
|
68
|
+
other->split_grid_y_dim == split_grid_y_dim &&
|
69
|
+
other->unroll_factor_outer == unroll_factor_outer &&
|
70
|
+
other->unroll_factor_inner == unroll_factor_inner &&
|
71
|
+
other->flip_grid_binding == flip_grid_binding;
|
72
|
+
return attr_equal;
|
73
|
+
}
|
74
|
+
|
75
|
+
std::string toString() const override {
|
76
|
+
std::stringstream ss;
|
77
|
+
ss << "\n===== Pointwise Parameters ========\n"
|
78
|
+
<< (tag.empty() ? "" : "Tag: ") << tag << " Pointwise Characteristics:\n"
|
79
|
+
<< " Gridx: " << lparams.gdimx() << " BlckY: " << lparams.bdimy()
|
80
|
+
<< " BlckX: " << lparams.bdimx() << "\n";
|
81
|
+
if (break_point) {
|
82
|
+
ss << "2D Schedule\n"
|
83
|
+
<< " Bcast break point: " << break_point << "\n";
|
84
|
+
if (split_block) {
|
85
|
+
ss << "Split block into y-dim\n";
|
86
|
+
}
|
87
|
+
if (split_grid_y_dim) {
|
88
|
+
ss << " Split y grid dim\n";
|
89
|
+
}
|
90
|
+
}
|
91
|
+
ss << "vectorization_factor: " << vectorization_factor << "\n";
|
92
|
+
ss << "unroll_factor_outer: " << unroll_factor_outer << "\n";
|
93
|
+
ss << "unroll_factor_inner: " << unroll_factor_inner << "\n";
|
94
|
+
if (flip_grid_binding) {
|
95
|
+
ss << "Flip BIDx/BIDy bindings\n";
|
96
|
+
}
|
97
|
+
ss << "====================================\n";
|
98
|
+
return ss.str();
|
99
|
+
}
|
100
|
+
|
101
|
+
// Warning: Hash is not based on launch parameters!
|
102
|
+
size_t hash() const override {
|
103
|
+
size_t attr_hash = static_cast<size_t>(vectorization_factor) ^
|
104
|
+
static_cast<size_t>(break_point) << 4 ^
|
105
|
+
static_cast<size_t>(split_block) << 5 ^
|
106
|
+
static_cast<size_t>(split_grid_y_dim) << 6 ^
|
107
|
+
static_cast<size_t>(unroll_factor_outer) << 7 ^
|
108
|
+
static_cast<size_t>(unroll_factor_inner) << 9 ^
|
109
|
+
static_cast<size_t>(flip_grid_binding) << 10;
|
110
|
+
return attr_hash;
|
111
|
+
}
|
112
|
+
|
113
|
+
std::unique_ptr<HeuristicParams> clone() const override {
|
114
|
+
return std::make_unique<PointwiseParams>(*this);
|
115
|
+
}
|
116
|
+
};
|
117
|
+
|
118
|
+
} // namespace nvfuser
|
@@ -0,0 +1,24 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <compute_at_map.h>
|
11
|
+
#include <exceptions.h>
|
12
|
+
#include <ir/all_nodes.h>
|
13
|
+
#include <ir/utils.h>
|
14
|
+
#include <scheduler/tools/domain_map.h>
|
15
|
+
#include <scheduler/utils.h>
|
16
|
+
|
17
|
+
namespace nvfuser {
|
18
|
+
namespace pointwise_utils {

// Return reference tensor view.
// NOTE(review): declaration only — the selection criteria and whether a
// nullptr can be returned when no suitable reference exists are not visible
// here; confirm against the definition.
TensorView* getReferenceTensor(Fusion* fusion);

} // namespace pointwise_utils
|
24
|
+
} // namespace nvfuser
|
@@ -0,0 +1,43 @@
|
|
1
|
+
// clang-format off
|
2
|
+
/*
|
3
|
+
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
|
4
|
+
* All rights reserved.
|
5
|
+
* SPDX-License-Identifier: BSD-3-Clause
|
6
|
+
*/
|
7
|
+
// clang-format on
|
8
|
+
#pragma once
|
9
|
+
|
10
|
+
#include <ATen/core/ivalue.h>
|
11
|
+
#include <exceptions.h>
|
12
|
+
#include <fusion.h>
|
13
|
+
#include <scheduler/reduction_heuristic.h>
|
14
|
+
#include <scheduler/registry.h>
|
15
|
+
#include <visibility.h>
|
16
|
+
|
17
|
+
namespace nvfuser {
|
18
|
+
|
19
|
+
class SchedulerRuntimeInfo;
|
20
|
+
class HeuristicDataCache;
|
21
|
+
|
22
|
+
// Scheduler entry for reduction fusions.
class ReductionScheduler : public SchedulerEntry {
 public:
  // Compile-time structural check: can this fusion be handled by the
  // reduction scheduler? (Defined out of line.)
  bool canScheduleCompileTime(Fusion* fusion) override;

  // Runtime check against concrete input sizes. Unlike the pointwise
  // scheduler, this is declared out of line and is not unconditionally true.
  bool canScheduleRunTime(
      Fusion* fusion,
      SchedulerRuntimeInfo& runtime_info,
      HeuristicDataCache* data_cache = nullptr) override;

  // Compute the reduction heuristic parameters for this fusion and its
  // runtime input sizes.
  std::unique_ptr<HeuristicParams> computeHeuristics(
      Fusion* fusion,
      SchedulerRuntimeInfo& runtime_info,
      HeuristicDataCache* data_cache) override;

  // Apply the given heuristic parameters, transforming the fusion in place.
  void schedule(Fusion* fusion, const HeuristicParams* params) override;

  // Tag used by the scheduler registry to identify this entry.
  constexpr static SchedulerType schedulerType() {
    return SchedulerType::Reduction;
  }
};
|
42
|
+
|
43
|
+
} // namespace nvfuser
|