nvfuser-cu121-torch25 0.2.25.dev20250201__cp312-cp312-manylinux_2_28_x86_64.whl
Sign up to get free protection for your applications and to get access to all the features.
- nvfuser/_C.cpython-312-x86_64-linux-gnu.so +0 -0
- nvfuser/__init__.py +618 -0
- nvfuser/__init__.pyi +4 -0
- nvfuser/contrib/__init__.py +9 -0
- nvfuser/contrib/nn/__init__.py +13 -0
- nvfuser/contrib/nn/normalization.py +725 -0
- nvfuser/include/nvfuser/alias_analysis.h +116 -0
- nvfuser/include/nvfuser/bfs.h +929 -0
- nvfuser/include/nvfuser/codegen.h +26 -0
- nvfuser/include/nvfuser/compute_at.h +28 -0
- nvfuser/include/nvfuser/compute_at_map.h +394 -0
- nvfuser/include/nvfuser/contiguity.h +351 -0
- nvfuser/include/nvfuser/cuda_utils.h +50 -0
- nvfuser/include/nvfuser/debug.h +50 -0
- nvfuser/include/nvfuser/device_lower/analysis/bank_conflict.h +53 -0
- nvfuser/include/nvfuser/device_lower/analysis/circular_buffer.h +109 -0
- nvfuser/include/nvfuser/device_lower/analysis/device_version.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/divisible_split.h +28 -0
- nvfuser/include/nvfuser/device_lower/analysis/fused_reduction.h +36 -0
- nvfuser/include/nvfuser/device_lower/analysis/index_compute.h +322 -0
- nvfuser/include/nvfuser/device_lower/analysis/predicate_elimination.h +71 -0
- nvfuser/include/nvfuser/device_lower/analysis/sync_information.h +47 -0
- nvfuser/include/nvfuser/device_lower/analysis/tensor_memory.h +65 -0
- nvfuser/include/nvfuser/device_lower/analysis/thread_predicate.h +158 -0
- nvfuser/include/nvfuser/device_lower/analysis/tma.h +93 -0
- nvfuser/include/nvfuser/device_lower/analysis/trivial_broadcast.h +75 -0
- nvfuser/include/nvfuser/device_lower/id_model_options.h +135 -0
- nvfuser/include/nvfuser/device_lower/lower2device.h +391 -0
- nvfuser/include/nvfuser/device_lower/pass/alias_memory.h +37 -0
- nvfuser/include/nvfuser/device_lower/pass/allocation.h +32 -0
- nvfuser/include/nvfuser/device_lower/pass/circular_buffer.h +191 -0
- nvfuser/include/nvfuser/device_lower/pass/expr_sort.h +17 -0
- nvfuser/include/nvfuser/device_lower/pass/fusion_simplifier.h +21 -0
- nvfuser/include/nvfuser/device_lower/pass/grid_serialization.h +26 -0
- nvfuser/include/nvfuser/device_lower/pass/index.h +200 -0
- nvfuser/include/nvfuser/device_lower/pass/inline_ptx.h +16 -0
- nvfuser/include/nvfuser/device_lower/pass/insert_syncs.h +39 -0
- nvfuser/include/nvfuser/device_lower/pass/instrument.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/loop_rotation.h +150 -0
- nvfuser/include/nvfuser/device_lower/pass/loops.h +68 -0
- nvfuser/include/nvfuser/device_lower/pass/magic_zero.h +86 -0
- nvfuser/include/nvfuser/device_lower/pass/misaligned_vectorization.h +118 -0
- nvfuser/include/nvfuser/device_lower/pass/predicate.h +23 -0
- nvfuser/include/nvfuser/device_lower/pass/replace_size.h +24 -0
- nvfuser/include/nvfuser/device_lower/pass/scalar_hoist.h +115 -0
- nvfuser/include/nvfuser/device_lower/pass/unroll.h +98 -0
- nvfuser/include/nvfuser/device_lower/pass/vectorize_welford.h +45 -0
- nvfuser/include/nvfuser/device_lower/pass/warp_reduce.h +23 -0
- nvfuser/include/nvfuser/device_lower/utils.h +382 -0
- nvfuser/include/nvfuser/device_lower/validation.h +74 -0
- nvfuser/include/nvfuser/disjoint_set.h +556 -0
- nvfuser/include/nvfuser/dispatch.h +334 -0
- nvfuser/include/nvfuser/driver_api.h +49 -0
- nvfuser/include/nvfuser/dynamic_transform.h +316 -0
- nvfuser/include/nvfuser/dynamic_type/C++20/type_traits +37 -0
- nvfuser/include/nvfuser/dynamic_type/dynamic_type.h +969 -0
- nvfuser/include/nvfuser/dynamic_type/error.h +24 -0
- nvfuser/include/nvfuser/dynamic_type/type_traits.h +703 -0
- nvfuser/include/nvfuser/evaluator_common.h +295 -0
- nvfuser/include/nvfuser/exceptions.h +283 -0
- nvfuser/include/nvfuser/expr_evaluator.h +125 -0
- nvfuser/include/nvfuser/expr_simplifier.h +218 -0
- nvfuser/include/nvfuser/flatbuffers/allocator.h +68 -0
- nvfuser/include/nvfuser/flatbuffers/array.h +253 -0
- nvfuser/include/nvfuser/flatbuffers/base.h +486 -0
- nvfuser/include/nvfuser/flatbuffers/buffer.h +154 -0
- nvfuser/include/nvfuser/flatbuffers/buffer_ref.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/code_generator.h +80 -0
- nvfuser/include/nvfuser/flatbuffers/code_generators.h +234 -0
- nvfuser/include/nvfuser/flatbuffers/default_allocator.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/detached_buffer.h +114 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffer_builder.h +1225 -0
- nvfuser/include/nvfuser/flatbuffers/flatbuffers.h +272 -0
- nvfuser/include/nvfuser/flatbuffers/flatc.h +130 -0
- nvfuser/include/nvfuser/flatbuffers/flex_flat_util.h +36 -0
- nvfuser/include/nvfuser/flatbuffers/flexbuffers.h +1889 -0
- nvfuser/include/nvfuser/flatbuffers/grpc.h +300 -0
- nvfuser/include/nvfuser/flatbuffers/hash.h +127 -0
- nvfuser/include/nvfuser/flatbuffers/idl.h +1359 -0
- nvfuser/include/nvfuser/flatbuffers/minireflect.h +420 -0
- nvfuser/include/nvfuser/flatbuffers/reflection.h +522 -0
- nvfuser/include/nvfuser/flatbuffers/reflection_generated.h +1471 -0
- nvfuser/include/nvfuser/flatbuffers/registry.h +128 -0
- nvfuser/include/nvfuser/flatbuffers/stl_emulation.h +513 -0
- nvfuser/include/nvfuser/flatbuffers/string.h +64 -0
- nvfuser/include/nvfuser/flatbuffers/struct.h +53 -0
- nvfuser/include/nvfuser/flatbuffers/table.h +168 -0
- nvfuser/include/nvfuser/flatbuffers/util.h +731 -0
- nvfuser/include/nvfuser/flatbuffers/vector.h +393 -0
- nvfuser/include/nvfuser/flatbuffers/vector_downward.h +273 -0
- nvfuser/include/nvfuser/flatbuffers/verifier.h +317 -0
- nvfuser/include/nvfuser/fusion.h +511 -0
- nvfuser/include/nvfuser/fusion_guard.h +37 -0
- nvfuser/include/nvfuser/fusion_profiler.h +311 -0
- nvfuser/include/nvfuser/fusion_segmenter.h +751 -0
- nvfuser/include/nvfuser/global_allocator.h +27 -0
- nvfuser/include/nvfuser/grouped_reduction.h +47 -0
- nvfuser/include/nvfuser/host_ir/container.h +60 -0
- nvfuser/include/nvfuser/host_ir/executor.h +152 -0
- nvfuser/include/nvfuser/host_ir/host_ir.h +320 -0
- nvfuser/include/nvfuser/host_ir/lower.h +35 -0
- nvfuser/include/nvfuser/id_model/circular_buffer_indexing.h +56 -0
- nvfuser/include/nvfuser/id_model/contiguity.h +166 -0
- nvfuser/include/nvfuser/id_model/id_model.h +359 -0
- nvfuser/include/nvfuser/id_model/id_model_index_compute.h +81 -0
- nvfuser/include/nvfuser/id_model/indexing.h +208 -0
- nvfuser/include/nvfuser/id_model/indexing_traversal.h +72 -0
- nvfuser/include/nvfuser/id_model/indexing_utils.h +62 -0
- nvfuser/include/nvfuser/id_model/loop_promotion.h +180 -0
- nvfuser/include/nvfuser/id_model/predicate_indexing.h +104 -0
- nvfuser/include/nvfuser/id_model/schedule.h +54 -0
- nvfuser/include/nvfuser/id_model/to_string.h +87 -0
- nvfuser/include/nvfuser/id_model/transform_replay.h +58 -0
- nvfuser/include/nvfuser/id_model/utils.h +176 -0
- nvfuser/include/nvfuser/id_model/validation_utils.h +55 -0
- nvfuser/include/nvfuser/index_compute.h +651 -0
- nvfuser/include/nvfuser/instrumentation.h +107 -0
- nvfuser/include/nvfuser/ir/all_nodes.h +14 -0
- nvfuser/include/nvfuser/ir/base_nodes.h +687 -0
- nvfuser/include/nvfuser/ir/builder.h +215 -0
- nvfuser/include/nvfuser/ir/builder_passkey.h +29 -0
- nvfuser/include/nvfuser/ir/cloner.h +185 -0
- nvfuser/include/nvfuser/ir/container.h +226 -0
- nvfuser/include/nvfuser/ir/graphviz.h +119 -0
- nvfuser/include/nvfuser/ir/interface_nodes.h +957 -0
- nvfuser/include/nvfuser/ir/internal_base_nodes.h +744 -0
- nvfuser/include/nvfuser/ir/internal_nodes.h +2792 -0
- nvfuser/include/nvfuser/ir/iostream.h +98 -0
- nvfuser/include/nvfuser/ir/printer.h +57 -0
- nvfuser/include/nvfuser/ir/utils.h +801 -0
- nvfuser/include/nvfuser/iter_visitor.h +661 -0
- nvfuser/include/nvfuser/kernel.h +299 -0
- nvfuser/include/nvfuser/kernel_db/kernel_db.h +109 -0
- nvfuser/include/nvfuser/kernel_db/utils.h +37 -0
- nvfuser/include/nvfuser/kernel_ir.h +1457 -0
- nvfuser/include/nvfuser/kernel_ir_dispatch.h +147 -0
- nvfuser/include/nvfuser/linked_hash_map.h +97 -0
- nvfuser/include/nvfuser/logical_domain_map.h +577 -0
- nvfuser/include/nvfuser/macros.h +23 -0
- nvfuser/include/nvfuser/mma_type.h +257 -0
- nvfuser/include/nvfuser/multidevice/c10d_mock.h +175 -0
- nvfuser/include/nvfuser/multidevice/communication.h +232 -0
- nvfuser/include/nvfuser/multidevice/communicator.h +179 -0
- nvfuser/include/nvfuser/multidevice/device_mesh.h +95 -0
- nvfuser/include/nvfuser/multidevice/executor.h +107 -0
- nvfuser/include/nvfuser/multidevice/multidevice.h +18 -0
- nvfuser/include/nvfuser/multidevice/utils.h +187 -0
- nvfuser/include/nvfuser/non_divisible_split.h +86 -0
- nvfuser/include/nvfuser/opaque_type.h +129 -0
- nvfuser/include/nvfuser/ops/alias.h +192 -0
- nvfuser/include/nvfuser/ops/all_ops.h +13 -0
- nvfuser/include/nvfuser/ops/arith.h +712 -0
- nvfuser/include/nvfuser/ops/composite.h +130 -0
- nvfuser/include/nvfuser/ops/indexing.h +55 -0
- nvfuser/include/nvfuser/ops/normalization.h +263 -0
- nvfuser/include/nvfuser/ops/utils.h +127 -0
- nvfuser/include/nvfuser/options.h +313 -0
- nvfuser/include/nvfuser/parallel_dimension_map.h +95 -0
- nvfuser/include/nvfuser/parallel_type_bitmap.h +365 -0
- nvfuser/include/nvfuser/polymorphic_value.h +432 -0
- nvfuser/include/nvfuser/predicate_compute.h +213 -0
- nvfuser/include/nvfuser/python_frontend/distributed_tensor.h +50 -0
- nvfuser/include/nvfuser/python_frontend/fusion_cache.h +298 -0
- nvfuser/include/nvfuser/python_frontend/fusion_definition.h +372 -0
- nvfuser/include/nvfuser/python_frontend/fusion_record.h +3124 -0
- nvfuser/include/nvfuser/python_frontend/fusion_state.h +143 -0
- nvfuser/include/nvfuser/python_frontend/python_bindings.h +27 -0
- nvfuser/include/nvfuser/python_frontend/segmentation.h +246 -0
- nvfuser/include/nvfuser/python_frontend/translation.h +20 -0
- nvfuser/include/nvfuser/python_frontend/translation_utils.h +308 -0
- nvfuser/include/nvfuser/scheduler/all_schedulers.h +17 -0
- nvfuser/include/nvfuser/scheduler/ampere_multi_matmul.h +206 -0
- nvfuser/include/nvfuser/scheduler/cache_policy_refiner.h +19 -0
- nvfuser/include/nvfuser/scheduler/compile_time_info.h +322 -0
- nvfuser/include/nvfuser/scheduler/debug_utils.h +68 -0
- nvfuser/include/nvfuser/scheduler/expr_eval_sched.h +45 -0
- nvfuser/include/nvfuser/scheduler/heuristic.h +113 -0
- nvfuser/include/nvfuser/scheduler/hopper_multi_matmul.h +204 -0
- nvfuser/include/nvfuser/scheduler/mark_aliases.h +19 -0
- nvfuser/include/nvfuser/scheduler/matmul.h +40 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic.h +293 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin.h +65 -0
- nvfuser/include/nvfuser/scheduler/matmul_heuristic_plugin_api.h +99 -0
- nvfuser/include/nvfuser/scheduler/matmul_utils.h +54 -0
- nvfuser/include/nvfuser/scheduler/mma_utils.h +500 -0
- nvfuser/include/nvfuser/scheduler/multi_matmul.h +74 -0
- nvfuser/include/nvfuser/scheduler/no_op.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner.h +49 -0
- nvfuser/include/nvfuser/scheduler/normalization_inner_outer.h +51 -0
- nvfuser/include/nvfuser/scheduler/normalization_outer.h +48 -0
- nvfuser/include/nvfuser/scheduler/normalization_utils.h +379 -0
- nvfuser/include/nvfuser/scheduler/pointwise.h +183 -0
- nvfuser/include/nvfuser/scheduler/pointwise_heuristic.h +118 -0
- nvfuser/include/nvfuser/scheduler/pointwise_utils.h +24 -0
- nvfuser/include/nvfuser/scheduler/reduction.h +43 -0
- nvfuser/include/nvfuser/scheduler/reduction_heuristic.h +339 -0
- nvfuser/include/nvfuser/scheduler/reduction_utils.h +159 -0
- nvfuser/include/nvfuser/scheduler/registry.h +97 -0
- nvfuser/include/nvfuser/scheduler/registry_utils.h +111 -0
- nvfuser/include/nvfuser/scheduler/resize.h +41 -0
- nvfuser/include/nvfuser/scheduler/resize_heuristic.h +67 -0
- nvfuser/include/nvfuser/scheduler/runtime_info.h +166 -0
- nvfuser/include/nvfuser/scheduler/scheduler_types.h +80 -0
- nvfuser/include/nvfuser/scheduler/transpose.h +114 -0
- nvfuser/include/nvfuser/scheduler/transpose_heuristic.h +164 -0
- nvfuser/include/nvfuser/scheduler/utils.h +771 -0
- nvfuser/include/nvfuser/scheduler/vectorize_helper.h +349 -0
- nvfuser/include/nvfuser/serde/factory.h +55 -0
- nvfuser/include/nvfuser/serde/fusion_cache_generated.h +4319 -0
- nvfuser/include/nvfuser/serde/fusion_record.h +124 -0
- nvfuser/include/nvfuser/serde/polymorphic_value.h +52 -0
- nvfuser/include/nvfuser/serde/utils.h +34 -0
- nvfuser/include/nvfuser/struct.inl +127 -0
- nvfuser/include/nvfuser/swizzle.h +54 -0
- nvfuser/include/nvfuser/sys_utils.h +40 -0
- nvfuser/include/nvfuser/tensor_metadata.h +118 -0
- nvfuser/include/nvfuser/tma.h +124 -0
- nvfuser/include/nvfuser/transform_iter.h +522 -0
- nvfuser/include/nvfuser/transform_replay.h +297 -0
- nvfuser/include/nvfuser/transform_rfactor.h +33 -0
- nvfuser/include/nvfuser/transform_view.h +136 -0
- nvfuser/include/nvfuser/type.h +1125 -0
- nvfuser/include/nvfuser/type_promotion.h +61 -0
- nvfuser/include/nvfuser/utils.h +619 -0
- nvfuser/include/nvfuser/val_graph.h +446 -0
- nvfuser/include/nvfuser/val_graph_visitor.h +259 -0
- nvfuser/include/nvfuser/validator_utils.h +92 -0
- nvfuser/include/nvfuser/vectorization_info.h +31 -0
- nvfuser/include/nvfuser/visibility.h +21 -0
- nvfuser/lib/libnvfuser_codegen.so +0 -0
- nvfuser/nvfuser_version.py +69 -0
- nvfuser/pytorch_utils.py +184 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig-release.cmake +20 -0
- nvfuser/share/cmake/nvfuser/NvfuserConfig.cmake +106 -0
- nvfuser/utils.py +18 -0
- nvfuser/version.py +1 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/LICENSE +976 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/METADATA +16 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/RECORD +242 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/WHEEL +5 -0
- nvfuser_cu121_torch25-0.2.25.dev20250201.dist-info/top_level.txt +1 -0
- nvfuser_cu121_torch25.libs/libnvToolsExt-847d78f2.so.1.0.0 +0 -0
@@ -0,0 +1,393 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright 2021 Google Inc. All rights reserved.
|
3
|
+
*
|
4
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
* you may not use this file except in compliance with the License.
|
6
|
+
* You may obtain a copy of the License at
|
7
|
+
*
|
8
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
*
|
10
|
+
* Unless required by applicable law or agreed to in writing, software
|
11
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
* See the License for the specific language governing permissions and
|
14
|
+
* limitations under the License.
|
15
|
+
*/
|
16
|
+
|
17
|
+
#ifndef FLATBUFFERS_VECTOR_H_
|
18
|
+
#define FLATBUFFERS_VECTOR_H_
|
19
|
+
|
20
|
+
#include "flatbuffers/base.h"
|
21
|
+
#include "flatbuffers/buffer.h"
|
22
|
+
#include "flatbuffers/stl_emulation.h"
|
23
|
+
|
24
|
+
namespace flatbuffers {
|
25
|
+
|
26
|
+
struct String;
|
27
|
+
|
28
|
+
// An STL compatible iterator implementation for Vector below, effectively
|
29
|
+
// calling Get() for every element.
|
30
|
+
template<typename T, typename IT, typename Data = uint8_t *>
|
31
|
+
struct VectorIterator {
|
32
|
+
typedef std::random_access_iterator_tag iterator_category;
|
33
|
+
typedef IT value_type;
|
34
|
+
typedef ptrdiff_t difference_type;
|
35
|
+
typedef IT *pointer;
|
36
|
+
typedef IT &reference;
|
37
|
+
|
38
|
+
VectorIterator(Data data, uoffset_t i)
|
39
|
+
: data_(data + IndirectHelper<T>::element_stride * i) {}
|
40
|
+
VectorIterator(const VectorIterator &other) : data_(other.data_) {}
|
41
|
+
VectorIterator() : data_(nullptr) {}
|
42
|
+
|
43
|
+
VectorIterator &operator=(const VectorIterator &other) {
|
44
|
+
data_ = other.data_;
|
45
|
+
return *this;
|
46
|
+
}
|
47
|
+
|
48
|
+
VectorIterator &operator=(VectorIterator &&other) {
|
49
|
+
data_ = other.data_;
|
50
|
+
return *this;
|
51
|
+
}
|
52
|
+
|
53
|
+
bool operator==(const VectorIterator &other) const {
|
54
|
+
return data_ == other.data_;
|
55
|
+
}
|
56
|
+
|
57
|
+
bool operator<(const VectorIterator &other) const {
|
58
|
+
return data_ < other.data_;
|
59
|
+
}
|
60
|
+
|
61
|
+
bool operator!=(const VectorIterator &other) const {
|
62
|
+
return data_ != other.data_;
|
63
|
+
}
|
64
|
+
|
65
|
+
difference_type operator-(const VectorIterator &other) const {
|
66
|
+
return (data_ - other.data_) / IndirectHelper<T>::element_stride;
|
67
|
+
}
|
68
|
+
|
69
|
+
// Note: return type is incompatible with the standard
|
70
|
+
// `reference operator*()`.
|
71
|
+
IT operator*() const { return IndirectHelper<T>::Read(data_, 0); }
|
72
|
+
|
73
|
+
// Note: return type is incompatible with the standard
|
74
|
+
// `pointer operator->()`.
|
75
|
+
IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }
|
76
|
+
|
77
|
+
VectorIterator &operator++() {
|
78
|
+
data_ += IndirectHelper<T>::element_stride;
|
79
|
+
return *this;
|
80
|
+
}
|
81
|
+
|
82
|
+
VectorIterator operator++(int) {
|
83
|
+
VectorIterator temp(data_, 0);
|
84
|
+
data_ += IndirectHelper<T>::element_stride;
|
85
|
+
return temp;
|
86
|
+
}
|
87
|
+
|
88
|
+
VectorIterator operator+(const uoffset_t &offset) const {
|
89
|
+
return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
|
90
|
+
0);
|
91
|
+
}
|
92
|
+
|
93
|
+
VectorIterator &operator+=(const uoffset_t &offset) {
|
94
|
+
data_ += offset * IndirectHelper<T>::element_stride;
|
95
|
+
return *this;
|
96
|
+
}
|
97
|
+
|
98
|
+
VectorIterator &operator--() {
|
99
|
+
data_ -= IndirectHelper<T>::element_stride;
|
100
|
+
return *this;
|
101
|
+
}
|
102
|
+
|
103
|
+
VectorIterator operator--(int) {
|
104
|
+
VectorIterator temp(data_, 0);
|
105
|
+
data_ -= IndirectHelper<T>::element_stride;
|
106
|
+
return temp;
|
107
|
+
}
|
108
|
+
|
109
|
+
VectorIterator operator-(const uoffset_t &offset) const {
|
110
|
+
return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
|
111
|
+
0);
|
112
|
+
}
|
113
|
+
|
114
|
+
VectorIterator &operator-=(const uoffset_t &offset) {
|
115
|
+
data_ -= offset * IndirectHelper<T>::element_stride;
|
116
|
+
return *this;
|
117
|
+
}
|
118
|
+
|
119
|
+
private:
|
120
|
+
Data data_;
|
121
|
+
};
|
122
|
+
|
123
|
+
template<typename T, typename IT>
|
124
|
+
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *>;
|
125
|
+
|
126
|
+
// Reverse adaptor over the by-value iterators above. std::reverse_iterator's
// own operator*/operator-> return references/pointers, which would dangle for
// iterators that dereference by value, so both are overridden here to return
// the element by value instead.
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
  explicit VectorReverseIterator(Iterator iter)
      : std::reverse_iterator<Iterator>(iter) {}

  // Note: return type is incompatible with the standard
  // `reference operator*()` (returns by value).
  typename Iterator::value_type operator*() const {
    auto before = std::reverse_iterator<Iterator>::current;
    return *--before;
  }

  // Note: return type is incompatible with the standard
  // `pointer operator->()` (returns by value).
  typename Iterator::value_type operator->() const {
    auto before = std::reverse_iterator<Iterator>::current;
    return *--before;
  }
};
|
145
|
+
|
146
|
+
// This is used as a helper type for accessing vectors.
|
147
|
+
// Vector::data() assumes the vector elements start after the length field.
|
148
|
+
template<typename T> class Vector {
|
149
|
+
public:
|
150
|
+
typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
|
151
|
+
iterator;
|
152
|
+
typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type>
|
153
|
+
const_iterator;
|
154
|
+
typedef VectorReverseIterator<iterator> reverse_iterator;
|
155
|
+
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
|
156
|
+
|
157
|
+
typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
|
158
|
+
scalar_tag;
|
159
|
+
|
160
|
+
static FLATBUFFERS_CONSTEXPR bool is_span_observable =
|
161
|
+
scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);
|
162
|
+
|
163
|
+
uoffset_t size() const { return EndianScalar(length_); }
|
164
|
+
|
165
|
+
// Deprecated: use size(). Here for backwards compatibility.
|
166
|
+
FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
|
167
|
+
uoffset_t Length() const { return size(); }
|
168
|
+
|
169
|
+
typedef typename IndirectHelper<T>::return_type return_type;
|
170
|
+
typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
|
171
|
+
typedef return_type value_type;
|
172
|
+
|
173
|
+
return_type Get(uoffset_t i) const {
|
174
|
+
FLATBUFFERS_ASSERT(i < size());
|
175
|
+
return IndirectHelper<T>::Read(Data(), i);
|
176
|
+
}
|
177
|
+
|
178
|
+
return_type operator[](uoffset_t i) const { return Get(i); }
|
179
|
+
|
180
|
+
// If this is a Vector of enums, T will be its storage type, not the enum
|
181
|
+
// type. This function makes it convenient to retrieve value with enum
|
182
|
+
// type E.
|
183
|
+
template<typename E> E GetEnum(uoffset_t i) const {
|
184
|
+
return static_cast<E>(Get(i));
|
185
|
+
}
|
186
|
+
|
187
|
+
// If this a vector of unions, this does the cast for you. There's no check
|
188
|
+
// to make sure this is the right type!
|
189
|
+
template<typename U> const U *GetAs(uoffset_t i) const {
|
190
|
+
return reinterpret_cast<const U *>(Get(i));
|
191
|
+
}
|
192
|
+
|
193
|
+
// If this a vector of unions, this does the cast for you. There's no check
|
194
|
+
// to make sure this is actually a string!
|
195
|
+
const String *GetAsString(uoffset_t i) const {
|
196
|
+
return reinterpret_cast<const String *>(Get(i));
|
197
|
+
}
|
198
|
+
|
199
|
+
const void *GetStructFromOffset(size_t o) const {
|
200
|
+
return reinterpret_cast<const void *>(Data() + o);
|
201
|
+
}
|
202
|
+
|
203
|
+
iterator begin() { return iterator(Data(), 0); }
|
204
|
+
const_iterator begin() const { return const_iterator(Data(), 0); }
|
205
|
+
|
206
|
+
iterator end() { return iterator(Data(), size()); }
|
207
|
+
const_iterator end() const { return const_iterator(Data(), size()); }
|
208
|
+
|
209
|
+
reverse_iterator rbegin() { return reverse_iterator(end()); }
|
210
|
+
const_reverse_iterator rbegin() const {
|
211
|
+
return const_reverse_iterator(end());
|
212
|
+
}
|
213
|
+
|
214
|
+
reverse_iterator rend() { return reverse_iterator(begin()); }
|
215
|
+
const_reverse_iterator rend() const {
|
216
|
+
return const_reverse_iterator(begin());
|
217
|
+
}
|
218
|
+
|
219
|
+
const_iterator cbegin() const { return begin(); }
|
220
|
+
|
221
|
+
const_iterator cend() const { return end(); }
|
222
|
+
|
223
|
+
const_reverse_iterator crbegin() const { return rbegin(); }
|
224
|
+
|
225
|
+
const_reverse_iterator crend() const { return rend(); }
|
226
|
+
|
227
|
+
// Change elements if you have a non-const pointer to this object.
|
228
|
+
// Scalars only. See reflection.h, and the documentation.
|
229
|
+
void Mutate(uoffset_t i, const T &val) {
|
230
|
+
FLATBUFFERS_ASSERT(i < size());
|
231
|
+
WriteScalar(data() + i, val);
|
232
|
+
}
|
233
|
+
|
234
|
+
// Change an element of a vector of tables (or strings).
|
235
|
+
// "val" points to the new table/string, as you can obtain from
|
236
|
+
// e.g. reflection::AddFlatBuffer().
|
237
|
+
void MutateOffset(uoffset_t i, const uint8_t *val) {
|
238
|
+
FLATBUFFERS_ASSERT(i < size());
|
239
|
+
static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
|
240
|
+
WriteScalar(data() + i,
|
241
|
+
static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
|
242
|
+
}
|
243
|
+
|
244
|
+
// Get a mutable pointer to tables/strings inside this vector.
|
245
|
+
mutable_return_type GetMutableObject(uoffset_t i) const {
|
246
|
+
FLATBUFFERS_ASSERT(i < size());
|
247
|
+
return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
|
248
|
+
}
|
249
|
+
|
250
|
+
// The raw data in little endian format. Use with care.
|
251
|
+
const uint8_t *Data() const {
|
252
|
+
return reinterpret_cast<const uint8_t *>(&length_ + 1);
|
253
|
+
}
|
254
|
+
|
255
|
+
uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
|
256
|
+
|
257
|
+
// Similarly, but typed, much like std::vector::data
|
258
|
+
const T *data() const { return reinterpret_cast<const T *>(Data()); }
|
259
|
+
T *data() { return reinterpret_cast<T *>(Data()); }
|
260
|
+
|
261
|
+
template<typename K> return_type LookupByKey(K key) const {
|
262
|
+
void *search_result = std::bsearch(
|
263
|
+
&key, Data(), size(), IndirectHelper<T>::element_stride, KeyCompare<K>);
|
264
|
+
|
265
|
+
if (!search_result) {
|
266
|
+
return nullptr; // Key not found.
|
267
|
+
}
|
268
|
+
|
269
|
+
const uint8_t *element = reinterpret_cast<const uint8_t *>(search_result);
|
270
|
+
|
271
|
+
return IndirectHelper<T>::Read(element, 0);
|
272
|
+
}
|
273
|
+
|
274
|
+
template<typename K> mutable_return_type MutableLookupByKey(K key) {
|
275
|
+
return const_cast<mutable_return_type>(LookupByKey(key));
|
276
|
+
}
|
277
|
+
|
278
|
+
protected:
|
279
|
+
// This class is only used to access pre-existing data. Don't ever
|
280
|
+
// try to construct these manually.
|
281
|
+
Vector();
|
282
|
+
|
283
|
+
uoffset_t length_;
|
284
|
+
|
285
|
+
private:
|
286
|
+
// This class is a pointer. Copying will therefore create an invalid object.
|
287
|
+
// Private and unimplemented copy constructor.
|
288
|
+
Vector(const Vector &);
|
289
|
+
Vector &operator=(const Vector &);
|
290
|
+
|
291
|
+
template<typename K> static int KeyCompare(const void *ap, const void *bp) {
|
292
|
+
const K *key = reinterpret_cast<const K *>(ap);
|
293
|
+
const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
|
294
|
+
auto table = IndirectHelper<T>::Read(data, 0);
|
295
|
+
|
296
|
+
// std::bsearch compares with the operands transposed, so we negate the
|
297
|
+
// result here.
|
298
|
+
return -table->KeyCompareWithValue(*key);
|
299
|
+
}
|
300
|
+
};
|
301
|
+
|
302
|
+
template<class U>
|
303
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
|
304
|
+
FLATBUFFERS_NOEXCEPT {
|
305
|
+
static_assert(Vector<U>::is_span_observable,
|
306
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
307
|
+
return span<U>(vec.data(), vec.size());
|
308
|
+
}
|
309
|
+
|
310
|
+
template<class U>
|
311
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
|
312
|
+
const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
|
313
|
+
static_assert(Vector<U>::is_span_observable,
|
314
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
315
|
+
return span<const U>(vec.data(), vec.size());
|
316
|
+
}
|
317
|
+
|
318
|
+
template<class U>
|
319
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t> make_bytes_span(
|
320
|
+
Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
|
321
|
+
static_assert(Vector<U>::scalar_tag::value,
|
322
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
323
|
+
return span<uint8_t>(vec.Data(), vec.size() * sizeof(U));
|
324
|
+
}
|
325
|
+
|
326
|
+
template<class U>
|
327
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t> make_bytes_span(
|
328
|
+
const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
|
329
|
+
static_assert(Vector<U>::scalar_tag::value,
|
330
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
331
|
+
return span<const uint8_t>(vec.Data(), vec.size() * sizeof(U));
|
332
|
+
}
|
333
|
+
|
334
|
+
// Convenient helper functions to get a span of any vector, regardless
|
335
|
+
// of whether it is null or not (the field is not set).
|
336
|
+
template<class U>
|
337
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> *ptr)
|
338
|
+
FLATBUFFERS_NOEXCEPT {
|
339
|
+
static_assert(Vector<U>::is_span_observable,
|
340
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
341
|
+
return ptr ? make_span(*ptr) : span<U>();
|
342
|
+
}
|
343
|
+
|
344
|
+
template<class U>
|
345
|
+
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
|
346
|
+
const Vector<U> *ptr) FLATBUFFERS_NOEXCEPT {
|
347
|
+
static_assert(Vector<U>::is_span_observable,
|
348
|
+
"wrong type U, only LE-scalar, or byte types are allowed");
|
349
|
+
return ptr ? make_span(*ptr) : span<const U>();
|
350
|
+
}
|
351
|
+
|
352
|
+
// Represent a vector much like the template above, but in this case we
|
353
|
+
// don't know what the element types are (used with reflection.h).
|
354
|
+
class VectorOfAny {
|
355
|
+
public:
|
356
|
+
uoffset_t size() const { return EndianScalar(length_); }
|
357
|
+
|
358
|
+
const uint8_t *Data() const {
|
359
|
+
return reinterpret_cast<const uint8_t *>(&length_ + 1);
|
360
|
+
}
|
361
|
+
uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
|
362
|
+
|
363
|
+
protected:
|
364
|
+
VectorOfAny();
|
365
|
+
|
366
|
+
uoffset_t length_;
|
367
|
+
|
368
|
+
private:
|
369
|
+
VectorOfAny(const VectorOfAny &);
|
370
|
+
VectorOfAny &operator=(const VectorOfAny &);
|
371
|
+
};
|
372
|
+
|
373
|
+
template<typename T, typename U>
|
374
|
+
Vector<Offset<T>> *VectorCast(Vector<Offset<U>> *ptr) {
|
375
|
+
static_assert(std::is_base_of<T, U>::value, "Unrelated types");
|
376
|
+
return reinterpret_cast<Vector<Offset<T>> *>(ptr);
|
377
|
+
}
|
378
|
+
|
379
|
+
template<typename T, typename U>
|
380
|
+
const Vector<Offset<T>> *VectorCast(const Vector<Offset<U>> *ptr) {
|
381
|
+
static_assert(std::is_base_of<T, U>::value, "Unrelated types");
|
382
|
+
return reinterpret_cast<const Vector<Offset<T>> *>(ptr);
|
383
|
+
}
|
384
|
+
|
385
|
+
// Convenient helper function to get the length of any vector, regardless
|
386
|
+
// of whether it is null or not (the field is not set).
|
387
|
+
template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
|
388
|
+
return v ? v->size() : 0;
|
389
|
+
}
|
390
|
+
|
391
|
+
} // namespace flatbuffers
|
392
|
+
|
393
|
+
#endif  // FLATBUFFERS_VECTOR_H_
|
@@ -0,0 +1,273 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright 2021 Google Inc. All rights reserved.
|
3
|
+
*
|
4
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
* you may not use this file except in compliance with the License.
|
6
|
+
* You may obtain a copy of the License at
|
7
|
+
*
|
8
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
*
|
10
|
+
* Unless required by applicable law or agreed to in writing, software
|
11
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
* See the License for the specific language governing permissions and
|
14
|
+
* limitations under the License.
|
15
|
+
*/
|
16
|
+
|
17
|
+
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
|
18
|
+
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
|
19
|
+
|
20
|
+
#include <algorithm>
|
21
|
+
|
22
|
+
#include "flatbuffers/base.h"
|
23
|
+
#include "flatbuffers/default_allocator.h"
|
24
|
+
#include "flatbuffers/detached_buffer.h"
|
25
|
+
|
26
|
+
namespace flatbuffers {
|
27
|
+
|
28
|
+
// This is a minimal replication of std::vector<uint8_t> functionality,
|
29
|
+
// except growing from higher to lower addresses. i.e. push_back() inserts data
|
30
|
+
// in the lowest address in the vector.
|
31
|
+
// Since this vector leaves the lower part unused, we support a "scratch-pad"
|
32
|
+
// that can be stored there for temporary data, to share the allocated space.
|
33
|
+
// Essentially, this supports 2 std::vectors in a single buffer.
|
34
|
+
class vector_downward {
|
35
|
+
public:
|
36
|
+
explicit vector_downward(size_t initial_size, Allocator *allocator,
|
37
|
+
bool own_allocator, size_t buffer_minalign)
|
38
|
+
: allocator_(allocator),
|
39
|
+
own_allocator_(own_allocator),
|
40
|
+
initial_size_(initial_size),
|
41
|
+
buffer_minalign_(buffer_minalign),
|
42
|
+
reserved_(0),
|
43
|
+
size_(0),
|
44
|
+
buf_(nullptr),
|
45
|
+
cur_(nullptr),
|
46
|
+
scratch_(nullptr) {}
|
47
|
+
|
48
|
+
vector_downward(vector_downward &&other) noexcept
|
49
|
+
// clang-format on
|
50
|
+
: allocator_(other.allocator_),
|
51
|
+
own_allocator_(other.own_allocator_),
|
52
|
+
initial_size_(other.initial_size_),
|
53
|
+
buffer_minalign_(other.buffer_minalign_),
|
54
|
+
reserved_(other.reserved_),
|
55
|
+
size_(other.size_),
|
56
|
+
buf_(other.buf_),
|
57
|
+
cur_(other.cur_),
|
58
|
+
scratch_(other.scratch_) {
|
59
|
+
// No change in other.allocator_
|
60
|
+
// No change in other.initial_size_
|
61
|
+
// No change in other.buffer_minalign_
|
62
|
+
other.own_allocator_ = false;
|
63
|
+
other.reserved_ = 0;
|
64
|
+
other.buf_ = nullptr;
|
65
|
+
other.cur_ = nullptr;
|
66
|
+
other.scratch_ = nullptr;
|
67
|
+
}
|
68
|
+
|
69
|
+
vector_downward &operator=(vector_downward &&other) noexcept {
|
70
|
+
// Move construct a temporary and swap idiom
|
71
|
+
vector_downward temp(std::move(other));
|
72
|
+
swap(temp);
|
73
|
+
return *this;
|
74
|
+
}
|
75
|
+
|
76
|
+
~vector_downward() {
|
77
|
+
clear_buffer();
|
78
|
+
clear_allocator();
|
79
|
+
}
|
80
|
+
|
81
|
+
void reset() {
|
82
|
+
clear_buffer();
|
83
|
+
clear();
|
84
|
+
}
|
85
|
+
|
86
|
+
void clear() {
|
87
|
+
if (buf_) {
|
88
|
+
cur_ = buf_ + reserved_;
|
89
|
+
} else {
|
90
|
+
reserved_ = 0;
|
91
|
+
cur_ = nullptr;
|
92
|
+
}
|
93
|
+
size_ = 0;
|
94
|
+
clear_scratch();
|
95
|
+
}
|
96
|
+
|
97
|
+
void clear_scratch() { scratch_ = buf_; }
|
98
|
+
|
99
|
+
void clear_allocator() {
|
100
|
+
if (own_allocator_ && allocator_) { delete allocator_; }
|
101
|
+
allocator_ = nullptr;
|
102
|
+
own_allocator_ = false;
|
103
|
+
}
|
104
|
+
|
105
|
+
void clear_buffer() {
|
106
|
+
if (buf_) Deallocate(allocator_, buf_, reserved_);
|
107
|
+
buf_ = nullptr;
|
108
|
+
}
|
109
|
+
|
110
|
+
// Relinquish the pointer to the caller.
|
111
|
+
uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
|
112
|
+
auto *buf = buf_;
|
113
|
+
allocated_bytes = reserved_;
|
114
|
+
offset = static_cast<size_t>(cur_ - buf_);
|
115
|
+
|
116
|
+
// release_raw only relinquishes the buffer ownership.
|
117
|
+
// Does not deallocate or reset the allocator. Destructor will do that.
|
118
|
+
buf_ = nullptr;
|
119
|
+
clear();
|
120
|
+
return buf;
|
121
|
+
}
|
122
|
+
|
123
|
+
// Relinquish the pointer to the caller.
|
124
|
+
DetachedBuffer release() {
|
125
|
+
// allocator ownership (if any) is transferred to DetachedBuffer.
|
126
|
+
DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
|
127
|
+
size());
|
128
|
+
if (own_allocator_) {
|
129
|
+
allocator_ = nullptr;
|
130
|
+
own_allocator_ = false;
|
131
|
+
}
|
132
|
+
buf_ = nullptr;
|
133
|
+
clear();
|
134
|
+
return fb;
|
135
|
+
}
|
136
|
+
|
137
|
+
size_t ensure_space(size_t len) {
|
138
|
+
FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
|
139
|
+
if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
|
140
|
+
// Beyond this, signed offsets may not have enough range:
|
141
|
+
// (FlatBuffers > 2GB not supported).
|
142
|
+
FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
|
143
|
+
return len;
|
144
|
+
}
|
145
|
+
|
146
|
+
inline uint8_t *make_space(size_t len) {
|
147
|
+
if (len) {
|
148
|
+
ensure_space(len);
|
149
|
+
cur_ -= len;
|
150
|
+
size_ += static_cast<uoffset_t>(len);
|
151
|
+
}
|
152
|
+
return cur_;
|
153
|
+
}
|
154
|
+
|
155
|
+
// Returns nullptr if using the DefaultAllocator.
|
156
|
+
Allocator *get_custom_allocator() { return allocator_; }
|
157
|
+
|
158
|
+
inline uoffset_t size() const { return size_; }
|
159
|
+
|
160
|
+
uoffset_t scratch_size() const {
|
161
|
+
return static_cast<uoffset_t>(scratch_ - buf_);
|
162
|
+
}
|
163
|
+
|
164
|
+
size_t capacity() const { return reserved_; }
|
165
|
+
|
166
|
+
uint8_t *data() const {
|
167
|
+
FLATBUFFERS_ASSERT(cur_);
|
168
|
+
return cur_;
|
169
|
+
}
|
170
|
+
|
171
|
+
uint8_t *scratch_data() const {
|
172
|
+
FLATBUFFERS_ASSERT(buf_);
|
173
|
+
return buf_;
|
174
|
+
}
|
175
|
+
|
176
|
+
uint8_t *scratch_end() const {
|
177
|
+
FLATBUFFERS_ASSERT(scratch_);
|
178
|
+
return scratch_;
|
179
|
+
}
|
180
|
+
|
181
|
+
uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }
|
182
|
+
|
183
|
+
void push(const uint8_t *bytes, size_t num) {
|
184
|
+
if (num > 0) { memcpy(make_space(num), bytes, num); }
|
185
|
+
}
|
186
|
+
|
187
|
+
// Specialized version of push() that avoids memcpy call for small data.
|
188
|
+
template<typename T> void push_small(const T &little_endian_t) {
|
189
|
+
make_space(sizeof(T));
|
190
|
+
*reinterpret_cast<T *>(cur_) = little_endian_t;
|
191
|
+
}
|
192
|
+
|
193
|
+
template<typename T> void scratch_push_small(const T &t) {
|
194
|
+
ensure_space(sizeof(T));
|
195
|
+
*reinterpret_cast<T *>(scratch_) = t;
|
196
|
+
scratch_ += sizeof(T);
|
197
|
+
}
|
198
|
+
|
199
|
+
// fill() is most frequently called with small byte counts (<= 4),
|
200
|
+
// which is why we're using loops rather than calling memset.
|
201
|
+
void fill(size_t zero_pad_bytes) {
|
202
|
+
make_space(zero_pad_bytes);
|
203
|
+
for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
|
204
|
+
}
|
205
|
+
|
206
|
+
// Version for when we know the size is larger.
|
207
|
+
// Precondition: zero_pad_bytes > 0
|
208
|
+
void fill_big(size_t zero_pad_bytes) {
|
209
|
+
memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
|
210
|
+
}
|
211
|
+
|
212
|
+
void pop(size_t bytes_to_remove) {
|
213
|
+
cur_ += bytes_to_remove;
|
214
|
+
size_ -= static_cast<uoffset_t>(bytes_to_remove);
|
215
|
+
}
|
216
|
+
|
217
|
+
void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
|
218
|
+
|
219
|
+
void swap(vector_downward &other) {
|
220
|
+
using std::swap;
|
221
|
+
swap(allocator_, other.allocator_);
|
222
|
+
swap(own_allocator_, other.own_allocator_);
|
223
|
+
swap(initial_size_, other.initial_size_);
|
224
|
+
swap(buffer_minalign_, other.buffer_minalign_);
|
225
|
+
swap(reserved_, other.reserved_);
|
226
|
+
swap(size_, other.size_);
|
227
|
+
swap(buf_, other.buf_);
|
228
|
+
swap(cur_, other.cur_);
|
229
|
+
swap(scratch_, other.scratch_);
|
230
|
+
}
|
231
|
+
|
232
|
+
void swap_allocator(vector_downward &other) {
|
233
|
+
using std::swap;
|
234
|
+
swap(allocator_, other.allocator_);
|
235
|
+
swap(own_allocator_, other.own_allocator_);
|
236
|
+
}
|
237
|
+
|
238
|
+
private:
|
239
|
+
// You shouldn't really be copying instances of this class.
|
240
|
+
FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
|
241
|
+
FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));
|
242
|
+
|
243
|
+
Allocator *allocator_;
|
244
|
+
bool own_allocator_;
|
245
|
+
size_t initial_size_;
|
246
|
+
size_t buffer_minalign_;
|
247
|
+
size_t reserved_;
|
248
|
+
uoffset_t size_;
|
249
|
+
uint8_t *buf_;
|
250
|
+
uint8_t *cur_; // Points at location between empty (below) and used (above).
|
251
|
+
uint8_t *scratch_; // Points to the end of the scratchpad in use.
|
252
|
+
|
253
|
+
void reallocate(size_t len) {
|
254
|
+
auto old_reserved = reserved_;
|
255
|
+
auto old_size = size();
|
256
|
+
auto old_scratch_size = scratch_size();
|
257
|
+
reserved_ +=
|
258
|
+
(std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
|
259
|
+
reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
|
260
|
+
if (buf_) {
|
261
|
+
buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
|
262
|
+
old_size, old_scratch_size);
|
263
|
+
} else {
|
264
|
+
buf_ = Allocate(allocator_, reserved_);
|
265
|
+
}
|
266
|
+
cur_ = buf_ + reserved_ - old_size;
|
267
|
+
scratch_ = buf_ + old_scratch_size;
|
268
|
+
}
|
269
|
+
};

}  // namespace flatbuffers

#endif  // FLATBUFFERS_VECTOR_DOWNWARD_H_