xmos-ai-tools 1.3.2.dev180__py3-none-macosx_10_15_universal2.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the differences between package versions as they appear in their public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +73 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +307 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +153 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +53 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h +64 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +79 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +68 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +64 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +651 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev180.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev180.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev180.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev180.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev180.dist-info/top_level.txt +1 -0
xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h

```diff
@@ -0,0 +1,1897 @@
+/*
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_FLEXBUFFERS_H_
+#define FLATBUFFERS_FLEXBUFFERS_H_
+
+#include <map>
+// Used to select STL variant.
+#include "flatbuffers/base.h"
+// We use the basic binary writing functions from the regular FlatBuffers.
+#include "flatbuffers/util.h"
+
+#ifdef _MSC_VER
+#  include <intrin.h>
+#endif
+
+#if defined(_MSC_VER)
+#  pragma warning(push)
+#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
+#endif
+
+namespace flexbuffers {
+
+class Reference;
+class Map;
+
+// These are used in the lower 2 bits of a type field to determine the size of
+// the elements (and or size field) of the item pointed to (e.g. vector).
+enum BitWidth {
+  BIT_WIDTH_8 = 0,
+  BIT_WIDTH_16 = 1,
+  BIT_WIDTH_32 = 2,
+  BIT_WIDTH_64 = 3,
+};
+
+// These are used as the upper 6 bits of a type field to indicate the actual
+// type.
+enum Type {
+  FBT_NULL = 0,
+  FBT_INT = 1,
+  FBT_UINT = 2,
+  FBT_FLOAT = 3,
+  // Types above stored inline, types below (except FBT_BOOL) store an offset.
+  FBT_KEY = 4,
+  FBT_STRING = 5,
+  FBT_INDIRECT_INT = 6,
+  FBT_INDIRECT_UINT = 7,
+  FBT_INDIRECT_FLOAT = 8,
+  FBT_MAP = 9,
+  FBT_VECTOR = 10,      // Untyped.
+  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
+  FBT_VECTOR_UINT = 12,
+  FBT_VECTOR_FLOAT = 13,
+  FBT_VECTOR_KEY = 14,
+  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
+  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
+  FBT_VECTOR_STRING_DEPRECATED = 15,
+  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
+  FBT_VECTOR_UINT2 = 17,
+  FBT_VECTOR_FLOAT2 = 18,
+  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
+  FBT_VECTOR_UINT3 = 20,
+  FBT_VECTOR_FLOAT3 = 21,
+  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
+  FBT_VECTOR_UINT4 = 23,
+  FBT_VECTOR_FLOAT4 = 24,
+  FBT_BLOB = 25,
+  FBT_BOOL = 26,
+  FBT_VECTOR_BOOL =
+      36,  // To Allow the same type of conversion of type to vector type
+
+  FBT_MAX_TYPE = 37
+};
+
+inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }
+
+inline bool IsTypedVectorElementType(Type t) {
+  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
+}
+
+inline bool IsTypedVector(Type t) {
+  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
+         t == FBT_VECTOR_BOOL;
+}
+
+inline bool IsFixedTypedVector(Type t) {
+  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
+}
+
+inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
+  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
+  switch (fixed_len) {
+    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
+    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
+    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
+    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
+    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
+  }
+}
+
+inline Type ToTypedVectorElementType(Type t) {
+  FLATBUFFERS_ASSERT(IsTypedVector(t));
+  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
+}
+
+inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
+  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
+  auto fixed_type = t - FBT_VECTOR_INT2;
+  *len = static_cast<uint8_t>(fixed_type / 3 +
+                              2);  // 3 types each, starting from length 2.
+  return static_cast<Type>(fixed_type % 3 + FBT_INT);
+}
+
+// TODO: implement proper support for 8/16bit floats, or decide not to
+// support them.
+typedef int16_t half;
+typedef int8_t quarter;
+
+// TODO: can we do this without conditionals using intrinsics or inline asm
+// on some platforms? Given branch prediction the method below should be
+// decently quick, but it is the most frequently executed function.
+// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
+// which that doesn't work (or where we'd read into un-owned memory).
+template<typename R, typename T1, typename T2, typename T4, typename T8>
+R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
+  return byte_width < 4
+             ? (byte_width < 2
+                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
+                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
+             : (byte_width < 8
+                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
+                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
+}
+
+inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
+  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
+      data, byte_width);
+}
+
+inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
+  // This is the "hottest" function (all offset lookups use this), so worth
+  // optimizing if possible.
+  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a
+  // constant, which here it isn't. Test if memcpy is still faster than
+  // the conditionals in ReadSizedScalar. Can also use inline asm.
+  // clang-format off
+  #if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
+    // This is 64-bit Windows only, __movsb does not work on 32-bit Windows.
+    uint64_t u = 0;
+    __movsb(reinterpret_cast<uint8_t *>(&u),
+            reinterpret_cast<const uint8_t *>(data), byte_width);
+    return flatbuffers::EndianScalar(u);
+  #else
+    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
+        data, byte_width);
+  #endif
+  // clang-format on
+}
+
+inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
+  return ReadSizedScalar<double, quarter, half, float, double>(data,
+                                                               byte_width);
+}
+
+inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
+  return offset - ReadUInt64(offset, byte_width);
+}
+
+template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
+  return offset - flatbuffers::ReadScalar<T>(offset);
+}
+
+inline BitWidth WidthU(uint64_t u) {
+#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
+  {                                                                     \
+    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
+  }
+  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
+  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
+  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
+#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
+  return BIT_WIDTH_64;
+}
+
+inline BitWidth WidthI(int64_t i) {
+  auto u = static_cast<uint64_t>(i) << 1;
+  return WidthU(i >= 0 ? u : ~u);
+}
+
+inline BitWidth WidthF(double f) {
+  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
+                                                         : BIT_WIDTH_64;
+}
+
+// Base class of all types below.
+// Points into the data buffer and allows access to one type.
+class Object {
+ public:
+  Object(const uint8_t *data, uint8_t byte_width)
+      : data_(data), byte_width_(byte_width) {}
+
+ protected:
+  const uint8_t *data_;
+  uint8_t byte_width_;
+};
+
+// Object that has a size, obtained either from size prefix, or elsewhere.
+class Sized : public Object {
+ public:
+  // Size prefix.
+  Sized(const uint8_t *data, uint8_t byte_width)
+      : Object(data, byte_width), size_(read_size()) {}
+  // Manual size.
+  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
+      : Object(data, byte_width), size_(sz) {}
+  size_t size() const { return size_; }
+  // Access size stored in `byte_width_` bytes before data_ pointer.
+  size_t read_size() const {
+    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
+  }
+
+ protected:
+  size_t size_;
+};
+
+class String : public Sized {
+ public:
+  // Size prefix.
+  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
+  // Manual size.
+  String(const uint8_t *data, uint8_t byte_width, size_t sz)
+      : Sized(data, byte_width, sz) {}
+
+  size_t length() const { return size(); }
+  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
+  std::string str() const { return std::string(c_str(), size()); }
+
+  static String EmptyString() {
+    static const char *empty_string = "";
+    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
+  }
+  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
+};
+
+class Blob : public Sized {
+ public:
+  Blob(const uint8_t *data_buf, uint8_t byte_width)
+      : Sized(data_buf, byte_width) {}
+
+  static Blob EmptyBlob() {
+    static const uint8_t empty_blob[] = { 0 /*len*/ };
+    return Blob(empty_blob + 1, 1);
+  }
+  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
+  const uint8_t *data() const { return data_; }
+};
+
+class Vector : public Sized {
+ public:
+  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
+
+  Reference operator[](size_t i) const;
+
+  static Vector EmptyVector() {
+    static const uint8_t empty_vector[] = { 0 /*len*/ };
+    return Vector(empty_vector + 1, 1);
+  }
+  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
+};
+
+class TypedVector : public Sized {
+ public:
+  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
+      : Sized(data, byte_width), type_(element_type) {}
+
+  Reference operator[](size_t i) const;
+
+  static TypedVector EmptyTypedVector() {
+    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
+    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
+  }
+  bool IsTheEmptyVector() const {
+    return data_ == TypedVector::EmptyTypedVector().data_;
+  }
+
+  Type ElementType() { return type_; }
+
+  friend Reference;
+
+ private:
+  Type type_;
+
+  friend Map;
+};
+
+class FixedTypedVector : public Object {
+ public:
+  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
+                   uint8_t len)
+      : Object(data, byte_width), type_(element_type), len_(len) {}
+
+  Reference operator[](size_t i) const;
+
+  static FixedTypedVector EmptyFixedTypedVector() {
+    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
+    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
+  }
+  bool IsTheEmptyFixedTypedVector() const {
+    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
+  }
+
+  Type ElementType() const { return type_; }
+  uint8_t size() const { return len_; }
+
+ private:
+  Type type_;
+  uint8_t len_;
+};
+
+class Map : public Vector {
+ public:
+  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}
+
+  Reference operator[](const char *key) const;
+  Reference operator[](const std::string &key) const;
+
+  Vector Values() const { return Vector(data_, byte_width_); }
+
+  TypedVector Keys() const {
+    const size_t num_prefixed_fields = 3;
+    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
+    return TypedVector(Indirect(keys_offset, byte_width_),
+                       static_cast<uint8_t>(
+                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
+                       FBT_KEY);
+  }
+
+  static Map EmptyMap() {
+    static const uint8_t empty_map[] = {
+      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
+    };
+    return Map(empty_map + 4, 1);
+  }
+
+  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
+};
+
+template<typename T>
+void AppendToString(std::string &s, T &&v, bool keys_quoted) {
+  s += "[ ";
+  for (size_t i = 0; i < v.size(); i++) {
+    if (i) s += ", ";
+    v[i].ToString(true, keys_quoted, s);
+  }
+  s += " ]";
+}
+
+class Reference {
+ public:
+  Reference()
+      : data_(nullptr), parent_width_(0), byte_width_(0), type_(FBT_NULL) {}
+
+  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
+            Type type)
+      : data_(data),
+        parent_width_(parent_width),
+        byte_width_(byte_width),
+        type_(type) {}
+
+  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
+      : data_(data), parent_width_(parent_width) {
+    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
+    type_ = static_cast<Type>(packed_type >> 2);
+  }
+
+  Type GetType() const { return type_; }
+
+  bool IsNull() const { return type_ == FBT_NULL; }
+  bool IsBool() const { return type_ == FBT_BOOL; }
+  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
+  bool IsUInt() const {
+    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
+  }
+  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
+  bool IsFloat() const {
+    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
+  }
+  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
+  bool IsString() const { return type_ == FBT_STRING; }
+  bool IsKey() const { return type_ == FBT_KEY; }
+  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
+  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
+  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
+  bool IsFixedTypedVector() const {
+    return flexbuffers::IsFixedTypedVector(type_);
+  }
+  bool IsAnyVector() const {
+    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
+  }
+  bool IsMap() const { return type_ == FBT_MAP; }
+  bool IsBlob() const { return type_ == FBT_BLOB; }
+  bool AsBool() const {
+    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
+                              : AsUInt64()) != 0;
+  }
+
+  // Reads any type as a int64_t. Never fails, does most sensible conversion.
+  // Truncates floats, strings are attempted to be parsed for a number,
+  // vectors/maps return their size. Returns 0 if all else fails.
+  int64_t AsInt64() const {
+    if (type_ == FBT_INT) {
+      // A fast path for the common case.
+      return ReadInt64(data_, parent_width_);
+    } else
+      switch (type_) {
+        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
+        case FBT_UINT: return ReadUInt64(data_, parent_width_);
+        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
+        case FBT_FLOAT:
+          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
+        case FBT_INDIRECT_FLOAT:
+          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
+        case FBT_NULL: return 0;
+        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
+        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
+        case FBT_BOOL: return ReadInt64(data_, parent_width_);
+        default:
+          // Convert other things to int.
+          return 0;
+      }
+  }
+
+  // TODO: could specialize these to not use AsInt64() if that saves
+  // extension ops in generated code, and use a faster op than ReadInt64.
+  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
+  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
+  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }
+
+  uint64_t AsUInt64() const {
+    if (type_ == FBT_UINT) {
+      // A fast path for the common case.
+      return ReadUInt64(data_, parent_width_);
+    } else
+      switch (type_) {
+        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
+        case FBT_INT: return ReadInt64(data_, parent_width_);
+        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
+        case FBT_FLOAT:
+          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
+        case FBT_INDIRECT_FLOAT:
+          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
+        case FBT_NULL: return 0;
+        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
+        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
+        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
+        default:
+          // Convert other things to uint.
+          return 0;
+      }
+  }
+
+  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
+  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
+  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }
+
+  double AsDouble() const {
+    if (type_ == FBT_FLOAT) {
+      // A fast path for the common case.
+      return ReadDouble(data_, parent_width_);
+    } else
+      switch (type_) {
+        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
+        case FBT_INT:
+          return static_cast<double>(ReadInt64(data_, parent_width_));
+        case FBT_UINT:
+          return static_cast<double>(ReadUInt64(data_, parent_width_));
+        case FBT_INDIRECT_INT:
+          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
+        case FBT_INDIRECT_UINT:
+          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
+        case FBT_NULL: return 0.0;
+        case FBT_STRING: {
+          double d;
+          flatbuffers::StringToNumber(AsString().c_str(), &d);
+          return d;
+        }
+        case FBT_VECTOR: return static_cast<double>(AsVector().size());
+        case FBT_BOOL:
+          return static_cast<double>(ReadUInt64(data_, parent_width_));
+        default:
+          // Convert strings and other things to float.
+          return 0;
+      }
+  }
+
+  float AsFloat() const { return static_cast<float>(AsDouble()); }
+
+  const char *AsKey() const {
+    if (type_ == FBT_KEY || type_ == FBT_STRING) {
+      return reinterpret_cast<const char *>(Indirect());
+    } else {
+      return "";
+    }
+  }
+
+  // This function returns the empty string if you try to read something that
+  // is not a string or key.
+  String AsString() const {
+    if (type_ == FBT_STRING) {
+      return String(Indirect(), byte_width_);
+    } else if (type_ == FBT_KEY) {
+      auto key = Indirect();
+      return String(key, byte_width_,
+                    strlen(reinterpret_cast<const char *>(key)));
+    } else {
+      return String::EmptyString();
+    }
+  }
+
+  // Unlike AsString(), this will convert any type to a std::string.
+  std::string ToString() const {
+    std::string s;
+    ToString(false, false, s);
+    return s;
+  }
+
+  // Convert any type to a JSON-like string. strings_quoted determines if
+  // string values at the top level receive "" quotes (inside other values
+  // they always do). keys_quoted determines if keys are quoted, at any level.
+  // TODO(wvo): add further options to have indentation/newlines.
+  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
+    if (type_ == FBT_STRING) {
+      String str(Indirect(), byte_width_);
+      if (strings_quoted) {
+        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
+      } else {
+        s.append(str.c_str(), str.length());
+      }
+    } else if (IsKey()) {
+      auto str = AsKey();
+      if (keys_quoted) {
+        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
+      } else {
+        s += str;
+      }
+    } else if (IsInt()) {
+      s += flatbuffers::NumToString(AsInt64());
+    } else if (IsUInt()) {
+      s += flatbuffers::NumToString(AsUInt64());
+    } else if (IsFloat()) {
+      s += flatbuffers::NumToString(AsDouble());
+    } else if (IsNull()) {
+      s += "null";
+    } else if (IsBool()) {
+      s += AsBool() ? "true" : "false";
+    } else if (IsMap()) {
+      s += "{ ";
+      auto m = AsMap();
+      auto keys = m.Keys();
+      auto vals = m.Values();
+      for (size_t i = 0; i < keys.size(); i++) {
+        bool kq = keys_quoted;
+        if (!kq) {
+          // FlexBuffers keys may contain arbitrary characters, only allow
+          // unquoted if it looks like an "identifier":
+          const char *p = keys[i].AsKey();
+          if (!flatbuffers::is_alpha(*p) && *p != '_') {
+            kq = true;
+          } else {
+            while (*++p) {
+              if (!flatbuffers::is_alnum(*p) && *p != '_') {
+                kq = true;
+                break;
+              }
+            }
+          }
+        }
+        keys[i].ToString(true, kq, s);
+        s += ": ";
+        vals[i].ToString(true, keys_quoted, s);
+        if (i < keys.size() - 1) s += ", ";
+      }
+      s += " }";
+    } else if (IsVector()) {
+      AppendToString<Vector>(s, AsVector(), keys_quoted);
+    } else if (IsTypedVector()) {
+      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
+    } else if (IsFixedTypedVector()) {
+      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
+    } else if (IsBlob()) {
+      auto blob = AsBlob();
+      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
+                                blob.size(), &s, true, false);
+    } else {
+      s += "(?)";
+    }
+  }
+
+  // This function returns the empty blob if you try to read a not-blob.
+  // Strings can be viewed as blobs too.
+  Blob AsBlob() const {
+    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
+      return Blob(Indirect(), byte_width_);
+    } else {
+      return Blob::EmptyBlob();
+    }
+  }
+
+  // This function returns the empty vector if you try to read a not-vector.
+  // Maps can be viewed as vectors too.
+  Vector AsVector() const {
+    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
+      return Vector(Indirect(), byte_width_);
+    } else {
+      return Vector::EmptyVector();
+    }
+  }
+
+  TypedVector AsTypedVector() const {
+    if (IsTypedVector()) {
+      auto tv =
+          TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
+      if (tv.type_ == FBT_STRING) {
+        // These can't be accessed as strings, since we don't know the bit-width
+        // of the size field, see the declaration of
+        // FBT_VECTOR_STRING_DEPRECATED above for details.
+        // We change the type here to be keys, which are a subtype of strings,
+        // and will ignore the size field. This will truncate strings with
+        // embedded nulls.
+        tv.type_ = FBT_KEY;
+      }
+      return tv;
+    } else {
+      return TypedVector::EmptyTypedVector();
+    }
+  }
+
+  FixedTypedVector AsFixedTypedVector() const {
+    if (IsFixedTypedVector()) {
+      uint8_t len = 0;
+      auto vtype = ToFixedTypedVectorElementType(type_, &len);
+      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
+    } else {
+      return FixedTypedVector::EmptyFixedTypedVector();
+    }
+  }
+
+  Map AsMap() const {
+    if (type_ == FBT_MAP) {
+      return Map(Indirect(), byte_width_);
+    } else {
+      return Map::EmptyMap();
+    }
+  }
+
+  template<typename T> T As() const;
+
+  // Experimental: Mutation functions.
+  // These allow scalars in an already created buffer to be updated in-place.
+  // Since by default scalars are stored in the smallest possible space,
+  // the new value may not fit, in which case these functions return false.
```

(diff truncated here; the full file is 1,897 lines)
|
675
|
+
// To avoid this, you can construct the values you intend to mutate using
|
676
|
+
// Builder::ForceMinimumBitWidth.
|
677
|
+
bool MutateInt(int64_t i) {
|
678
|
+
if (type_ == FBT_INT) {
|
679
|
+
return Mutate(data_, i, parent_width_, WidthI(i));
|
680
|
+
} else if (type_ == FBT_INDIRECT_INT) {
|
681
|
+
return Mutate(Indirect(), i, byte_width_, WidthI(i));
|
682
|
+
} else if (type_ == FBT_UINT) {
|
683
|
+
auto u = static_cast<uint64_t>(i);
|
684
|
+
return Mutate(data_, u, parent_width_, WidthU(u));
|
685
|
+
} else if (type_ == FBT_INDIRECT_UINT) {
|
686
|
+
auto u = static_cast<uint64_t>(i);
|
687
|
+
return Mutate(Indirect(), u, byte_width_, WidthU(u));
|
688
|
+
} else {
|
689
|
+
return false;
|
690
|
+
}
|
691
|
+
}
|
692
|
+
|
693
|
+
bool MutateBool(bool b) {
|
694
|
+
return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
|
695
|
+
}
|
696
|
+
|
697
|
+
bool MutateUInt(uint64_t u) {
|
698
|
+
if (type_ == FBT_UINT) {
|
699
|
+
return Mutate(data_, u, parent_width_, WidthU(u));
|
700
|
+
} else if (type_ == FBT_INDIRECT_UINT) {
|
701
|
+
return Mutate(Indirect(), u, byte_width_, WidthU(u));
|
702
|
+
} else if (type_ == FBT_INT) {
|
703
|
+
auto i = static_cast<int64_t>(u);
|
704
|
+
return Mutate(data_, i, parent_width_, WidthI(i));
|
705
|
+
} else if (type_ == FBT_INDIRECT_INT) {
|
706
|
+
auto i = static_cast<int64_t>(u);
|
707
|
+
return Mutate(Indirect(), i, byte_width_, WidthI(i));
|
708
|
+
} else {
|
709
|
+
return false;
|
710
|
+
}
|
711
|
+
}
|
712
|
+
|
713
|
+
bool MutateFloat(float f) {
|
714
|
+
if (type_ == FBT_FLOAT) {
|
715
|
+
return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
|
716
|
+
} else if (type_ == FBT_INDIRECT_FLOAT) {
|
717
|
+
return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
|
718
|
+
} else {
|
719
|
+
return false;
|
720
|
+
}
|
721
|
+
}
|
722
|
+
|
723
|
+
bool MutateFloat(double d) {
|
724
|
+
if (type_ == FBT_FLOAT) {
|
725
|
+
return MutateF(data_, d, parent_width_, WidthF(d));
|
726
|
+
} else if (type_ == FBT_INDIRECT_FLOAT) {
|
727
|
+
return MutateF(Indirect(), d, byte_width_, WidthF(d));
|
728
|
+
} else {
|
729
|
+
return false;
|
730
|
+
}
|
731
|
+
}
|
732
|
+
|
733
|
+
bool MutateString(const char *str, size_t len) {
|
734
|
+
auto s = AsString();
|
735
|
+
if (s.IsTheEmptyString()) return false;
|
736
|
+
// This is very strict, could allow shorter strings, but that creates
|
737
|
+
// garbage.
|
738
|
+
if (s.length() != len) return false;
|
739
|
+
memcpy(const_cast<char *>(s.c_str()), str, len);
|
740
|
+
return true;
|
741
|
+
}
|
742
|
+
bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
|
743
|
+
bool MutateString(const std::string &str) {
|
744
|
+
return MutateString(str.data(), str.length());
|
745
|
+
}
|
746
|
+
|
747
|
+
private:
|
748
|
+
const uint8_t *Indirect() const {
|
749
|
+
return flexbuffers::Indirect(data_, parent_width_);
|
750
|
+
}
|
751
|
+
|
752
|
+
template<typename T>
|
753
|
+
bool Mutate(const uint8_t *dest, T t, size_t byte_width,
|
754
|
+
BitWidth value_width) {
|
755
|
+
auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
|
756
|
+
byte_width;
|
757
|
+
if (fits) {
|
758
|
+
t = flatbuffers::EndianScalar(t);
|
759
|
+
memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
|
760
|
+
}
|
761
|
+
return fits;
|
762
|
+
}
|
763
|
+
|
764
|
+
template<typename T>
|
765
|
+
bool MutateF(const uint8_t *dest, T t, size_t byte_width,
|
766
|
+
BitWidth value_width) {
|
767
|
+
if (byte_width == sizeof(double))
|
768
|
+
return Mutate(dest, static_cast<double>(t), byte_width, value_width);
|
769
|
+
if (byte_width == sizeof(float))
|
770
|
+
return Mutate(dest, static_cast<float>(t), byte_width, value_width);
|
771
|
+
FLATBUFFERS_ASSERT(false);
|
772
|
+
return false;
|
773
|
+
}
|
774
|
+
|
775
|
+
friend class Verifier;
|
776
|
+
|
777
|
+
const uint8_t *data_;
|
778
|
+
uint8_t parent_width_;
|
779
|
+
uint8_t byte_width_;
|
780
|
+
Type type_;
|
781
|
+
};
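
// Illustrative usage of the Mutate* API above (not part of the original
// header); `root` is assumed to be a Reference to an FBT_INT value:
//   if (!root.MutateInt(999)) {
//     // 999 needs more bytes than the builder reserved for this value; see
//     // Builder::ForceMinimumBitWidth for how to reserve more up front.
//   }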

// Template specialization for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }
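
// For illustration (not in the original header): the packed type byte stores
// the bit width in its low 2 bits and the type above them, so e.g.
// PackedType(BIT_WIDTH_8, FBT_INT) == (0 | (1 << 2)) == 4.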

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
    default: FLATBUFFERS_ASSERT(false); return Reference();
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(buffer.data(), buffer.size());
}
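
// Illustrative read-side sketch, not part of the original header. It assumes
// `buf` holds a finished FlexBuffer whose root is a map shaped like
// { foo: 100, vec: [ -100, "Fred", 4.0 ] }.
inline void ReadExample(const std::vector<uint8_t> &buf) {
  auto map = GetRoot(buf).AsMap();
  auto foo = map["foo"].AsInt64();   // 100
  auto vec = map["vec"].AsVector();
  auto i = vec[0].AsInt64();         // -100
  auto s = vec[1].AsString().str();  // "Fred"
  auto d = vec[2].AsDouble();        // 4.0
  (void)foo; (void)i; (void)s; (void)d;
}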

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};
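
// For illustration (not in the original header): pooling beyond the default
// is opted into at construction time, e.g.
//   flexbuffers::Builder fbb(512, BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);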

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        has_duplicate_keys_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

#ifdef FLATBUFFERS_DEFAULT_DECLARATION
  Builder(Builder &&) = default;
  Builder &operator=(Builder &&) = default;
#endif

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is;
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(v.data(), v.size(), 0, FBT_BLOB);
  }

  void Blob(const char *key, const void *data, size_t len) {
    Key(key);
    Blob(data, len);
  }
  void Blob(const char *key, const std::vector<uint8_t> &v) {
    Key(key);
    Blob(v);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would be
    // quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict = reinterpret_cast<TwoValue *>(stack_.data() + start);
    std::sort(
        dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool {
          auto as = reinterpret_cast<const char *>(buf_.data() + a.key.u_);
          auto bs = reinterpret_cast<const char *>(buf_.data() + b.key.u_);
          auto comp = strcmp(as, bs);
          // We want to disallow duplicate keys, since this results in a
          // map where values cannot be found.
          // But we can't assert here (since we don't want to fail on
          // random JSON input) or have an error mechanism.
          // Instead, we set has_duplicate_keys_ in the builder to
          // signal this.
          // TODO: Have to check for pointer equality, as some sort
          // implementations apparently call this function with the same
          // element?? Why?
          if (!comp && &a != &b) has_duplicate_keys_ = true;
          return comp < 0;
        });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  // Call this after EndMap to see if the map had any duplicate keys.
  // Any map with such keys won't be able to retrieve all values.
  bool HasDuplicateKeys() const { return has_duplicate_keys_; }
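
  // Illustrative usage, not part of the original header:
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() { fbb.Int("x", 1); fbb.Int("x", 2); });
  //   fbb.Finish();
  //   // "x" occurs twice, so lookups on this map are unreliable:
  //   assert(fbb.HasDuplicateKeys());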

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(vec.data(), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. For anything bigger, use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }
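
  // Illustrative usage of the functional overloads above (not part of the
  // original header): nesting mirrors the structure of the resulting buffer.
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.Int("foo", 100);
  //     fbb.Vector("vec", [&]() {
  //       fbb += -100;  // operator+= (below) dispatches to Add().
  //       fbb += "Fred";
  //       fbb.IndirectFloat(4.0f);
  //     });
  //   });
  //   fbb.Finish();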

  // If you wish to share a value explicitly (a value not shared automatically
  // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
  // functions. Or if you wish to turn those flags off for performance reasons
  // and still do some explicit sharing. For example:
  //   builder.IndirectDouble(M_PI);
  //   auto id = builder.LastValue();  // Remember where we stored it.
  //   .. more code goes here ..
  //   builder.ReuseValue(id);  // Refers to same double by offset.
  // LastValue works regardless of whether the value has a key or not.
  // Works on any data type.
  struct Value;
  Value LastValue() { return stack_.back(); }
  void ReuseValue(Value v) { stack_.push_back(v); }
  void ReuseValue(const char *key, Value v) {
    Key(key);
    ReuseValue(v);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) { Add(t); }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }
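
  // Illustrative usage, not part of the original header; `ints` and `count`
  // are assumptions for the example:
  //   fbb.ForceMinimumBitWidth(BIT_WIDTH_32);  // Leave room to grow.
  //   fbb.Vector(ints, count);
  //   fbb.ForceMinimumBitWidth();              // Back to the default.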

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return static_cast<uint8_t>(byte_width);
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      // case 2: Write(static_cast<half>(f), byte_width); break;
      // case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: FLATBUFFERS_ASSERT(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }

  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return flatbuffers::is_floating_point<T>::value ? FBT_FLOAT
           : flatbuffers::is_same<T, bool>::value
               ? FBT_BOOL
               : (flatbuffers::is_unsigned<T>::value ? FBT_UINT : FBT_INT);
  }

 public:
  // This was really intended to be private, except for LastValue/ReuseValue.
  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
        : f_(static_cast<double>(f)),
          type_(FBT_FLOAT),
          min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
              byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

 private:
  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    FLATBUFFERS_ASSERT(
        !fixed ||
        typed);  // typed=false, fixed=true combination is not supported.
    // Figure out the smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix this vector with an
      // offset to the keys.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width =
          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your typed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!typed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;
  bool has_duplicate_keys_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra = reinterpret_cast<const char *>(buf_->data() + a);
      auto strb = reinterpret_cast<const char *>(buf_->data() + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = buf_->data() + a.first;
      auto strb = buf_->data() + b.first;
      auto cr = memcmp(stra, strb, (std::min)(a.second, b.second) + 1);
      return cr < 0 || (cr == 0 && a.second < b.second);
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;

  friend class Verifier;
};
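
// Illustrative end-to-end sketch, not part of the original header: build a
// one-entry map, finish the buffer, and read the value back.
inline int64_t RoundTripExample() {
  Builder fbb;
  fbb.Map([&]() { fbb.Int("answer", 42); });
  fbb.Finish();
  return GetRoot(fbb.GetBuffer()).AsMap()["answer"].AsInt64();  // 42
}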

// Helper class to verify the integrity of a FlexBuffer
class Verifier FLATBUFFERS_FINAL_CLASS {
 public:
  Verifier(const uint8_t *buf, size_t buf_len,
           // Supplying this vector likely results in faster verification
           // of larger buffers with many shared keys/strings, but
           // comes at the cost of using additional memory the same size as
           // the buffer being verified, so it is off by default.
           std::vector<uint8_t> *reuse_tracker = nullptr,
           bool _check_alignment = true, size_t max_depth = 64)
      : buf_(buf),
        size_(buf_len),
        depth_(0),
        max_depth_(max_depth),
        num_vectors_(0),
        max_vectors_(buf_len),
        check_alignment_(_check_alignment),
        reuse_tracker_(reuse_tracker) {
    FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
    if (reuse_tracker_) {
      reuse_tracker_->clear();
      reuse_tracker_->resize(size_, PackedType(BIT_WIDTH_8, FBT_NULL));
    }
  }

 private:
  // Central location where any verification failures register.
  bool Check(bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
    #endif
    // clang-format on
    return ok;
  }

  // Verify any range within the buffer.
  bool VerifyFrom(size_t elem, size_t elem_len) const {
    return Check(elem_len < size_ && elem <= size_ - elem_len);
  }
  bool VerifyBefore(size_t elem, size_t elem_len) const {
    return Check(elem_len <= elem);
  }

  bool VerifyFromPointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyFrom(o, len);
  }
  bool VerifyBeforePointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyBefore(o, len);
  }

  bool VerifyByteWidth(size_t width) {
    return Check(width == 1 || width == 2 || width == 4 || width == 8);
  }

  bool VerifyType(int type) { return Check(type >= 0 && type < FBT_MAX_TYPE); }

  bool VerifyOffset(uint64_t off, const uint8_t *p) {
    return Check(off <= static_cast<uint64_t>(size_)) &&
           off <= static_cast<uint64_t>(p - buf_);
  }

  bool VerifyAlignment(const uint8_t *p, size_t size) const {
    auto o = static_cast<size_t>(p - buf_);
    return Check((o & (size - 1)) == 0 || !check_alignment_);
  }

// Macro, since we want to escape from parent function & use lazy args.
#define FLEX_CHECK_VERIFIED(P, PACKED_TYPE)                     \
  if (reuse_tracker_) {                                         \
    auto packed_type = PACKED_TYPE;                             \
    auto existing = (*reuse_tracker_)[P - buf_];                \
    if (existing == packed_type) return true;                   \
    /* Fail verification if already set with different type! */ \
    if (!Check(existing == 0)) return false;                    \
    (*reuse_tracker_)[P - buf_] = packed_type;                  \
  }

  bool VerifyVector(Reference r, const uint8_t *p, Type elem_type) {
    // Any kind of nesting goes through this function, so guard against that
    // here, both with simple nesting checks, and the reuse tracker if on.
    depth_++;
    num_vectors_++;
    if (!Check(depth_ <= max_depth_ && num_vectors_ <= max_vectors_))
      return false;
    auto size_byte_width = r.byte_width_;
    FLEX_CHECK_VERIFIED(p,
                        PackedType(Builder::WidthB(size_byte_width), r.type_));
    if (!VerifyBeforePointer(p, size_byte_width)) return false;
    auto sized = Sized(p, size_byte_width);
    auto num_elems = sized.size();
    auto elem_byte_width = r.type_ == FBT_STRING || r.type_ == FBT_BLOB
                               ? uint8_t(1)
                               : r.byte_width_;
    auto max_elems = SIZE_MAX / elem_byte_width;
    if (!Check(num_elems < max_elems))
      return false;  // Protect against byte_size overflowing.
    auto byte_size = num_elems * elem_byte_width;
    if (!VerifyFromPointer(p, byte_size)) return false;
    if (elem_type == FBT_NULL) {
      // Verify type bytes after the vector.
      if (!VerifyFromPointer(p + byte_size, num_elems)) return false;
      auto v = Vector(p, size_byte_width);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else if (elem_type == FBT_KEY) {
      auto v = TypedVector(p, elem_byte_width, FBT_KEY);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else {
      FLATBUFFERS_ASSERT(IsInline(elem_type));
    }
    depth_--;
    return true;
  }

  bool VerifyKeys(const uint8_t *p, uint8_t byte_width) {
    // The vector part of the map has already been verified.
    const size_t num_prefixed_fields = 3;
    if (!VerifyBeforePointer(p, byte_width * num_prefixed_fields)) return false;
    p -= byte_width * num_prefixed_fields;
    auto off = ReadUInt64(p, byte_width);
    if (!VerifyOffset(off, p)) return false;
    auto key_byte_width =
        static_cast<uint8_t>(ReadUInt64(p + byte_width, byte_width));
    if (!VerifyByteWidth(key_byte_width)) return false;
    return VerifyVector(Reference(p, byte_width, key_byte_width, FBT_VECTOR_KEY),
                        p - off, FBT_KEY);
  }

  bool VerifyKey(const uint8_t *p) {
    FLEX_CHECK_VERIFIED(p, PackedType(BIT_WIDTH_8, FBT_KEY));
    while (p < buf_ + size_)
      if (*p++) return true;
    return false;
  }

#undef FLEX_CHECK_VERIFIED

  bool VerifyTerminator(const String &s) {
    return VerifyFromPointer(reinterpret_cast<const uint8_t *>(s.c_str()),
                             s.size() + 1);
  }

  bool VerifyRef(Reference r) {
    // r.parent_width_ and r.data_ already verified.
    if (!VerifyByteWidth(r.byte_width_) || !VerifyType(r.type_)) {
      return false;
    }
    if (IsInline(r.type_)) {
      // Inline scalars don't require further verification.
      return true;
    }
    // All remaining types are an offset.
    auto off = ReadUInt64(r.data_, r.parent_width_);
    if (!VerifyOffset(off, r.data_)) return false;
    auto p = r.Indirect();
    if (!VerifyAlignment(p, r.byte_width_)) return false;
    switch (r.type_) {
      case FBT_INDIRECT_INT:
      case FBT_INDIRECT_UINT:
      case FBT_INDIRECT_FLOAT: return VerifyFromPointer(p, r.byte_width_);
      case FBT_KEY: return VerifyKey(p);
      case FBT_MAP:
        return VerifyVector(r, p, FBT_NULL) && VerifyKeys(p, r.byte_width_);
      case FBT_VECTOR: return VerifyVector(r, p, FBT_NULL);
      case FBT_VECTOR_INT: return VerifyVector(r, p, FBT_INT);
      case FBT_VECTOR_BOOL:
      case FBT_VECTOR_UINT: return VerifyVector(r, p, FBT_UINT);
      case FBT_VECTOR_FLOAT: return VerifyVector(r, p, FBT_FLOAT);
      case FBT_VECTOR_KEY: return VerifyVector(r, p, FBT_KEY);
      case FBT_VECTOR_STRING_DEPRECATED:
        // Use of FBT_KEY here intentional, see elsewhere.
        return VerifyVector(r, p, FBT_KEY);
      case FBT_BLOB: return VerifyVector(r, p, FBT_UINT);
      case FBT_STRING:
        return VerifyVector(r, p, FBT_UINT) &&
               VerifyTerminator(String(p, r.byte_width_));
      case FBT_VECTOR_INT2:
      case FBT_VECTOR_UINT2:
      case FBT_VECTOR_FLOAT2:
      case FBT_VECTOR_INT3:
      case FBT_VECTOR_UINT3:
      case FBT_VECTOR_FLOAT3:
      case FBT_VECTOR_INT4:
      case FBT_VECTOR_UINT4:
      case FBT_VECTOR_FLOAT4: {
        uint8_t len = 0;
        auto vtype = ToFixedTypedVectorElementType(r.type_, &len);
        if (!VerifyType(vtype)) return false;
        return VerifyFromPointer(p, r.byte_width_ * len);
      }
      default: return false;
    }
  }

 public:
  bool VerifyBuffer() {
    if (!Check(size_ >= 3)) return false;
    auto end = buf_ + size_;
    auto byte_width = *--end;
    auto packed_type = *--end;
    return VerifyByteWidth(byte_width) && Check(end - buf_ >= byte_width) &&
           VerifyRef(Reference(end - byte_width, byte_width, packed_type));
  }

 private:
  const uint8_t *buf_;
  size_t size_;
  size_t depth_;
  const size_t max_depth_;
  size_t num_vectors_;
  const size_t max_vectors_;
  bool check_alignment_;
  std::vector<uint8_t> *reuse_tracker_;
};

// Utility function that constructs the Verifier for you, see above for
// parameters.
inline bool VerifyBuffer(const uint8_t *buf, size_t buf_len,
                         std::vector<uint8_t> *reuse_tracker = nullptr) {
  Verifier verifier(buf, buf_len, reuse_tracker);
  return verifier.VerifyBuffer();
}
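
// Illustrative usage, not part of the original header: always verify
// untrusted bytes before dereferencing them as a FlexBuffer.
inline bool ReadUntrusted(const uint8_t *data, size_t size) {
  std::vector<uint8_t> reuse_tracker;  // Optional: trades memory for speed.
  if (!VerifyBuffer(data, size, &reuse_tracker)) return false;
  return GetRoot(data, size).IsMap();  // Safe to read now.
}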

#ifdef FLATBUFFERS_H_
// This is a verifier utility function that works together with the
// FlatBuffers verifier, which should only be present if flatbuffers.h
// has been included (which it typically is in generated code).
inline bool VerifyNestedFlexBuffer(const flatbuffers::Vector<uint8_t> *nv,
                                   flatbuffers::Verifier &verifier) {
  if (!nv) return true;
  return verifier.Check(flexbuffers::VerifyBuffer(
      nv->data(), nv->size(), verifier.GetFlexReuseTracker()));
}
#endif

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_