xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,62 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_
|
16
|
+
#define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_
|
17
|
+
|
18
|
+
#include "tensorflow/lite/c/common.h"
|
19
|
+
#include "tensorflow/lite/micro/micro_common.h"
|
20
|
+
#include "tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h"
|
21
|
+
#include "tensorflow/lite/schema/schema_generated.h"
|
22
|
+
|
23
|
+
namespace tflite_micro {
|
24
|
+
|
25
|
+
// This is an interface for the OpResolver for TFLiteMicro. The differences from
|
26
|
+
// the TFLite OpResolver base class are to:
|
27
|
+
// * explicitly remove support for Op versions
|
28
|
+
// * allow for finer grained registration of the Builtin Ops to reduce code
|
29
|
+
// size for TFLiteMicro.
|
30
|
+
//
|
31
|
+
// We need an interface class instead of directly using MicroMutableOpResolver
|
32
|
+
// because MicroMutableOpResolver is a class template with the number of
|
33
|
+
// registered Ops as the template parameter.
|
34
|
+
class MicroOpResolver {
 public:
  // Returns the Op registration struct corresponding to the enum code from the
  // flatbuffer schema. Returns nullptr if the op is not found or if op ==
  // BuiltinOperator_CUSTOM.
  virtual const TFLMRegistration* FindOp(BuiltinOperator op) const = 0;

  // Returns the Op registration struct corresponding to the custom operator by
  // name.
  virtual const TFLMRegistration* FindOp(const char* op) const = 0;

  // Returns the operator specific parsing function for the OpData for a
  // BuiltinOperator (if registered), else nullptr.
  virtual TfLiteBridgeBuiltinParseFunction GetOpDataParser(
      BuiltinOperator op) const = 0;

  // Virtual destructor so concrete resolvers can be destroyed through a
  // MicroOpResolver pointer.
  virtual ~MicroOpResolver() {}
};
|
52
|
+
|
53
|
+
// Handles the logic for converting between an OperatorCode structure extracted
|
54
|
+
// from a flatbuffer and information about a registered operator
|
55
|
+
// implementation.
|
56
|
+
TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
|
57
|
+
const MicroOpResolver& op_resolver,
|
58
|
+
const TFLMRegistration** registration);
|
59
|
+
|
60
|
+
} // namespace tflite_micro
|
61
|
+
|
62
|
+
#endif // TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_
|
@@ -0,0 +1,140 @@
|
|
1
|
+
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
|
16
|
+
#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
|
17
|
+
#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
|
18
|
+
|
19
|
+
#include "tensorflow/lite/micro/compatibility.h"
|
20
|
+
#include "tensorflow/lite/micro/micro_profiler_interface.h"
|
21
|
+
|
22
|
+
namespace tflite_micro {
|
23
|
+
|
24
|
+
// MicroProfiler creates a common way to gain fine-grained insight into runtime
|
25
|
+
// performance. Bottleck operators can be identified along with slow code
|
26
|
+
// sections. This can be used in conjunction with running the relevant micro
|
27
|
+
// benchmark to evaluate end-to-end performance.
|
28
|
+
class MicroProfiler : public MicroProfilerInterface {
 public:
  MicroProfiler() = default;
  virtual ~MicroProfiler() = default;

  // Marks the start of a new event and returns an event handle that can be used
  // to mark the end of the event via EndEvent. The lifetime of the tag
  // parameter must exceed that of the MicroProfiler.
  virtual uint32_t BeginEvent(const char* tag) override;

  // Marks the end of an event associated with event_handle. It is the
  // responsibility of the caller to ensure that EndEvent is called once and
  // only once per event_handle.
  //
  // If EndEvent is called more than once for the same event_handle, the last
  // call will be used as the end of event marker. If EndEvent is called 0 times
  // for a particular event_handle, the duration of that event will be 0 ticks.
  virtual void EndEvent(uint32_t event_handle) override;

  // Clears all the events that have been currently profiled.
  void ClearEvents() { num_events_ = 0; }

  // Returns the sum of the ticks taken across all the events. This number
  // is only meaningful if all of the events are disjoint (the end time of
  // event[i] <= start time of event[i+1]).
  uint32_t GetTotalTicks() const;

  // Prints the profiling information of each of the events in human readable
  // form.
  void Log() const;

  // Prints the profiling information of each of the events in CSV (Comma
  // Separated Value) form.
  void LogCsv() const;

  // Prints total ticks for each unique tag in CSV format.
  // Output will have one row for each unique tag along with the
  // total ticks summed across all events with that particular tag.
  void LogTicksPerTagCsv();

 private:
  // Maximum number of events that this class can keep track of. If we call
  // AddEvent more than kMaxEvents number of times, then the oldest event's
  // profiling information will be overwritten.
  static constexpr int kMaxEvents = 4096;

  // Parallel arrays: event i is described by tags_[i], start_ticks_[i] and
  // end_ticks_[i].
  const char* tags_[kMaxEvents];
  uint32_t start_ticks_[kMaxEvents];
  uint32_t end_ticks_[kMaxEvents];
  int num_events_ = 0;

  // Aggregated tick total for one unique tag; used by LogTicksPerTagCsv.
  struct TicksPerTag {
    const char* tag;
    uint32_t ticks;
  };
  // In practice, the number of tags will be much lower than the number of
  // events. But it is theoretically possible that each event to be unique and
  // hence we allow total_ticks_per_tag to have kMaxEvents entries.
  TicksPerTag total_ticks_per_tag[kMaxEvents] = {};

  // Returns the index of the total_ticks_per_tag entry matching tag_name, or
  // the next free slot if the tag has not been seen yet.
  int FindExistingOrNextPosition(const char* tag_name);

  TF_LITE_REMOVE_VIRTUAL_DELETE
};
|
92
|
+
|
93
|
+
#if defined(TF_LITE_STRIP_ERROR_STRINGS)
|
94
|
+
// For release builds, the ScopedMicroProfiler is a noop.
|
95
|
+
//
|
96
|
+
// This is done because the ScipedProfiler is used as part of the
|
97
|
+
// MicroInterpreter and we want to ensure zero overhead for the release builds.
|
98
|
+
// No-op variant used when error strings are stripped (release builds):
// constructing and destroying this object does nothing, so it compiles away.
class ScopedMicroProfiler {
 public:
  explicit ScopedMicroProfiler(const char* tag,
                               MicroProfilerInterface* profiler) {}
};
|
103
|
+
|
104
|
+
#else
|
105
|
+
|
106
|
+
// This class can be used to add events to a MicroProfiler object that span the
|
107
|
+
// lifetime of the ScopedMicroProfiler object.
|
108
|
+
// Usage example:
|
109
|
+
//
|
110
|
+
// MicroProfiler profiler();
|
111
|
+
// ...
|
112
|
+
// {
|
113
|
+
// ScopedMicroProfiler scoped_profiler("custom_tag", profiler);
|
114
|
+
// work_to_profile();
|
115
|
+
// }
|
116
|
+
class ScopedMicroProfiler {
 public:
  // Starts a profiling event tagged `tag` immediately. A null profiler is
  // allowed and makes this object a no-op.
  explicit ScopedMicroProfiler(const char* tag,
                               MicroProfilerInterface* profiler)
      : profiler_(profiler) {
    if (profiler_ != nullptr) {
      event_handle_ = profiler_->BeginEvent(tag);
    }
  }

  // Ends the event begun in the constructor (if a profiler was supplied).
  ~ScopedMicroProfiler() {
    if (profiler_ != nullptr) {
      profiler_->EndEvent(event_handle_);
    }
  }

 private:
  uint32_t event_handle_ = 0;
  MicroProfilerInterface* profiler_ = nullptr;
};
|
136
|
+
#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS)
|
137
|
+
|
138
|
+
} // namespace tflite_micro
|
139
|
+
|
140
|
+
#endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
|
@@ -0,0 +1,38 @@
|
|
1
|
+
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
|
16
|
+
#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_
|
17
|
+
#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_
|
18
|
+
|
19
|
+
#include <cstdint>
|
20
|
+
|
21
|
+
namespace tflite_micro {
|
22
|
+
|
23
|
+
// Interface class that the TFLM framework relies on for profiling.
|
24
|
+
class MicroProfilerInterface {
 public:
  // Virtual destructor so implementations can be destroyed through an
  // interface pointer.
  virtual ~MicroProfilerInterface() {}

  // Marks the start of a new event and returns an event handle that can be used
  // to mark the end of the event via EndEvent.
  virtual uint32_t BeginEvent(const char* tag) = 0;

  // Marks the end of an event associated with event_handle.
  virtual void EndEvent(uint32_t event_handle) = 0;
};
|
35
|
+
|
36
|
+
} // namespace tflite_micro
|
37
|
+
|
38
|
+
#endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_
|
@@ -0,0 +1,89 @@
|
|
1
|
+
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
|
16
|
+
#ifndef TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_
|
17
|
+
#define TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_
|
18
|
+
|
19
|
+
#include <cstdint>
|
20
|
+
|
21
|
+
#include "tensorflow/lite/c/common.h"
|
22
|
+
#include "tensorflow/lite/micro/micro_allocator.h"
|
23
|
+
|
24
|
+
namespace tflite_micro {
|
25
|
+
|
26
|
+
class MicroResourceVariables {
 public:
  // Creates a MicroResourceVariables instance with capacity for
  // `num_variables` variables, with bookkeeping memory taken from `allocator`.
  // NOTE(review): presumably returns nullptr on allocation failure — confirm
  // against the implementation.
  static MicroResourceVariables* Create(MicroAllocator* allocator,
                                        int num_variables);

  // Creates a resource variable if none is available for the given container
  // and shared name pair. Returns the resource ID corresponding to the
  // container and shared name pair. If allocation fails, the returned resource
  // ID will be negative. The container and shared_name must outlive this
  // class.
  int CreateIdIfNoneFound(const char* container, const char* shared_name);

  // Read the resource buffer associated with the given ID into the given
  // tensor.
  TfLiteStatus Read(int id, const TfLiteEvalTensor* tensor);

  // Allocates the resource buffer if none has been allocated, based on the
  // length of the input tensor. Copies input tensor contents to the resource
  // buffer.
  TfLiteStatus Allocate(int id, TfLiteContext* context,
                        const TfLiteTensor* tensor);

  // Copies input tensor contents to the resource buffer.
  // AllocateResourceVariable with a TFLite tensor must have been called first
  // in order to allocate the resource buffer.
  TfLiteStatus Assign(int id, const TfLiteEvalTensor* tensor);

  // Zeros out all resource buffers.
  TfLiteStatus ResetAll();

 private:
  // Returns the resource ID for the given container/shared_name pair.
  // NOTE(review): presumably returns a negative value when no match exists —
  // confirm the not-found sentinel in the implementation.
  int FindId(const char* container, const char* shared_name);

  // Micro resource contains the mapping between resource container/name
  // strings and resource IDs. Each resource ID corresponds to a resource
  // buffer pointer. The resource ID is created during the VAR_HANDLE operator
  // preparation stage. The resource buffer pointer is created during
  // ASSIGN_VARIABLE preparation stage based on the size of the TFLiteTensor
  // being assigned.
  struct MicroResourceVariable {
    const char* container;
    const char* shared_name;
    void* resource_buffer;

    // This is only for verifying read size.
    size_t bytes;
    // Initialization default value
    int8_t default_value;
  };

  MicroResourceVariables(MicroResourceVariable* variables,
                         int max_variable_count)
      : resource_variables_(variables),
        max_variable_count_(max_variable_count),
        num_resource_variables_(0) {}

  // Array of length max_variable_count_; only the first
  // num_resource_variables_ entries are in use.
  MicroResourceVariable* resource_variables_;
  int max_variable_count_;
  int num_resource_variables_;
};
|
86
|
+
|
87
|
+
} // namespace tflite_micro
|
88
|
+
|
89
|
+
#endif // TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_
|
@@ -0,0 +1,36 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
|
16
|
+
#define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
|
17
|
+
|
18
|
+
#include <cstdint>
|
19
|
+
|
20
|
+
namespace tflite_micro {
|
21
|
+
|
22
|
+
// These functions should be implemented by each target platform, and provide an
|
23
|
+
// accurate tick count along with how many ticks there are per second.
|
24
|
+
uint32_t ticks_per_second();
|
25
|
+
|
26
|
+
// Return time in ticks. The meaning of a tick varies per platform.
|
27
|
+
uint32_t GetCurrentTimeTicks();
|
28
|
+
|
29
|
+
// Converts a tick count to milliseconds using the platform-supplied
// ticks_per_second() rate. Computed in float, then truncated toward zero.
// NOTE(review): the parameter is int32_t while the return is uint32_t, so a
// negative tick count would wrap on conversion — presumably callers only pass
// non-negative durations; confirm.
inline uint32_t TicksToMs(int32_t ticks) {
  return static_cast<uint32_t>(1000.0f * static_cast<float>(ticks) /
                               static_cast<float>(ticks_per_second()));
}
|
33
|
+
|
34
|
+
} // namespace tflite_micro
|
35
|
+
|
36
|
+
#endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
|
@@ -0,0 +1,162 @@
|
|
1
|
+
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
|
16
|
+
#ifndef TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_
|
17
|
+
#define TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_
|
18
|
+
|
19
|
+
#include <algorithm>
|
20
|
+
#include <cmath>
|
21
|
+
#include <cstdint>
|
22
|
+
#include <limits>
|
23
|
+
|
24
|
+
#include "tensorflow/lite/c/common.h"
|
25
|
+
|
26
|
+
namespace tflite_micro {
|
27
|
+
|
28
|
+
// Returns number of elements in the shape array.
|
29
|
+
|
30
|
+
int ElementCount(const TfLiteIntArray& dims);
|
31
|
+
|
32
|
+
size_t EvalTensorBytes(const TfLiteEvalTensor* tensor);
|
33
|
+
|
34
|
+
// C++11 does not support constexpr max; hence, use ternary conditional to
|
35
|
+
// create our own constexpr Max function.
|
36
|
+
// Compile-time max of two ints (C++11-compatible replacement for a constexpr
// std::max).
constexpr int Max(int a, int b) { return a < b ? b : a; }
|
37
|
+
|
38
|
+
// Converts a float value into a quantized value. Note that large values (close
|
39
|
+
// to max int and min int) may see significant error due to a lack of floating
|
40
|
+
// point granularity for large values.
|
41
|
+
template <typename T>
T FloatToQuantizedType(const float value, const float scale, int zero_point) {
  // Scale, round to the nearest integer, shift by the zero point, then clamp
  // to the representable range of T.
  const int32_t lower = static_cast<int32_t>(std::numeric_limits<T>::min());
  const int32_t upper = static_cast<int32_t>(std::numeric_limits<T>::max());
  const int32_t quantized = round(value / scale) + zero_point;
  return std::min(upper, std::max(lower, quantized));
}
|
50
|
+
|
51
|
+
template <typename T>
T FloatToSymmetricQuantizedType(const float value, const float scale) {
  // 64-bit values are required since 8x16 conv accumulates to int64, meaning
  // an int64 bias is required. The symmetric range excludes T's minimum
  // (clamped to min + 1).
  const std::int64_t lower =
      static_cast<std::int64_t>(std::numeric_limits<T>::min() + 1);
  const std::int64_t upper =
      static_cast<std::int64_t>(std::numeric_limits<T>::max());
  std::int64_t quantized = round(value / scale);
  if (quantized < lower) {
    quantized = lower;
  }
  if (quantized > upper) {
    quantized = upper;
  }
  return quantized;
}
|
62
|
+
|
63
|
+
// Helper methods to quantize arrays of floats to the desired format.
|
64
|
+
//
|
65
|
+
// There are several key flavors of quantization in TfLite:
|
66
|
+
// asymmetric symmetric per channel
|
67
|
+
// int8_t | X | X | X |
|
68
|
+
// uint8_t | X | X | |
|
69
|
+
// int16_t | X | | |
|
70
|
+
// int32_t | | X | X |
|
71
|
+
//
|
72
|
+
// The per-op quantization spec can be found here:
|
73
|
+
// https://www.tensorflow.org/lite/performance/quantization_spec
|
74
|
+
template <typename T>
|
75
|
+
void Quantize(const float* input, T* output, int num_elements, float scale,
|
76
|
+
int zero_point) {
|
77
|
+
for (int i = 0; i < num_elements; i++) {
|
78
|
+
output[i] = FloatToQuantizedType<T>(input[i], scale, zero_point);
|
79
|
+
}
|
80
|
+
}
|
81
|
+
|
82
|
+
template <typename T>
|
83
|
+
void SymmetricQuantize(const float* input, T* output, int num_elements,
|
84
|
+
float scale) {
|
85
|
+
for (int i = 0; i < num_elements; i++) {
|
86
|
+
output[i] = FloatToSymmetricQuantizedType<T>(input[i], scale);
|
87
|
+
}
|
88
|
+
}
|
89
|
+
|
90
|
+
template <typename T>
|
91
|
+
void SymmetricPerChannelQuantize(const float* input, T* output,
|
92
|
+
int num_elements, int num_channels,
|
93
|
+
float* scales) {
|
94
|
+
int elements_per_channel = num_elements / num_channels;
|
95
|
+
for (int i = 0; i < num_channels; i++) {
|
96
|
+
for (int j = 0; j < elements_per_channel; j++) {
|
97
|
+
output[i * elements_per_channel + j] = FloatToSymmetricQuantizedType<T>(
|
98
|
+
input[i * elements_per_channel + j], scales[i]);
|
99
|
+
}
|
100
|
+
}
|
101
|
+
}
|
102
|
+
|
103
|
+
void SignedSymmetricPerChannelQuantize(const float* values,
|
104
|
+
TfLiteIntArray* dims,
|
105
|
+
int quantized_dimension,
|
106
|
+
int8_t* quantized_values,
|
107
|
+
float* scaling_factor,
|
108
|
+
TfLiteType type = kTfLiteNoType);
|
109
|
+
|
110
|
+
// Quantizes inputs based on the values provided, choosing the smallest range
|
111
|
+
// which includes all input values.
|
112
|
+
template <typename T>
|
113
|
+
void SymmetricQuantizeCalculateScales(const float* values, TfLiteIntArray* dims,
|
114
|
+
T* output, float* scale) {
|
115
|
+
int input_size = ElementCount(*dims);
|
116
|
+
|
117
|
+
float min = 0;
|
118
|
+
float max = 0;
|
119
|
+
for (int i = 0; i < input_size; i++) {
|
120
|
+
min = fminf(min, values[i]);
|
121
|
+
max = fmaxf(max, values[i]);
|
122
|
+
}
|
123
|
+
*scale = fmaxf(std::abs(min), std::abs(max)) / std::numeric_limits<T>::max();
|
124
|
+
for (int i = 0; i < input_size; i++) {
|
125
|
+
const int32_t quantized_value =
|
126
|
+
static_cast<int32_t>(roundf(values[i] / *scale));
|
127
|
+
// Clamp: just in case some odd numeric offset.
|
128
|
+
quantized_value = fminf(std::numeric_limits<T>::max(), quantized_value);
|
129
|
+
quantized_value = fmaxf(std::numeric_limits<T>::min() + 1, quantized_value);
|
130
|
+
output[i] = quantized_value;
|
131
|
+
}
|
132
|
+
}
|
133
|
+
|
134
|
+
template <typename T>
void Dequantize(const T* values, const int size, const float scale,
                int zero_point, float* dequantized_values) {
  // Inverse of Quantize: shift each value by the zero point, then rescale.
  const T* src = values;
  float* dst = dequantized_values;
  int remaining = size;
  while (remaining-- > 0) {
    *dst++ = (*src++ - zero_point) * scale;
  }
}
|
141
|
+
|
142
|
+
// based on TfLiteType passed in to these functions the corresponding max / min
|
143
|
+
// int for that type are returned
|
144
|
+
// Returns the minimum quantized integer for the given type: the int4 range
// for kTfLiteInt4, otherwise the int8_t range.
inline int QMinFromTfLiteType(TfLiteType type) {
  return (type == kTfLiteInt4) ? -8 : std::numeric_limits<int8_t>::min();
}
|
151
|
+
|
152
|
+
// Returns the maximum quantized integer for the given type: the int4 range
// for kTfLiteInt4, otherwise the int8_t range.
inline int QMaxFromTfLiteType(TfLiteType type) {
  return (type == kTfLiteInt4) ? 7 : std::numeric_limits<int8_t>::max();
}
|
159
|
+
|
160
|
+
} // namespace tflite_micro
|
161
|
+
|
162
|
+
#endif // TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_
|
@@ -0,0 +1,60 @@
|
|
1
|
+
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
|
16
|
+
#ifndef TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_
|
17
|
+
#define TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_
|
18
|
+
|
19
|
+
#include "tensorflow/lite/c/common.h"
|
20
|
+
#include "tensorflow/lite/micro/micro_allocator.h"
|
21
|
+
#include "tensorflow/lite/micro/micro_graph.h"
|
22
|
+
#include "tensorflow/lite/schema/schema_generated.h"
|
23
|
+
|
24
|
+
namespace tflite_micro {
|
25
|
+
|
26
|
+
// MockMicroGraph stubs out all MicroGraph methods used during invoke. A count
|
27
|
+
// of the number of calls to invoke for each subgraph is maintained for
|
28
|
+
// validation of control flow operators.
|
29
|
+
class MockMicroGraph : public MicroGraph {
 public:
  explicit MockMicroGraph(SingleArenaBufferAllocator* allocator);

  // Stubbed MicroGraph overrides. Per the class comment, invoke calls are
  // counted per subgraph; the other behaviors are defined in the .cc file.
  TfLiteStatus InvokeSubgraph(int subgraph_idx) override;
  size_t NumSubgraphInputs(int subgraph_idx) override;
  TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int tensor_idx) override;
  size_t NumSubgraphOutputs(int subgraph_idx) override;
  TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx,
                                      int tensor_idx) override;
  int NumSubgraphs() override;
  MicroResourceVariables* GetResourceVariables() override;

  // Accessors for the call counters accumulated by the stubbed methods.
  int get_init_count() const { return init_count_; }
  int get_prepare_count() const { return prepare_count_; }
  int get_free_count() const { return free_count_; }
  // No bounds checking: subgraph_idx must be < kMaxSubgraphs.
  int get_invoke_count(int subgraph_idx) const {
    return invoke_counts_[subgraph_idx];
  }

 private:
  static constexpr int kMaxSubgraphs = 10;
  SingleArenaBufferAllocator* allocator_;
  // NOTE(review): presumably a single tensor handed back by the
  // GetSubgraphInput/Output stubs — confirm in the implementation.
  TfLiteEvalTensor* mock_tensor_;
  int init_count_;
  int prepare_count_;
  int free_count_;
  // Per-subgraph invoke counters, indexed by subgraph_idx.
  int invoke_counts_[kMaxSubgraphs];
  TF_LITE_REMOVE_VIRTUAL_DELETE
};
|
57
|
+
|
58
|
+
} // namespace tflite_micro
|
59
|
+
|
60
|
+
#endif // TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_
|
xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_MICRO_PYTHON_INTERPRETER_SRC_PYTHON_OPS_RESOLVER_H_
|
16
|
+
#define TENSORFLOW_LITE_MICRO_PYTHON_INTERPRETER_SRC_PYTHON_OPS_RESOLVER_H_
|
17
|
+
|
18
|
+
// TODO(b/286456378): remove once this shim is no longer needed.
|
19
|
+
#include "python/tflite_micro/python_ops_resolver.h"
|
20
|
+
|
21
|
+
#endif // TENSORFLOW_LITE_MICRO_PYTHON_INTERPRETER_SRC_PYTHON_OPS_RESOLVER_H_
|
@@ -0,0 +1,30 @@
|
|
1
|
+
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_H_
|
16
|
+
#define TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_H_
|
17
|
+
|
18
|
+
#include <string>
|
19
|
+
|
20
|
+
#include "flatbuffers/flatbuffers.h"
|
21
|
+
#include "flatbuffers/util.h"
|
22
|
+
|
23
|
+
namespace tflite_micro {
|
24
|
+
|
25
|
+
std::string FlatBufferSizeToJsonString(
|
26
|
+
const uint8_t* buffer, const flatbuffers::TypeTable* type_table);
|
27
|
+
|
28
|
+
} // namespace tflite_micro
|
29
|
+
|
30
|
+
#endif // TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_H_
|
xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h
ADDED
@@ -0,0 +1,33 @@
|
|
1
|
+
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_WRAPPER_H_
|
16
|
+
#define TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_WRAPPER_H_
|
17
|
+
|
18
|
+
#include <Python.h>
|
19
|
+
|
20
|
+
#include <string>
|
21
|
+
|
22
|
+
namespace tflite_micro {
|
23
|
+
|
24
|
+
// Wrapper class exposing flatbuffer-to-JSON conversion; this header pulls in
// <Python.h>, so it is intended for use from the Python extension module.
class FlatbufferSizeWrapper {
 public:
  FlatbufferSizeWrapper();
  ~FlatbufferSizeWrapper();

  // Converts the given serialized flatbuffer into a JSON string.
  // NOTE(review): the exact JSON schema is defined in the implementation —
  // confirm before relying on it.
  std::string ConvertToJsonString(const char* in_flatbuffer);
};
|
31
|
+
|
32
|
+
} // namespace tflite_micro
|
33
|
+
#endif // TENSORFLOW_LITE_MICRO_PYTHON_TFLITE_SIZE_SRC_FLATBUFFERS_SIZE_WRAPPER_H_
|