xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,652 @@
|
|
1
|
+
# Copyright 2022 XMOS LIMITED. This Software is subject to the terms of the
|
2
|
+
# XMOS Public License: Version 1
|
3
|
+
import sys
|
4
|
+
import ctypes
|
5
|
+
from typing import Optional, Dict, Any, List
|
6
|
+
from tflite.Model import Model
|
7
|
+
from tflite.TensorType import TensorType
|
8
|
+
from tflite import opcode2name
|
9
|
+
from enum import Enum
|
10
|
+
|
11
|
+
import numpy as np
|
12
|
+
from pathlib import Path
|
13
|
+
|
14
|
+
from numpy import ndarray
|
15
|
+
|
16
|
+
# DLL path for different platforms
|
17
|
+
__PARENT_DIR = Path(__file__).parent.absolute()
|
18
|
+
if sys.platform.startswith("linux"):
|
19
|
+
lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "linux", "xtflm_python.so"))
|
20
|
+
elif sys.platform == "darwin":
|
21
|
+
lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "macos", "xtflm_python.dylib"))
|
22
|
+
else:
|
23
|
+
lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "windows", "xtflm_python.dll"))
|
24
|
+
|
25
|
+
lib = ctypes.cdll.LoadLibrary(lib_path)
|
26
|
+
|
27
|
+
from xmos_ai_tools.xinterpreters.exceptions import (
|
28
|
+
InterpreterError,
|
29
|
+
AllocateTensorsError,
|
30
|
+
InvokeError,
|
31
|
+
SetTensorError,
|
32
|
+
GetTensorError,
|
33
|
+
ModelSizeError,
|
34
|
+
ArenaSizeError,
|
35
|
+
DeviceTimeoutError,
|
36
|
+
)
|
37
|
+
|
38
|
+
MAX_TENSOR_ARENA_SIZE = 10000000
|
39
|
+
|
40
|
+
|
41
|
+
class XTFLMInterpreterStatus(Enum):
    """! Status codes returned by the xtflm C library entry points."""

    OK = 0
    ERROR = 1
|
44
|
+
|
45
|
+
|
46
|
+
class TFLMHostInterpreter:
    """! The xcore interpreters host class.
    The interpreter to be used on a host. Wraps the xtflm_python shared
    library (loaded above as ``lib``) behind a tf.lite.Interpreter-like API.
    """
|
50
|
+
|
51
|
+
    def __init__(self, max_tensor_arena_size: int = MAX_TENSOR_ARENA_SIZE) -> None:
        """! Host interpreter initializer.
        Sets up functions from the cdll, and calls to cdll function to create a new interpreter.
        @param max_tensor_arena_size Upper bound (in bytes) recorded for the tensor arena.
        """
        # Buffer the shared library writes its error string into; read back in _check_status().
        self._error_msg = ctypes.create_string_buffer(4096)

        # Declare restype/argtypes for every C entry point used by this class so
        # ctypes marshals arguments correctly. The interpreter handle is an
        # opaque pointer (c_void_p) returned by new_interpreter.
        lib.new_interpreter.restype = ctypes.c_void_p
        lib.new_interpreter.argtypes = [
            ctypes.c_size_t,
        ]

        lib.print_memory_plan.restype = None
        lib.print_memory_plan.argtypes = [ctypes.c_void_p]

        lib.delete_interpreter.restype = None
        lib.delete_interpreter.argtypes = [ctypes.c_void_p]

        # initialize(interpreter, model_bytes, model_len, params_bytes) -> status
        lib.initialize.restype = ctypes.c_int
        lib.initialize.argtypes = [
            ctypes.c_void_p,
            ctypes.c_char_p,
            ctypes.c_size_t,
            ctypes.c_char_p,
        ]

        # set_input_tensor(interpreter, tensor_index, data, size) -> status
        lib.set_input_tensor.restype = ctypes.c_int
        lib.set_input_tensor.argtypes = [
            ctypes.c_void_p,
            ctypes.c_size_t,
            ctypes.c_void_p,
            ctypes.c_int,
        ]

        # get_output_tensor(interpreter, tensor_index, out_buffer, size) -> status
        lib.get_output_tensor.restype = ctypes.c_int
        lib.get_output_tensor.argtypes = [
            ctypes.c_void_p,
            ctypes.c_size_t,
            ctypes.c_void_p,
            ctypes.c_int,
        ]

        # get_input_tensor(interpreter, tensor_index, out_buffer, size) -> status
        lib.get_input_tensor.restype = ctypes.c_int
        lib.get_input_tensor.argtypes = [
            ctypes.c_void_p,
            ctypes.c_size_t,
            ctypes.c_void_p,
            ctypes.c_int,
        ]

        lib.reset.restype = ctypes.c_int
        lib.reset.argtypes = [ctypes.c_void_p]

        lib.invoke.restype = ctypes.c_int
        lib.invoke.argtypes = [ctypes.c_void_p]

        # get_error(interpreter, out_string) -> length of error string
        lib.get_error.restype = ctypes.c_size_t
        lib.get_error.argtypes = [ctypes.c_void_p, ctypes.c_char_p]

        lib.arena_used_bytes.restype = ctypes.c_size_t
        lib.arena_used_bytes.argtypes = [
            ctypes.c_void_p,
        ]

        self._max_tensor_arena_size = max_tensor_arena_size
        # One modelData entry per model "tile" index; populated by set_model().
        self.models: List[TFLMHostInterpreter.modelData] = []
|
116
|
+
|
117
|
+
    def __enter__(self) -> "TFLMHostInterpreter":
        """! Context-manager entry; returns the interpreter itself."""
        return self
|
119
|
+
|
120
|
+
    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
        """! Exit calls close function to delete interpreter"""
        self.close()
|
123
|
+
|
124
|
+
def initialise_interpreter(self, model_index: int = 0) -> None:
|
125
|
+
"""! Interpreter initialiser, initialised interpreter with model and parameters (optional)
|
126
|
+
@param model_index The model to target, for interpreters that support multiple models
|
127
|
+
running concurrently. Defaults to 0 for use with a single model.
|
128
|
+
"""
|
129
|
+
max_model_size = 50000000
|
130
|
+
self.obj = lib.new_interpreter(max_model_size)
|
131
|
+
currentModel = None
|
132
|
+
|
133
|
+
for model in self.models:
|
134
|
+
if model.tile == model_index:
|
135
|
+
currentModel = model
|
136
|
+
|
137
|
+
if currentModel is None:
|
138
|
+
print(f"No model at index {model_index} found.", sys.stderr)
|
139
|
+
raise IndexError
|
140
|
+
|
141
|
+
assert currentModel.model_content is not None
|
142
|
+
|
143
|
+
status = lib.initialize(
|
144
|
+
self.obj,
|
145
|
+
currentModel.model_content,
|
146
|
+
len(currentModel.model_content),
|
147
|
+
currentModel.params_content,
|
148
|
+
)
|
149
|
+
if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
|
150
|
+
raise RuntimeError("Unable to initialize interpreter")
|
151
|
+
|
152
|
+
def set_tensor(self, tensor_index: int, value: ndarray, model_index=0) -> None:
|
153
|
+
"""! Write the input tensor of a model.
|
154
|
+
@param value The blob of data to set the tensor to.
|
155
|
+
@param tensor_index The index of input tensor to target. Defaults to 0.
|
156
|
+
@param model_index The model to target, for interpreters that support multiple models
|
157
|
+
running concurrently. Defaults to 0 for use with a single model.
|
158
|
+
"""
|
159
|
+
val = value.tobytes()
|
160
|
+
|
161
|
+
length = len(val)
|
162
|
+
length2 = self.get_input_tensor_size(tensor_index)
|
163
|
+
if length != length2:
|
164
|
+
print(
|
165
|
+
"ERROR: mismatching size in set_input_tensor %d vs %d"
|
166
|
+
% (length, length2)
|
167
|
+
)
|
168
|
+
|
169
|
+
self._check_status(lib.set_input_tensor(self.obj, tensor_index, val, length))
|
170
|
+
|
171
|
+
def get_tensor(
|
172
|
+
self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None
|
173
|
+
) -> ndarray:
|
174
|
+
"""! Read data from the output tensor of a model.
|
175
|
+
@param tensor_index The index of output tensor to target.
|
176
|
+
@param model_index The model to target, for interpreters that support multiple models
|
177
|
+
running concurrently. Defaults to 0 for use with a single model.
|
178
|
+
@param tensor Tensor of correct shape to write into (optional).
|
179
|
+
@return The data that was stored in the output tensor.
|
180
|
+
"""
|
181
|
+
|
182
|
+
count: Optional[int]
|
183
|
+
tensor_details: Optional[Dict[str, Any]]
|
184
|
+
count, tensor_details = next(
|
185
|
+
filter(
|
186
|
+
lambda x: x[1]["index"] == tensor_index,
|
187
|
+
enumerate(self.get_output_details()),
|
188
|
+
),
|
189
|
+
(None, None),
|
190
|
+
)
|
191
|
+
|
192
|
+
if count is None or tensor_details is None:
|
193
|
+
print(f"No tensor at index {tensor_index} found.", sys.stderr)
|
194
|
+
raise IndexError
|
195
|
+
|
196
|
+
length = self.get_tensor_size(tensor_index)
|
197
|
+
if tensor is None:
|
198
|
+
tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
|
199
|
+
else:
|
200
|
+
length = len(tensor.tobytes())
|
201
|
+
if length != length:
|
202
|
+
print(
|
203
|
+
"ERROR: mismatching size in get_output_tensor %d vs %d"
|
204
|
+
% (length, length)
|
205
|
+
)
|
206
|
+
|
207
|
+
data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
|
208
|
+
self._check_status(lib.get_output_tensor(self.obj, count, data_ptr, length))
|
209
|
+
return tensor
|
210
|
+
|
211
|
+
def get_input_tensor(self, input_index: int = 0, model_index: int = 0) -> ndarray:
|
212
|
+
"""! Read the data in the input tensor of a model.
|
213
|
+
@param input_index The index of input tensor to target.
|
214
|
+
@param model_index The engine to target, for interpreters that support multiple models
|
215
|
+
running concurrently. Defaults to 0 for use with a single model.
|
216
|
+
@return The data that was stored in the output tensor.
|
217
|
+
"""
|
218
|
+
tensor_details = self.get_input_details(model_index)[input_index]
|
219
|
+
tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
|
220
|
+
data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
|
221
|
+
|
222
|
+
l = len(tensor.tobytes())
|
223
|
+
self._check_status(lib.get_input_tensor(self.obj, input_index, data_ptr, l))
|
224
|
+
return tensor
|
225
|
+
|
226
|
+
    def reset(self, model_index: int = 0) -> None:
        """! Resets the model.
        @param model_index Present for API symmetry; the C call targets the
        single interpreter held in self.obj.
        """
        self._check_status(lib.reset(self.obj))
|
229
|
+
|
230
|
+
def invoke(self, model_index: int = 0) -> None:
|
231
|
+
"""! Invoke the model and starting inference of the current
|
232
|
+
state of the tensors.
|
233
|
+
"""
|
234
|
+
INVOKE_CALLBACK_FUNC = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_int)
|
235
|
+
|
236
|
+
self._check_status(lib.invoke(self.obj))
|
237
|
+
|
238
|
+
def close(self, model_index: int = 0) -> None:
|
239
|
+
"""! Delete the interpreter.
|
240
|
+
@params model_index Defines which interpreter to target in systems with multiple.
|
241
|
+
"""
|
242
|
+
if self.obj:
|
243
|
+
lib.delete_interpreter(self.obj)
|
244
|
+
self.obj = None
|
245
|
+
print(self.obj)
|
246
|
+
|
247
|
+
    def tensor_arena_size(self) -> int:
        """! Read the size of the tensor arena required.
        @return size of the tensor arena as an integer.
        """
        return lib.arena_used_bytes(self.obj)
|
252
|
+
|
253
|
+
    def _check_status(self, status) -> None:
        """! Read a status code and raise an exception.
        @param status Status code (int from the C library, or an
        XTFLMInterpreterStatus value).
        @raises RuntimeError With the library's error string on ERROR status.
        """
        if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
            # Fetch the textual error the C side stored for this interpreter.
            lib.get_error(self.obj, self._error_msg)
            raise RuntimeError(self._error_msg.value.decode("utf-8"))
|
260
|
+
|
261
|
+
    def print_memory_plan(self) -> None:
        """! Print a plan of memory allocation"""
        lib.print_memory_plan(self.obj)
|
264
|
+
|
265
|
+
    def allocate_tensors(self):
        """! Dummy function to match tf.lite.Interpreter() API.
        Allocation happens during initialise_interpreter(), so this is a no-op.
        """
        return
|
268
|
+
|
269
|
+
def get_input_tensor_size(self, input_index: int = 0, model_index: int = 0) -> int:
|
270
|
+
"""! Read the size of the input tensor from the model.
|
271
|
+
@param input_index The index of input tensor to target.
|
272
|
+
@param model_index The model to target, for interpreters that support multiple models
|
273
|
+
running concurrently. Defaults to 0 for use with a single model.
|
274
|
+
@return The size of the input tensor as an integer.
|
275
|
+
"""
|
276
|
+
|
277
|
+
# Select correct model from model list
|
278
|
+
model = self.get_model(model_index)
|
279
|
+
modelBuf = Model.GetRootAsModel(model.model_content, 0)
|
280
|
+
|
281
|
+
# Get index of specific input tensor
|
282
|
+
tensorIndex = modelBuf.Subgraphs(0).Inputs(input_index)
|
283
|
+
|
284
|
+
tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
|
285
|
+
|
286
|
+
tensorSize: int
|
287
|
+
if tensorType == TensorType.INT8:
|
288
|
+
tensorSize = 1 # int8 is 1 byte
|
289
|
+
elif tensorType == TensorType.INT16:
|
290
|
+
tensorSize = 2 # int16 is 2 bytes
|
291
|
+
elif tensorType == TensorType.INT32:
|
292
|
+
tensorSize = 4 # int32 is 4 bytes
|
293
|
+
elif tensorType == TensorType.FLOAT32:
|
294
|
+
tensorSize = 4 # float32 is 4 bytes
|
295
|
+
else:
|
296
|
+
print(tensorType)
|
297
|
+
self._check_status(XTFLMInterpreterStatus.ERROR)
|
298
|
+
tensorSize = 0
|
299
|
+
|
300
|
+
# Calculate tensor size by multiplying shape elements
|
301
|
+
for i in range(0, modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
|
302
|
+
tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(
|
303
|
+
i
|
304
|
+
)
|
305
|
+
return tensorSize
|
306
|
+
|
307
|
+
def get_output_tensor_size(
|
308
|
+
self, output_index: int = 0, model_index: int = 0
|
309
|
+
) -> int:
|
310
|
+
"""! Read the size of the output tensor from the model.
|
311
|
+
@param output_index The index of output tensor to target.
|
312
|
+
@param model_index The model to target, for interpreters that support multiple models
|
313
|
+
running concurrently. Defaults to 0 for use with a single model.
|
314
|
+
@return The size of the output tensor as an integer.
|
315
|
+
"""
|
316
|
+
|
317
|
+
# Select correct model from model list
|
318
|
+
modelBuf = None
|
319
|
+
model = self.get_model(model_index)
|
320
|
+
modelBuf = Model.GetRootAsModel(model.model_content, 0)
|
321
|
+
|
322
|
+
# Get index of specific output tensor
|
323
|
+
tensorIndex = modelBuf.Subgraphs(0).Outputs(output_index)
|
324
|
+
|
325
|
+
tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
|
326
|
+
|
327
|
+
tensorSize: int
|
328
|
+
if tensorType == TensorType.INT8:
|
329
|
+
tensorSize = 1 # int8 is 1 byte
|
330
|
+
elif tensorType == TensorType.INT16:
|
331
|
+
tensorSize = 2 # int16 is 2 bytes
|
332
|
+
elif tensorType == TensorType.INT32:
|
333
|
+
tensorSize = 4 # int32 is 4 bytes
|
334
|
+
elif tensorType == TensorType.FLOAT32:
|
335
|
+
tensorSize = 4 # float32 is 4 bytes
|
336
|
+
else:
|
337
|
+
print(tensorType)
|
338
|
+
self._check_status(XTFLMInterpreterStatus.ERROR)
|
339
|
+
tensorSize = 0
|
340
|
+
|
341
|
+
# Calculate tensor size by multiplying shape elements
|
342
|
+
for i in range(0, modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
|
343
|
+
tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(
|
344
|
+
i
|
345
|
+
)
|
346
|
+
return tensorSize
|
347
|
+
|
348
|
+
def get_tensor_size(self, tensor_index: int = 0, model_index: int = 0) -> int:
|
349
|
+
"""! Read the size of the input tensor from the model.
|
350
|
+
@param tensor_index The index of input tensor to target.
|
351
|
+
@param model_index The model to target, for interpreters that support multiple models
|
352
|
+
running concurrently. Defaults to 0 for use with a single model.
|
353
|
+
@return The size of the input tensor as an integer.
|
354
|
+
"""
|
355
|
+
|
356
|
+
# Select correct model from model list
|
357
|
+
modelBuf = None
|
358
|
+
model = self.get_model(model_index)
|
359
|
+
modelBuf = Model.GetRootAsModel(model.model_content, 0)
|
360
|
+
|
361
|
+
tensorType = modelBuf.Subgraphs(0).Tensors(tensor_index).Type()
|
362
|
+
if tensorType == TensorType.INT8:
|
363
|
+
tensorSize = 1 # int8 is 1 byte
|
364
|
+
elif tensorType == TensorType.INT16:
|
365
|
+
tensorSize = 2 # int16 is 2 bytes
|
366
|
+
elif tensorType == TensorType.INT32:
|
367
|
+
tensorSize = 4 # int32 is 4 bytes
|
368
|
+
elif tensorType == TensorType.FLOAT32:
|
369
|
+
tensorSize = 4 # float32 is 4 bytes
|
370
|
+
else:
|
371
|
+
print(tensorType)
|
372
|
+
self._check_status(XTFLMInterpreterStatus.ERROR)
|
373
|
+
|
374
|
+
# Calculate tensor size by multiplying shape elements
|
375
|
+
for i in range(0, modelBuf.Subgraphs(0).Tensors(tensor_index).ShapeLength()):
|
376
|
+
tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensor_index).Shape(
|
377
|
+
i
|
378
|
+
)
|
379
|
+
return tensorSize
|
380
|
+
|
381
|
+
def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
|
382
|
+
"""! Reads the input tensor details from the model.
|
383
|
+
@param model_index The model to target, for interpreters that support multiple models
|
384
|
+
running concurrently. Defaults to 0 for use with a single model.
|
385
|
+
@return Tensor details, including the index, name, shape, data type, and quantization
|
386
|
+
parameters.
|
387
|
+
"""
|
388
|
+
|
389
|
+
# Select correct model from model list
|
390
|
+
modelBuf = None
|
391
|
+
model = self.get_model(model_index)
|
392
|
+
modelBuf = Model.GetRootAsModel(model.model_content, 0)
|
393
|
+
|
394
|
+
inputsList = []
|
395
|
+
for input_ in range(0, modelBuf.Subgraphs(0).InputsLength()):
|
396
|
+
tensorIndex = modelBuf.Subgraphs(0).Inputs(input_)
|
397
|
+
|
398
|
+
# Generate dictioary of tensor details
|
399
|
+
dtype: Union[Type[Any]]
|
400
|
+
if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
|
401
|
+
dtype = np.int8
|
402
|
+
elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
|
403
|
+
dtype = np.int16
|
404
|
+
elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
|
405
|
+
dtype = np.int32
|
406
|
+
elif (
|
407
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
|
408
|
+
):
|
409
|
+
dtype = np.float32
|
410
|
+
else:
|
411
|
+
raise TypeError
|
412
|
+
|
413
|
+
details = {
|
414
|
+
"name": str(modelBuf.Subgraphs(0).Tensors(tensorIndex).Name())[
|
415
|
+
1:
|
416
|
+
].strip("'"),
|
417
|
+
"index": tensorIndex,
|
418
|
+
"shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
|
419
|
+
"shape_signature": modelBuf.Subgraphs(0)
|
420
|
+
.Tensors(tensorIndex)
|
421
|
+
.ShapeSignatureAsNumpy(),
|
422
|
+
"dtype": dtype,
|
423
|
+
"quantization": (
|
424
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
|
425
|
+
modelBuf.Subgraphs(0)
|
426
|
+
.Tensors(tensorIndex)
|
427
|
+
.Quantization()
|
428
|
+
.ZeroPoint(0),
|
429
|
+
),
|
430
|
+
"quantization_parameters": {
|
431
|
+
"scales": modelBuf.Subgraphs(0)
|
432
|
+
.Tensors(tensorIndex)
|
433
|
+
.Quantization()
|
434
|
+
.ScaleAsNumpy(),
|
435
|
+
"zero_points": modelBuf.Subgraphs(0)
|
436
|
+
.Tensors(tensorIndex)
|
437
|
+
.Quantization()
|
438
|
+
.ZeroPointAsNumpy(),
|
439
|
+
"quantized_dimension": modelBuf.Subgraphs(0)
|
440
|
+
.Tensors(tensorIndex)
|
441
|
+
.Quantization()
|
442
|
+
.QuantizedDimension(),
|
443
|
+
},
|
444
|
+
"sparsity_parameters": {
|
445
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
|
446
|
+
},
|
447
|
+
}
|
448
|
+
inputsList.append(details)
|
449
|
+
|
450
|
+
return inputsList
|
451
|
+
|
452
|
+
def get_output_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
|
453
|
+
"""! Reads the output tensor details from the model.
|
454
|
+
@param output_index The index of output tensor to target.
|
455
|
+
@param model_index The model to target, for interpreters that support multiple models
|
456
|
+
running concurrently. Defaults to 0 for use with a single model.
|
457
|
+
@return Tensor details, including the index, name, shape, data type, and quantization
|
458
|
+
parameters.
|
459
|
+
"""
|
460
|
+
|
461
|
+
# Select correct model from models list
|
462
|
+
model = self.get_model(model_index)
|
463
|
+
modelBuf = Model.GetRootAsModel(model.model_content, 0)
|
464
|
+
|
465
|
+
outputsList = []
|
466
|
+
for output_ in range(0, modelBuf.Subgraphs(0).OutputsLength()):
|
467
|
+
# Output tensor is last tensor
|
468
|
+
tensorIndex = modelBuf.Subgraphs(0).Outputs(output_)
|
469
|
+
|
470
|
+
dtype: Union[Type[Any]]
|
471
|
+
# Generate dictionary of tensor details
|
472
|
+
if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
|
473
|
+
dtype = np.int8
|
474
|
+
elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
|
475
|
+
dtype = np.int16
|
476
|
+
elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
|
477
|
+
dtype = np.int32
|
478
|
+
elif (
|
479
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
|
480
|
+
):
|
481
|
+
dtype = np.float32
|
482
|
+
|
483
|
+
details = {
|
484
|
+
"name": str(modelBuf.Subgraphs(0).Tensors(tensorIndex).Name())[
|
485
|
+
1:
|
486
|
+
].strip("'"),
|
487
|
+
"index": tensorIndex,
|
488
|
+
"shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
|
489
|
+
"shape_signature": modelBuf.Subgraphs(0)
|
490
|
+
.Tensors(tensorIndex)
|
491
|
+
.ShapeSignatureAsNumpy(),
|
492
|
+
"dtype": dtype,
|
493
|
+
"quantization": (
|
494
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
|
495
|
+
modelBuf.Subgraphs(0)
|
496
|
+
.Tensors(tensorIndex)
|
497
|
+
.Quantization()
|
498
|
+
.ZeroPoint(0),
|
499
|
+
),
|
500
|
+
"quantization_parameters": {
|
501
|
+
"scales": modelBuf.Subgraphs(0)
|
502
|
+
.Tensors(tensorIndex)
|
503
|
+
.Quantization()
|
504
|
+
.ScaleAsNumpy(),
|
505
|
+
"zero_points": modelBuf.Subgraphs(0)
|
506
|
+
.Tensors(tensorIndex)
|
507
|
+
.Quantization()
|
508
|
+
.ZeroPointAsNumpy(),
|
509
|
+
"quantized_dimension": modelBuf.Subgraphs(0)
|
510
|
+
.Tensors(tensorIndex)
|
511
|
+
.Quantization()
|
512
|
+
.QuantizedDimension(),
|
513
|
+
},
|
514
|
+
"sparsity_parameters": {
|
515
|
+
modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
|
516
|
+
},
|
517
|
+
}
|
518
|
+
outputsList.append(details)
|
519
|
+
|
520
|
+
return outputsList
|
521
|
+
|
522
|
+
def set_model(
|
523
|
+
self,
|
524
|
+
model_path: Optional[str] = None,
|
525
|
+
model_content: Optional[bytes] = None,
|
526
|
+
params_path: Optional[str] = None,
|
527
|
+
params_content: Optional[bytes] = None,
|
528
|
+
model_index: int = 0,
|
529
|
+
secondary_memory: bool = False,
|
530
|
+
flash: bool = False,
|
531
|
+
) -> None:
|
532
|
+
"""! Adds a model to the interpreter's list of models.
|
533
|
+
@param model_path The path to the model file (.tflite), alternative to model_content.
|
534
|
+
@param model_content The byte array representing a model, alternative to model_path.
|
535
|
+
@param params_path The path to the params file for the model,
|
536
|
+
alternative to params_content (optional).
|
537
|
+
@param params_content The byte array representing the model parameters,
|
538
|
+
alternative to params_path (optional).
|
539
|
+
@param model_index The model to target, for interpreters that support multiple models
|
540
|
+
running concurrently. Defaults to 0 for use with a single model.
|
541
|
+
"""
|
542
|
+
|
543
|
+
# Check model_path or model_content is valid
|
544
|
+
if not model_path and not model_content:
|
545
|
+
raise ValueError("model_path or model_content must be provided")
|
546
|
+
tile_found = False
|
547
|
+
# Find correct model and replace
|
548
|
+
for model in self.models:
|
549
|
+
if model.tile == model_index:
|
550
|
+
model = self.modelData(
|
551
|
+
model_path,
|
552
|
+
model_content,
|
553
|
+
params_path,
|
554
|
+
params_content,
|
555
|
+
model_index,
|
556
|
+
secondary_memory,
|
557
|
+
flash,
|
558
|
+
)
|
559
|
+
tile_found = True
|
560
|
+
break
|
561
|
+
# If model wasn't previously set, add it to list
|
562
|
+
if not tile_found:
|
563
|
+
self.models.append(
|
564
|
+
self.modelData(
|
565
|
+
model_path,
|
566
|
+
model_content,
|
567
|
+
params_path,
|
568
|
+
params_content,
|
569
|
+
model_index,
|
570
|
+
secondary_memory,
|
571
|
+
flash,
|
572
|
+
)
|
573
|
+
)
|
574
|
+
self.initialise_interpreter(model_index)
|
575
|
+
|
576
|
+
def get_model(self, model_index: int = 0):
|
577
|
+
for model in self.models:
|
578
|
+
if model.tile == model_index:
|
579
|
+
return model
|
580
|
+
|
581
|
+
class modelData:
|
582
|
+
"""! The model data class
|
583
|
+
A class that holds a model and data associated with a single model.
|
584
|
+
"""
|
585
|
+
|
586
|
+
def __init__(
|
587
|
+
self,
|
588
|
+
model_path: Optional[str],
|
589
|
+
model_content: Optional[bytes],
|
590
|
+
params_path: Optional[str],
|
591
|
+
params_content: Optional[bytes],
|
592
|
+
model_index: int,
|
593
|
+
secondary_memory: bool,
|
594
|
+
flash: bool,
|
595
|
+
):
|
596
|
+
"""! Model data initializer.
|
597
|
+
Sets up variables, generates a list of operators used in the model,
|
598
|
+
and reads model and params paths into byte arrays (content).
|
599
|
+
@param model_path Path to the model file (.tflite).
|
600
|
+
@param model_content Model model_content (byte array).
|
601
|
+
@param params_path Path to model parameters file.
|
602
|
+
@param params_content Model parameters content (byte array)
|
603
|
+
@param model_index The model to target, for interpreters that support multiple models
|
604
|
+
running concurrently. Defaults to 0 for use with a single model.
|
605
|
+
"""
|
606
|
+
self.model_path: Optional[str] = model_path
|
607
|
+
self.model_content: Optional[bytes] = model_content
|
608
|
+
self.params_path: Optional[str] = params_path
|
609
|
+
self.params_content: Optional[bytes] = params_content
|
610
|
+
self.tile: int = model_index
|
611
|
+
self.secondary_memory = secondary_memory
|
612
|
+
self.flash = flash
|
613
|
+
self.opList: List[str] = []
|
614
|
+
self.pathToContent()
|
615
|
+
self.modelToOpList()
|
616
|
+
|
617
|
+
def modelToOpList(self) -> None:
|
618
|
+
"""! Generates operator list from model."""
|
619
|
+
|
620
|
+
# Load model
|
621
|
+
buffer = self.model_content
|
622
|
+
model = Model.GetRootAsModel(buffer, 0)
|
623
|
+
self.opList = []
|
624
|
+
|
625
|
+
# Iterate through operators in model and add operators to opList
|
626
|
+
for y in range(0, model.Subgraphs(0).OperatorsLength()):
|
627
|
+
opcode = model.OperatorCodes(
|
628
|
+
model.Subgraphs(0).Operators(y).OpcodeIndex()
|
629
|
+
)
|
630
|
+
# If custom opcode parse string
|
631
|
+
if opcode.BuiltinCode() == 32:
|
632
|
+
self.opList.append(str(opcode.CustomCode()).strip("b'"))
|
633
|
+
# If built in op code, decode
|
634
|
+
else:
|
635
|
+
self.opList.append(opcode2name(opcode.BuiltinCode()))
|
636
|
+
|
637
|
+
def pathToContent(self) -> None:
|
638
|
+
"""! Reads model and params paths to content (byte arrays)"""
|
639
|
+
|
640
|
+
# Check if path exists but not content
|
641
|
+
if self.model_content is None and self.model_path is not None:
|
642
|
+
with open(self.model_path, "rb") as input_fd:
|
643
|
+
self.model_content = input_fd.read()
|
644
|
+
|
645
|
+
# Check if params_path exists but not params_content
|
646
|
+
if self.params_content is None and self.params_path is not None:
|
647
|
+
with open(self.params_path, "rb") as input_fd2:
|
648
|
+
self.params_content = input_fd2.read()
|
649
|
+
|
650
|
+
# If params_content is None, set to empty byte array
|
651
|
+
if self.params_content is None:
|
652
|
+
self.params_content = bytes([])
|
Binary file
|
Binary file
|
Binary file
|
@@ -0,0 +1,33 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: xmos_ai_tools
|
3
|
+
Version: 1.3.2.dev80
|
4
|
+
Summary: XMOS AI Tools
|
5
|
+
Home-page: https://github.com/xmos/ai_tools
|
6
|
+
Author: XMOS
|
7
|
+
Author-email: support@xmos.com
|
8
|
+
License: LICENSE.txt
|
9
|
+
Keywords: tensorflow binarized neural networks
|
10
|
+
Classifier: License :: Other/Proprietary License
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
12
|
+
Classifier: Intended Audience :: Developers
|
13
|
+
Classifier: Intended Audience :: Education
|
14
|
+
Classifier: Intended Audience :: Science/Research
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
16
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
17
|
+
Classifier: Programming Language :: Python :: 3.9
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
19
|
+
Classifier: Topic :: Scientific/Engineering
|
20
|
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
22
|
+
Classifier: Topic :: Software Development
|
23
|
+
Classifier: Topic :: Software Development :: Libraries
|
24
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
25
|
+
Requires-Python: >=3.9
|
26
|
+
Description-Content-Type: text/markdown
|
27
|
+
Requires-Dist: numpy<2.0
|
28
|
+
Requires-Dist: tflite>=2.4.0
|
29
|
+
|
30
|
+
Documentation
|
31
|
+
-------------
|
32
|
+
|
33
|
+
Click [here](https://github.com/xmos/ai_tools/blob/e1acb3f468151dc1bc31896ef1138bc3b0460505/README.md) for documentation on using xmos-ai-tools to deploy AI models on xcore.
|