xmos-ai-tools 1.2.1.dev20__py3-none-win_amd64.whl → 1.3.2.dev180__py3-none-win_amd64.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (229)
  1. xmos_ai_tools/__init__.py +7 -7
  2. xmos_ai_tools/io_server/__init__.py +151 -151
  3. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -13
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -8
  5. xmos_ai_tools/runtime/include/flash_server.h +2 -3
  6. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +2 -1
  7. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +2 -1
  8. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +4 -2
  9. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +2 -0
  10. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +11 -51
  11. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +3 -0
  12. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +1 -0
  13. xmos_ai_tools/runtime/include/lib_nn/api/version.h +2 -2
  14. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  15. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  16. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +6 -0
  17. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +13 -13
  18. xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h +64 -0
  19. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +1 -1
  20. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +1 -1
  21. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
  22. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
  23. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
  24. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +21 -7
  25. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
  26. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
  27. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
  28. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
  29. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
  30. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
  31. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
  32. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
  33. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
  34. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
  35. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
  36. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
  37. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
  38. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
  39. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
  40. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
  41. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
  42. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
  43. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
  44. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
  45. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
  46. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
  47. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
  48. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
  49. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
  50. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
  51. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
  52. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
  53. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
  54. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
  55. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
  56. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
  57. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
  58. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
  59. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
  60. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
  61. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
  62. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
  63. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
  64. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
  65. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
  66. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
  67. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
  68. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
  69. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
  70. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
  71. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
  72. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
  73. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
  74. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
  75. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
  76. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
  77. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
  78. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
  79. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
  80. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
  81. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
  82. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
  83. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
  84. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
  85. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
  86. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
  87. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
  88. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
  89. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
  90. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
  91. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
  92. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
  93. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
  94. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
  95. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
  96. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
  97. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
  98. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
  99. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
  100. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
  101. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
  102. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
  103. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
  104. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
  105. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
  106. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
  107. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
  108. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
  109. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
  110. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
  111. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
  112. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
  113. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
  114. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
  115. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
  116. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
  117. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
  118. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
  119. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
  120. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
  121. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
  122. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
  123. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +6 -2
  124. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
  125. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
  126. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
  127. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
  128. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
  129. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
  130. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
  131. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
  132. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
  133. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
  134. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
  135. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
  136. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
  137. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
  138. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
  139. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
  140. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
  141. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
  142. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
  143. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
  144. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
  145. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
  146. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
  147. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
  148. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
  149. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
  150. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
  151. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
  152. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
  153. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
  154. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
  155. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
  156. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
  157. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
  158. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
  159. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
  160. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
  161. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
  162. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
  163. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
  164. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
  165. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
  166. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
  167. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
  168. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
  169. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
  170. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
  171. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
  172. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
  173. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
  174. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
  175. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
  176. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
  177. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
  178. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
  179. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
  180. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
  181. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
  182. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
  183. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
  184. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
  185. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
  186. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
  187. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
  188. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
  189. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
  190. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
  191. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
  192. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
  193. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
  194. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
  195. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
  196. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
  197. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
  198. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
  199. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
  200. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
  201. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
  202. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
  203. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
  204. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
  205. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
  206. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
  207. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
  208. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
  209. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
  210. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
  211. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
  212. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
  213. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
  214. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
  215. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
  216. xmos_ai_tools/runtime/lib/host_xtflitemicro.lib +0 -0
  217. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  218. xmos_ai_tools/xformer/__init__.py +64 -60
  219. xmos_ai_tools/xformer/flash.py +190 -190
  220. xmos_ai_tools/xinterpreters/__init__.py +1 -1
  221. xmos_ai_tools/xinterpreters/exceptions.py +38 -38
  222. xmos_ai_tools/xinterpreters/host_interpreter.py +651 -652
  223. xmos_ai_tools/xinterpreters/libs/windows/xtflm_python.dll +0 -0
  224. {xmos_ai_tools-1.2.1.dev20.data → xmos_ai_tools-1.3.2.dev180.data}/data/Scripts/xcore-opt.exe +0 -0
  225. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/METADATA +5 -7
  226. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/RECORD +228 -226
  227. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/WHEEL +1 -1
  228. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +0 -19
  229. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/top_level.txt +0 -0
@@ -1,652 +1,651 @@
- # Copyright 2022 XMOS LIMITED. This Software is subject to the terms of the
- # XMOS Public License: Version 1
- import sys
- import ctypes
- from typing import Optional, Dict, Any, List
- from tflite.Model import Model
- from tflite.TensorType import TensorType
- from tflite import opcode2name
- from enum import Enum
-
- import numpy as np
- from pathlib import Path
-
- from numpy import ndarray
-
- # DLL path for different platforms
- __PARENT_DIR = Path(__file__).parent.absolute()
- if sys.platform.startswith("linux"):
-     lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "linux", "xtflm_python.so"))
- elif sys.platform == "darwin":
-     lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "macos", "xtflm_python.dylib"))
- else:
-     lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "windows", "xtflm_python.dll"))
-
- lib = ctypes.cdll.LoadLibrary(lib_path)
-
- from xmos_ai_tools.xinterpreters.exceptions import (
-     InterpreterError,
-     AllocateTensorsError,
-     InvokeError,
-     SetTensorError,
-     GetTensorError,
-     ModelSizeError,
-     ArenaSizeError,
-     DeviceTimeoutError,
- )
-
- MAX_TENSOR_ARENA_SIZE = 10000000
-
-
- class XTFLMInterpreterStatus(Enum):
-     OK = 0
-     ERROR = 1
-
-
- class TFLMHostInterpreter:
-     """! The xcore interpreters host class.
-     The interpreter to be used on a host.
-     """
-
-     def __init__(self, max_tensor_arena_size: int = MAX_TENSOR_ARENA_SIZE) -> None:
-         """! Host interpreter initializer.
-         Sets up functions from the cdll, and calls to cdll function to create a new interpreter.
-         """
-         self._error_msg = ctypes.create_string_buffer(4096)
-
-         lib.new_interpreter.restype = ctypes.c_void_p
-         lib.new_interpreter.argtypes = [
-             ctypes.c_size_t,
-         ]
-
-         lib.print_memory_plan.restype = None
-         lib.print_memory_plan.argtypes = [ctypes.c_void_p]
-
-         lib.delete_interpreter.restype = None
-         lib.delete_interpreter.argtypes = [ctypes.c_void_p]
-
-         lib.initialize.restype = ctypes.c_int
-         lib.initialize.argtypes = [
-             ctypes.c_void_p,
-             ctypes.c_char_p,
-             ctypes.c_size_t,
-             ctypes.c_char_p,
-         ]
-
-         lib.set_input_tensor.restype = ctypes.c_int
-         lib.set_input_tensor.argtypes = [
-             ctypes.c_void_p,
-             ctypes.c_size_t,
-             ctypes.c_void_p,
-             ctypes.c_int,
-         ]
-
-         lib.get_output_tensor.restype = ctypes.c_int
-         lib.get_output_tensor.argtypes = [
-             ctypes.c_void_p,
-             ctypes.c_size_t,
-             ctypes.c_void_p,
-             ctypes.c_int,
-         ]
-
-         lib.get_input_tensor.restype = ctypes.c_int
-         lib.get_input_tensor.argtypes = [
-             ctypes.c_void_p,
-             ctypes.c_size_t,
-             ctypes.c_void_p,
-             ctypes.c_int,
-         ]
-
-         lib.reset.restype = ctypes.c_int
-         lib.reset.argtypes = [ctypes.c_void_p]
-
-         lib.invoke.restype = ctypes.c_int
-         lib.invoke.argtypes = [ctypes.c_void_p]
-
-         lib.get_error.restype = ctypes.c_size_t
-         lib.get_error.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
-
-         lib.arena_used_bytes.restype = ctypes.c_size_t
-         lib.arena_used_bytes.argtypes = [
-             ctypes.c_void_p,
-         ]
-
-         self._max_tensor_arena_size = max_tensor_arena_size
-         self.models: List[TFLMHostInterpreter.modelData] = []
-
-     def __enter__(self) -> "TFLMHostInterpreter":
-         return self
-
-     def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
-         """! Exit calls close function to delete interpreter"""
-         self.close()
-
-     def initialise_interpreter(self, model_index: int = 0) -> None:
-         """! Interpreter initialiser, initialised interpreter with model and parameters (optional)
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         """
-         max_model_size = 50000000
-         self.obj = lib.new_interpreter(max_model_size)
-         currentModel = None
-
-         for model in self.models:
-             if model.tile == model_index:
-                 currentModel = model
-
-         if currentModel is None:
-             print(f"No model at index {model_index} found.", sys.stderr)
-             raise IndexError
-
-         assert currentModel.model_content is not None
-
-         status = lib.initialize(
-             self.obj,
-             currentModel.model_content,
-             len(currentModel.model_content),
-             currentModel.params_content,
-         )
-         if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
-             raise RuntimeError("Unable to initialize interpreter")
-
-     def set_tensor(self, tensor_index: int, value: ndarray, model_index=0) -> None:
-         """! Write the input tensor of a model.
-         @param value The blob of data to set the tensor to.
-         @param tensor_index The index of input tensor to target. Defaults to 0.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         """
-         val = value.tobytes()
-
-         length = len(val)
-         length2 = self.get_input_tensor_size(tensor_index)
-         if length != length2:
-             print(
-                 "ERROR: mismatching size in set_input_tensor %d vs %d"
-                 % (length, length2)
-             )
-
-         self._check_status(lib.set_input_tensor(self.obj, tensor_index, val, length))
-
-     def get_tensor(
-         self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None
-     ) -> ndarray:
-         """! Read data from the output tensor of a model.
-         @param tensor_index The index of output tensor to target.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @param tensor Tensor of correct shape to write into (optional).
-         @return The data that was stored in the output tensor.
-         """
-
-         count: Optional[int]
-         tensor_details: Optional[Dict[str, Any]]
-         count, tensor_details = next(
-             filter(
-                 lambda x: x[1]["index"] == tensor_index,
-                 enumerate(self.get_output_details()),
-             ),
-             (None, None),
-         )
-
-         if count is None or tensor_details is None:
-             print(f"No tensor at index {tensor_index} found.", sys.stderr)
-             raise IndexError
-
-         length = self.get_tensor_size(tensor_index)
-         if tensor is None:
-             tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
-         else:
-             length = len(tensor.tobytes())
-             if length != length:
-                 print(
-                     "ERROR: mismatching size in get_output_tensor %d vs %d"
-                     % (length, length)
-                 )
-
-         data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
-         self._check_status(lib.get_output_tensor(self.obj, count, data_ptr, length))
-         return tensor
-
-     def get_input_tensor(self, input_index: int = 0, model_index: int = 0) -> ndarray:
-         """! Read the data in the input tensor of a model.
-         @param input_index The index of input tensor to target.
-         @param model_index The engine to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return The data that was stored in the output tensor.
-         """
-         tensor_details = self.get_input_details(model_index)[input_index]
-         tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
-         data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
-
-         l = len(tensor.tobytes())
-         self._check_status(lib.get_input_tensor(self.obj, input_index, data_ptr, l))
-         return tensor
-
-     def reset(self, model_index: int = 0) -> None:
-         """! Resets the model."""
-         self._check_status(lib.reset(self.obj))
-
-     def invoke(self, model_index: int = 0) -> None:
-         """! Invoke the model and starting inference of the current
-         state of the tensors.
-         """
-         INVOKE_CALLBACK_FUNC = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_int)
-
-         self._check_status(lib.invoke(self.obj))
-
-     def close(self, model_index: int = 0) -> None:
-         """! Delete the interpreter.
-         @params model_index Defines which interpreter to target in systems with multiple.
-         """
-         if self.obj:
-             lib.delete_interpreter(self.obj)
-             self.obj = None
-             print(self.obj)
-
-     def tensor_arena_size(self) -> int:
-         """! Read the size of the tensor arena required.
-         @return size of the tensor arena as an integer.
-         """
-         return lib.arena_used_bytes(self.obj)
-
-     def _check_status(self, status) -> None:
-         """! Read a status code and raise an exception.
-         @param status Status code.
-         """
-         if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
-             lib.get_error(self.obj, self._error_msg)
-             raise RuntimeError(self._error_msg.value.decode("utf-8"))
-
-     def print_memory_plan(self) -> None:
-         """! Print a plan of memory allocation"""
-         lib.print_memory_plan(self.obj)
-
-     def allocate_tensors(self):
-         """! Dummy function to match tf.lite.Interpreter() API"""
-         return
-
-     def get_input_tensor_size(self, input_index: int = 0, model_index: int = 0) -> int:
-         """! Read the size of the input tensor from the model.
-         @param input_index The index of input tensor to target.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return The size of the input tensor as an integer.
-         """
-
-         # Select correct model from model list
-         model = self.get_model(model_index)
-         modelBuf = Model.GetRootAsModel(model.model_content, 0)
-
-         # Get index of specific input tensor
-         tensorIndex = modelBuf.Subgraphs(0).Inputs(input_index)
-
-         tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
-
-         tensorSize: int
-         if tensorType == TensorType.INT8:
-             tensorSize = 1  # int8 is 1 byte
-         elif tensorType == TensorType.INT16:
-             tensorSize = 2  # int16 is 2 bytes
-         elif tensorType == TensorType.INT32:
-             tensorSize = 4  # int32 is 4 bytes
-         elif tensorType == TensorType.FLOAT32:
-             tensorSize = 4  # float32 is 4 bytes
-         else:
-             print(tensorType)
-             self._check_status(XTFLMInterpreterStatus.ERROR)
-             tensorSize = 0
-
-         # Calculate tensor size by multiplying shape elements
-         for i in range(0, modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
-             tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(
-                 i
-             )
-         return tensorSize
-
-     def get_output_tensor_size(
-         self, output_index: int = 0, model_index: int = 0
-     ) -> int:
-         """! Read the size of the output tensor from the model.
-         @param output_index The index of output tensor to target.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return The size of the output tensor as an integer.
-         """
-
-         # Select correct model from model list
-         modelBuf = None
-         model = self.get_model(model_index)
-         modelBuf = Model.GetRootAsModel(model.model_content, 0)
-
-         # Get index of specific output tensor
-         tensorIndex = modelBuf.Subgraphs(0).Outputs(output_index)
-
-         tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
-
-         tensorSize: int
-         if tensorType == TensorType.INT8:
-             tensorSize = 1  # int8 is 1 byte
-         elif tensorType == TensorType.INT16:
-             tensorSize = 2  # int16 is 2 bytes
-         elif tensorType == TensorType.INT32:
-             tensorSize = 4  # int32 is 4 bytes
-         elif tensorType == TensorType.FLOAT32:
-             tensorSize = 4  # float32 is 4 bytes
-         else:
-             print(tensorType)
-             self._check_status(XTFLMInterpreterStatus.ERROR)
-             tensorSize = 0
-
-         # Calculate tensor size by multiplying shape elements
-         for i in range(0, modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
-             tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(
-                 i
-             )
-         return tensorSize
-
-     def get_tensor_size(self, tensor_index: int = 0, model_index: int = 0) -> int:
-         """! Read the size of the input tensor from the model.
-         @param tensor_index The index of input tensor to target.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return The size of the input tensor as an integer.
-         """
-
-         # Select correct model from model list
-         modelBuf = None
-         model = self.get_model(model_index)
-         modelBuf = Model.GetRootAsModel(model.model_content, 0)
-
-         tensorType = modelBuf.Subgraphs(0).Tensors(tensor_index).Type()
-         if tensorType == TensorType.INT8:
-             tensorSize = 1  # int8 is 1 byte
-         elif tensorType == TensorType.INT16:
-             tensorSize = 2  # int16 is 2 bytes
-         elif tensorType == TensorType.INT32:
-             tensorSize = 4  # int32 is 4 bytes
-         elif tensorType == TensorType.FLOAT32:
-             tensorSize = 4  # float32 is 4 bytes
-         else:
-             print(tensorType)
-             self._check_status(XTFLMInterpreterStatus.ERROR)
-
-         # Calculate tensor size by multiplying shape elements
-         for i in range(0, modelBuf.Subgraphs(0).Tensors(tensor_index).ShapeLength()):
-             tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensor_index).Shape(
-                 i
-             )
-         return tensorSize
-
-     def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
-         """! Reads the input tensor details from the model.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return Tensor details, including the index, name, shape, data type, and quantization
-         parameters.
-         """
-
-         # Select correct model from model list
-         modelBuf = None
-         model = self.get_model(model_index)
-         modelBuf = Model.GetRootAsModel(model.model_content, 0)
-
-         inputsList = []
-         for input_ in range(0, modelBuf.Subgraphs(0).InputsLength()):
-             tensorIndex = modelBuf.Subgraphs(0).Inputs(input_)
-
-             # Generate dictioary of tensor details
-             dtype: Union[Type[Any]]
-             if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
-                 dtype = np.int8
-             elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
-                 dtype = np.int16
-             elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
-                 dtype = np.int32
-             elif (
-                 modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
-             ):
-                 dtype = np.float32
-             else:
-                 raise TypeError
-
-             details = {
-                 "name": str(modelBuf.Subgraphs(0).Tensors(tensorIndex).Name())[
-                     1:
-                 ].strip("'"),
-                 "index": tensorIndex,
-                 "shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
-                 "shape_signature": modelBuf.Subgraphs(0)
-                 .Tensors(tensorIndex)
-                 .ShapeSignatureAsNumpy(),
-                 "dtype": dtype,
-                 "quantization": (
-                     modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
-                     modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ZeroPoint(0),
-                 ),
-                 "quantization_parameters": {
-                     "scales": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ScaleAsNumpy(),
-                     "zero_points": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ZeroPointAsNumpy(),
-                     "quantized_dimension": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .QuantizedDimension(),
-                 },
-                 "sparsity_parameters": {
-                     modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
-                 },
-             }
-             inputsList.append(details)
-
-         return inputsList
-
-     def get_output_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
-         """! Reads the output tensor details from the model.
-         @param output_index The index of output tensor to target.
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         @return Tensor details, including the index, name, shape, data type, and quantization
-         parameters.
-         """
-
-         # Select correct model from models list
-         model = self.get_model(model_index)
-         modelBuf = Model.GetRootAsModel(model.model_content, 0)
-
-         outputsList = []
-         for output_ in range(0, modelBuf.Subgraphs(0).OutputsLength()):
-             # Output tensor is last tensor
-             tensorIndex = modelBuf.Subgraphs(0).Outputs(output_)
-
-             dtype: Union[Type[Any]]
-             # Generate dictionary of tensor details
-             if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
-                 dtype = np.int8
-             elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
-                 dtype = np.int16
-             elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
-                 dtype = np.int32
-             elif (
-                 modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
-             ):
-                 dtype = np.float32
-
-             details = {
-                 "name": str(modelBuf.Subgraphs(0).Tensors(tensorIndex).Name())[
-                     1:
-                 ].strip("'"),
-                 "index": tensorIndex,
-                 "shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
-                 "shape_signature": modelBuf.Subgraphs(0)
-                 .Tensors(tensorIndex)
-                 .ShapeSignatureAsNumpy(),
-                 "dtype": dtype,
-                 "quantization": (
-                     modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
-                     modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ZeroPoint(0),
-                 ),
-                 "quantization_parameters": {
-                     "scales": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ScaleAsNumpy(),
-                     "zero_points": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .ZeroPointAsNumpy(),
-                     "quantized_dimension": modelBuf.Subgraphs(0)
-                     .Tensors(tensorIndex)
-                     .Quantization()
-                     .QuantizedDimension(),
-                 },
-                 "sparsity_parameters": {
-                     modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
-                 },
-             }
-             outputsList.append(details)
-
-         return outputsList
-
-     def set_model(
-         self,
-         model_path: Optional[str] = None,
-         model_content: Optional[bytes] = None,
-         params_path: Optional[str] = None,
-         params_content: Optional[bytes] = None,
-         model_index: int = 0,
-         secondary_memory: bool = False,
-         flash: bool = False,
-     ) -> None:
-         """! Adds a model to the interpreter's list of models.
-         @param model_path The path to the model file (.tflite), alternative to model_content.
-         @param model_content The byte array representing a model, alternative to model_path.
-         @param params_path The path to the params file for the model,
-         alternative to params_content (optional).
-         @param params_content The byte array representing the model parameters,
-         alternative to params_path (optional).
-         @param model_index The model to target, for interpreters that support multiple models
-         running concurrently. Defaults to 0 for use with a single model.
-         """
-
-         # Check model_path or model_content is valid
-         if not model_path and not model_content:
-             raise ValueError("model_path or model_content must be provided")
-         tile_found = False
-         # Find correct model and replace
-         for model in self.models:
-             if model.tile == model_index:
-                 model = self.modelData(
-                     model_path,
-                     model_content,
-                     params_path,
-                     params_content,
-                     model_index,
-                     secondary_memory,
-                     flash,
-                 )
-                 tile_found = True
-                 break
-         # If model wasn't previously set, add it to list
-         if not tile_found:
-             self.models.append(
-                 self.modelData(
-                     model_path,
-                     model_content,
-                     params_path,
-                     params_content,
-                     model_index,
-                     secondary_memory,
-                     flash,
-                 )
-             )
-         self.initialise_interpreter(model_index)
-
-     def get_model(self, model_index: int = 0):
-         for model in self.models:
-             if model.tile == model_index:
-                 return model
-
-     class modelData:
-         """! The model data class
-         A class that holds a model and data associated with a single model.
-         """
-
-         def __init__(
-             self,
-             model_path: Optional[str],
-             model_content: Optional[bytes],
-             params_path: Optional[str],
-             params_content: Optional[bytes],
-             model_index: int,
-             secondary_memory: bool,
-             flash: bool,
-         ):
-             """! Model data initializer.
-             Sets up variables, generates a list of operators used in the model,
-             and reads model and params paths into byte arrays (content).
-             @param model_path Path to the model file (.tflite).
-             @param model_content Model model_content (byte array).
-             @param params_path Path to model parameters file.
-             @param params_content Model parameters content (byte array)
-             @param model_index The model to target, for interpreters that support multiple models
-             running concurrently. Defaults to 0 for use with a single model.
-             """
-             self.model_path: Optional[str] = model_path
-             self.model_content: Optional[bytes] = model_content
-             self.params_path: Optional[str] = params_path
-             self.params_content: Optional[bytes] = params_content
-             self.tile: int = model_index
-             self.secondary_memory = secondary_memory
-             self.flash = flash
-             self.opList: List[str] = []
-             self.pathToContent()
-             self.modelToOpList()
-
-         def modelToOpList(self) -> None:
-             """! Generates operator list from model."""
-
-             # Load model
-             buffer = self.model_content
-             model = Model.GetRootAsModel(buffer, 0)
-             self.opList = []
-
-             # Iterate through operators in model and add operators to opList
-             for y in range(0, model.Subgraphs(0).OperatorsLength()):
-                 opcode = model.OperatorCodes(
-                     model.Subgraphs(0).Operators(y).OpcodeIndex()
-                 )
-                 # If custom opcode parse string
-                 if opcode.BuiltinCode() == 32:
-                     self.opList.append(str(opcode.CustomCode()).strip("b'"))
-                 # If built in op code, decode
-                 else:
-                     self.opList.append(opcode2name(opcode.BuiltinCode()))
-
-         def pathToContent(self) -> None:
-             """! Reads model and params paths to content (byte arrays)"""
-
-             # Check if path exists but not content
-             if self.model_content is None and self.model_path is not None:
-                 with open(self.model_path, "rb") as input_fd:
-                     self.model_content = input_fd.read()
-
-             # Check if params_path exists but not params_content
-             if self.params_content is None and self.params_path is not None:
-                 with open(self.params_path, "rb") as input_fd2:
-                     self.params_content = input_fd2.read()
-
-             # If params_content is None, set to empty byte array
-             if self.params_content is None:
-                 self.params_content = bytes([])
1
+ # Copyright 2022 XMOS LIMITED. This Software is subject to the terms of the
2
+ # XMOS Public License: Version 1
3
+ import sys
4
+ import ctypes
5
+ from typing import Optional, Dict, Any, List
6
+ from tflite.Model import Model
7
+ from tflite.TensorType import TensorType
8
+ from tflite import opcode2name
9
+ from enum import Enum
10
+
11
+ import numpy as np
12
+ from pathlib import Path
13
+
14
+ from numpy import ndarray
15
+
16
+ # DLL path for different platforms
17
+ __PARENT_DIR = Path(__file__).parent.absolute()
18
+ if sys.platform.startswith("linux"):
19
+ lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "linux", "xtflm_python.so"))
20
+ elif sys.platform == "darwin":
21
+ lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "macos", "xtflm_python.dylib"))
22
+ else:
23
+ lib_path = str(Path.joinpath(__PARENT_DIR, "libs", "windows", "xtflm_python.dll"))
24
+
25
+ lib = ctypes.cdll.LoadLibrary(lib_path)
26
+
27
+ from xmos_ai_tools.xinterpreters.exceptions import (
28
+ InterpreterError,
29
+ AllocateTensorsError,
30
+ InvokeError,
31
+ SetTensorError,
32
+ GetTensorError,
33
+ ModelSizeError,
34
+ ArenaSizeError,
35
+ DeviceTimeoutError,
36
+ )
37
+
38
+ MAX_TENSOR_ARENA_SIZE = 10000000
39
+
40
+
41
+ class XTFLMInterpreterStatus(Enum):
42
+ OK = 0
43
+ ERROR = 1
44
+
45
+
46
+ class TFLMHostInterpreter:
47
+ """! The xcore interpreters host class.
48
+ The interpreter to be used on a host.
49
+ """
50
+
51
+ def __init__(self, max_tensor_arena_size: int = MAX_TENSOR_ARENA_SIZE) -> None:
52
+ """! Host interpreter initializer.
53
+ Sets up functions from the cdll, and calls to cdll function to create a new interpreter.
54
+ """
55
+ self._error_msg = ctypes.create_string_buffer(4096)
56
+
57
+ lib.new_interpreter.restype = ctypes.c_void_p
58
+ lib.new_interpreter.argtypes = [
59
+ ctypes.c_size_t,
60
+ ]
61
+
62
+ lib.print_memory_plan.restype = None
63
+ lib.print_memory_plan.argtypes = [ctypes.c_void_p]
64
+
65
+ lib.delete_interpreter.restype = None
66
+ lib.delete_interpreter.argtypes = [ctypes.c_void_p]
67
+
68
+ lib.initialize.restype = ctypes.c_int
69
+ lib.initialize.argtypes = [
70
+ ctypes.c_void_p,
71
+ ctypes.c_char_p,
72
+ ctypes.c_size_t,
73
+ ctypes.c_char_p,
74
+ ]
75
+
76
+ lib.set_input_tensor.restype = ctypes.c_int
77
+ lib.set_input_tensor.argtypes = [
78
+ ctypes.c_void_p,
79
+ ctypes.c_size_t,
80
+ ctypes.c_void_p,
81
+ ctypes.c_int,
82
+ ]
83
+
84
+ lib.get_output_tensor.restype = ctypes.c_int
85
+ lib.get_output_tensor.argtypes = [
86
+ ctypes.c_void_p,
87
+ ctypes.c_size_t,
88
+ ctypes.c_void_p,
89
+ ctypes.c_int,
90
+ ]
91
+
92
+ lib.get_input_tensor.restype = ctypes.c_int
93
+ lib.get_input_tensor.argtypes = [
94
+ ctypes.c_void_p,
95
+ ctypes.c_size_t,
96
+ ctypes.c_void_p,
97
+ ctypes.c_int,
98
+ ]
99
+
100
+ lib.reset.restype = ctypes.c_int
101
+ lib.reset.argtypes = [ctypes.c_void_p]
102
+
103
+ lib.invoke.restype = ctypes.c_int
104
+ lib.invoke.argtypes = [ctypes.c_void_p]
105
+
106
+ lib.get_error.restype = ctypes.c_size_t
107
+ lib.get_error.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
108
+
109
+ lib.arena_used_bytes.restype = ctypes.c_size_t
110
+ lib.arena_used_bytes.argtypes = [
111
+ ctypes.c_void_p,
112
+ ]
113
+
114
+ self._max_tensor_arena_size = max_tensor_arena_size
115
+ self.models: List[TFLMHostInterpreter.modelData] = []
116
+
117
+ def __enter__(self) -> "TFLMHostInterpreter":
118
+ return self
119
+
120
+ def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
121
+ """! Exit calls close function to delete interpreter"""
122
+ self.close()
123
+
124
+ def initialise_interpreter(self, model_index: int = 0) -> None:
125
+ """! Interpreter initialiser, initialised interpreter with model and parameters (optional)
126
+ @param model_index The model to target, for interpreters that support multiple models
127
+ running concurrently. Defaults to 0 for use with a single model.
128
+ """
129
+ max_model_size = 50000000
130
+ self.obj = lib.new_interpreter(max_model_size)
131
+ currentModel = None
132
+
133
+ for model in self.models:
134
+ if model.tile == model_index:
135
+ currentModel = model
136
+
137
+ if currentModel is None:
138
+ print(f"No model at index {model_index} found.", sys.stderr)
139
+ raise IndexError
140
+
141
+ assert currentModel.model_content is not None
142
+
143
+ status = lib.initialize(
144
+ self.obj,
145
+ currentModel.model_content,
146
+ len(currentModel.model_content),
147
+ currentModel.params_content,
148
+ )
149
+ if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
150
+ raise RuntimeError("Unable to initialize interpreter")
151
+
152
+ def set_tensor(self, tensor_index: int, value: ndarray, model_index=0) -> None:
153
+ """! Write the input tensor of a model.
154
+ @param value The blob of data to set the tensor to.
155
+ @param tensor_index The index of input tensor to target. Defaults to 0.
156
+ @param model_index The model to target, for interpreters that support multiple models
157
+ running concurrently. Defaults to 0 for use with a single model.
158
+ """
159
+ val = value.tobytes()
160
+
161
+ length = len(val)
162
+ length2 = self.get_input_tensor_size(tensor_index)
163
+ if length != length2:
164
+ print(
165
+ "ERROR: mismatching size in set_input_tensor %d vs %d"
166
+ % (length, length2)
167
+ )
168
+
169
+ self._check_status(lib.set_input_tensor(self.obj, tensor_index, val, length))
170
+
171
+ def get_tensor(
172
+ self, tensor_index: int = 0, model_index: int = 0, tensor: ndarray = None
173
+ ) -> ndarray:
174
+ """! Read data from the output tensor of a model.
175
+ @param tensor_index The index of output tensor to target.
176
+ @param model_index The model to target, for interpreters that support multiple models
177
+ running concurrently. Defaults to 0 for use with a single model.
178
+ @param tensor Tensor of correct shape to write into (optional).
179
+ @return The data that was stored in the output tensor.
180
+ """
181
+
182
+ count: Optional[int]
183
+ tensor_details: Optional[Dict[str, Any]]
184
+ count, tensor_details = next(
185
+ filter(
186
+ lambda x: x[1]["index"] == tensor_index,
187
+ enumerate(self.get_output_details()),
188
+ ),
189
+ (None, None),
190
+ )
191
+
192
+ if count is None or tensor_details is None:
193
+ print(f"No tensor at index {tensor_index} found.", sys.stderr)
194
+ raise IndexError
195
+
196
+ length = self.get_tensor_size(tensor_index)
197
+ if tensor is None:
198
+ tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
199
+ else:
200
+ length = len(tensor.tobytes())
201
+ if length != length:
202
+ print(
203
+ "ERROR: mismatching size in get_output_tensor %d vs %d"
204
+ % (length, length)
205
+ )
206
+
207
+ data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
208
+ self._check_status(lib.get_output_tensor(self.obj, count, data_ptr, length))
209
+ return tensor
210
+
211
+ def get_input_tensor(self, input_index: int = 0, model_index: int = 0) -> ndarray:
212
+ """! Read the data in the input tensor of a model.
213
+ @param input_index The index of input tensor to target.
214
+ @param model_index The engine to target, for interpreters that support multiple models
215
+ running concurrently. Defaults to 0 for use with a single model.
216
+ @return The data that was stored in the output tensor.
217
+ """
218
+ tensor_details = self.get_input_details(model_index)[input_index]
219
+ tensor = np.zeros(tensor_details["shape"], dtype=tensor_details["dtype"])
220
+ data_ptr = tensor.ctypes.data_as(ctypes.c_void_p)
221
+
222
+ l = len(tensor.tobytes())
223
+ self._check_status(lib.get_input_tensor(self.obj, input_index, data_ptr, l))
224
+ return tensor
225
+
226
+ def reset(self, model_index: int = 0) -> None:
227
+ """! Resets the model."""
228
+ self._check_status(lib.reset(self.obj))
229
+
230
+ def invoke(self, model_index: int = 0) -> None:
231
+ """! Invoke the model and starting inference of the current
232
+ state of the tensors.
233
+ """
234
+ INVOKE_CALLBACK_FUNC = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_int)
235
+
236
+ self._check_status(lib.invoke(self.obj))
237
+
238
+ def close(self, model_index: int = 0) -> None:
239
+ """! Delete the interpreter.
240
+ @params model_index Defines which interpreter to target in systems with multiple.
241
+ """
242
+ if self.obj:
243
+ lib.delete_interpreter(self.obj)
244
+ self.obj = None
245
+
246
+ def tensor_arena_size(self) -> int:
247
+ """! Read the size of the tensor arena required.
248
+ @return size of the tensor arena as an integer.
249
+ """
250
+ return lib.arena_used_bytes(self.obj)
251
+
252
+ def _check_status(self, status) -> None:
253
+ """! Read a status code and raise an exception.
254
+ @param status Status code.
255
+ """
256
+ if XTFLMInterpreterStatus(status) is XTFLMInterpreterStatus.ERROR:
257
+ lib.get_error(self.obj, self._error_msg)
258
+ raise RuntimeError(self._error_msg.value.decode("utf-8"))
259
+
260
+ def print_memory_plan(self) -> None:
261
+ """! Print a plan of memory allocation"""
262
+ lib.print_memory_plan(self.obj)
263
+
264
+ def allocate_tensors(self):
265
+ """! Dummy function to match tf.lite.Interpreter() API"""
266
+ return
267
+
268
+ def get_input_tensor_size(self, input_index: int = 0, model_index: int = 0) -> int:
269
+ """! Read the size of the input tensor from the model.
270
+ @param input_index The index of input tensor to target.
271
+ @param model_index The model to target, for interpreters that support multiple models
272
+ running concurrently. Defaults to 0 for use with a single model.
273
+ @return The size of the input tensor as an integer.
274
+ """
275
+
276
+ # Select correct model from model list
277
+ model = self.get_model(model_index)
278
+ modelBuf = Model.GetRootAsModel(model.model_content, 0)
279
+
280
+ # Get index of specific input tensor
281
+ tensorIndex = modelBuf.Subgraphs(0).Inputs(input_index)
282
+
283
+ tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
284
+
285
+ tensorSize: int
286
+ if tensorType == TensorType.INT8:
287
+ tensorSize = 1 # int8 is 1 byte
288
+ elif tensorType == TensorType.INT16:
289
+ tensorSize = 2 # int16 is 2 bytes
290
+ elif tensorType == TensorType.INT32:
291
+ tensorSize = 4 # int32 is 4 bytes
292
+ elif tensorType == TensorType.FLOAT32:
293
+ tensorSize = 4 # float32 is 4 bytes
294
+ else:
295
+ print(tensorType)
296
+ self._check_status(XTFLMInterpreterStatus.ERROR)
297
+ tensorSize = 0
298
+
299
+ # Calculate tensor size by multiplying shape elements
300
+ for i in range(0, modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
301
+ tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(
302
+ i
303
+ )
304
+ return tensorSize
305
+
306
+    def get_output_tensor_size(
+        self, output_index: int = 0, model_index: int = 0
+    ) -> int:
+        """! Read the size, in bytes, of an output tensor from the model.
+        @param output_index The index of the output tensor to target.
+        @param model_index The model to target, for interpreters that support multiple models
+        running concurrently. Defaults to 0 for use with a single model.
+        @return The size of the output tensor in bytes.
+        """
+
+        # Select correct model from model list
+        model = self.get_model(model_index)
+        modelBuf = Model.GetRootAsModel(model.model_content, 0)
+
+        # Get index of the specified output tensor
+        tensorIndex = modelBuf.Subgraphs(0).Outputs(output_index)
+
+        tensorType = modelBuf.Subgraphs(0).Tensors(tensorIndex).Type()
+
+        tensorSize: int
+        if tensorType == TensorType.INT8:
+            tensorSize = 1  # int8 is 1 byte
+        elif tensorType == TensorType.INT16:
+            tensorSize = 2  # int16 is 2 bytes
+        elif tensorType == TensorType.INT32:
+            tensorSize = 4  # int32 is 4 bytes
+        elif tensorType == TensorType.FLOAT32:
+            tensorSize = 4  # float32 is 4 bytes
+        else:
+            print(tensorType)
+            self._check_status(XTFLMInterpreterStatus.ERROR)
+            tensorSize = 0
+
+        # Multiply the element size by each shape dimension
+        for i in range(modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeLength()):
+            tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensorIndex).Shape(i)
+        return tensorSize
+
+    def get_tensor_size(self, tensor_index: int = 0, model_index: int = 0) -> int:
+        """! Read the size, in bytes, of any tensor from the model.
+        @param tensor_index The index of the tensor to target.
+        @param model_index The model to target, for interpreters that support multiple models
+        running concurrently. Defaults to 0 for use with a single model.
+        @return The size of the tensor in bytes.
+        """
+
+        # Select correct model from model list
+        model = self.get_model(model_index)
+        modelBuf = Model.GetRootAsModel(model.model_content, 0)
+
+        tensorType = modelBuf.Subgraphs(0).Tensors(tensor_index).Type()
+        tensorSize: int
+        if tensorType == TensorType.INT8:
+            tensorSize = 1  # int8 is 1 byte
+        elif tensorType == TensorType.INT16:
+            tensorSize = 2  # int16 is 2 bytes
+        elif tensorType == TensorType.INT32:
+            tensorSize = 4  # int32 is 4 bytes
+        elif tensorType == TensorType.FLOAT32:
+            tensorSize = 4  # float32 is 4 bytes
+        else:
+            print(tensorType)
+            self._check_status(XTFLMInterpreterStatus.ERROR)
+            tensorSize = 0  # unreachable if _check_status raises; avoids UnboundLocalError
+
+        # Multiply the element size by each shape dimension
+        for i in range(modelBuf.Subgraphs(0).Tensors(tensor_index).ShapeLength()):
+            tensorSize = tensorSize * modelBuf.Subgraphs(0).Tensors(tensor_index).Shape(i)
+        return tensorSize
+
+    def get_input_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
+        """! Reads the input tensor details from the model.
+        @param model_index The model to target, for interpreters that support multiple models
+        running concurrently. Defaults to 0 for use with a single model.
+        @return Tensor details, including the index, name, shape, data type, and quantization
+        parameters.
+        """
+
+        # Select correct model from model list
+        model = self.get_model(model_index)
+        modelBuf = Model.GetRootAsModel(model.model_content, 0)
+
+        inputsList = []
+        for input_ in range(modelBuf.Subgraphs(0).InputsLength()):
+            tensorIndex = modelBuf.Subgraphs(0).Inputs(input_)
+
+            # Map the flatbuffer tensor type onto a numpy dtype
+            dtype: Type[Any]
+            if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
+                dtype = np.int8
+            elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
+                dtype = np.int16
+            elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
+                dtype = np.int32
+            elif (
+                modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
+            ):
+                dtype = np.float32
+            else:
+                raise TypeError("Unsupported input tensor type")
+
+            # Generate a dictionary of tensor details
+            details = {
+                "name": modelBuf.Subgraphs(0).Tensors(tensorIndex).Name().decode("utf-8"),
+                "index": tensorIndex,
+                "shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
+                "shape_signature": modelBuf.Subgraphs(0)
+                .Tensors(tensorIndex)
+                .ShapeSignatureAsNumpy(),
+                "dtype": dtype,
+                "quantization": (
+                    modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
+                    modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ZeroPoint(0),
+                ),
+                "quantization_parameters": {
+                    "scales": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ScaleAsNumpy(),
+                    "zero_points": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ZeroPointAsNumpy(),
+                    "quantized_dimension": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .QuantizedDimension(),
+                },
+                "sparsity_parameters": {
+                    modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
+                },
+            }
+            inputsList.append(details)
+
+        return inputsList
+
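A hypothetical sketch of putting the returned details to work: quantizing a float input with the standard affine scheme q = round(x / scale) + zero_point. Only get_input_details from above is relied on; interp and the random input are placeholders.

    import numpy as np

    inp = interp.get_input_details()[0]
    scale, zero_point = inp["quantization"]
    x = np.random.rand(*inp["shape"]).astype(np.float32)         # placeholder input
    q = (np.round(x / scale) + zero_point).astype(inp["dtype"])  # affine quantization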
+    def get_output_details(self, model_index: int = 0) -> List[Dict[str, Any]]:
+        """! Reads the output tensor details from the model.
+        @param model_index The model to target, for interpreters that support multiple models
+        running concurrently. Defaults to 0 for use with a single model.
+        @return Tensor details, including the index, name, shape, data type, and quantization
+        parameters.
+        """
+
+        # Select correct model from models list
+        model = self.get_model(model_index)
+        modelBuf = Model.GetRootAsModel(model.model_content, 0)
+
+        outputsList = []
+        for output_ in range(modelBuf.Subgraphs(0).OutputsLength()):
+            tensorIndex = modelBuf.Subgraphs(0).Outputs(output_)
+
+            # Map the flatbuffer tensor type onto a numpy dtype
+            dtype: Type[Any]
+            if modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT8:
+                dtype = np.int8
+            elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT16:
+                dtype = np.int16
+            elif modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.INT32:
+                dtype = np.int32
+            elif (
+                modelBuf.Subgraphs(0).Tensors(tensorIndex).Type() == TensorType.FLOAT32
+            ):
+                dtype = np.float32
+            else:
+                raise TypeError("Unsupported output tensor type")
+
+            # Generate a dictionary of tensor details
+            details = {
+                "name": modelBuf.Subgraphs(0).Tensors(tensorIndex).Name().decode("utf-8"),
+                "index": tensorIndex,
+                "shape": modelBuf.Subgraphs(0).Tensors(tensorIndex).ShapeAsNumpy(),
+                "shape_signature": modelBuf.Subgraphs(0)
+                .Tensors(tensorIndex)
+                .ShapeSignatureAsNumpy(),
+                "dtype": dtype,
+                "quantization": (
+                    modelBuf.Subgraphs(0).Tensors(tensorIndex).Quantization().Scale(0),
+                    modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ZeroPoint(0),
+                ),
+                "quantization_parameters": {
+                    "scales": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ScaleAsNumpy(),
+                    "zero_points": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .ZeroPointAsNumpy(),
+                    "quantized_dimension": modelBuf.Subgraphs(0)
+                    .Tensors(tensorIndex)
+                    .Quantization()
+                    .QuantizedDimension(),
+                },
+                "sparsity_parameters": {
+                    modelBuf.Subgraphs(0).Tensors(tensorIndex).Sparsity()
+                },
+            }
+            outputsList.append(details)
+
+        return outputsList
+
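The mirror image for outputs, again as a hypothetical sketch: dequantize raw integer results with x = (q - zero_point) * scale. raw is a placeholder for values read back from the device.

    import numpy as np

    out = interp.get_output_details()[0]
    scale, zero_point = out["quantization"]
    raw = np.zeros(out["shape"], dtype=out["dtype"])     # placeholder device output
    y = (raw.astype(np.float32) - zero_point) * scale    # affine dequantization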
+    def set_model(
+        self,
+        model_path: Optional[str] = None,
+        model_content: Optional[bytes] = None,
+        params_path: Optional[str] = None,
+        params_content: Optional[bytes] = None,
+        model_index: int = 0,
+        secondary_memory: bool = False,
+        flash: bool = False,
+    ) -> None:
+        """! Adds a model to the interpreter's list of models.
+        @param model_path The path to the model file (.tflite), alternative to model_content.
+        @param model_content The byte array representing a model, alternative to model_path.
+        @param params_path The path to the params file for the model,
+        alternative to params_content (optional).
+        @param params_content The byte array representing the model parameters,
+        alternative to params_path (optional).
+        @param model_index The model to target, for interpreters that support multiple models
+        running concurrently. Defaults to 0 for use with a single model.
+        @param secondary_memory Whether the model should be placed in secondary memory.
+        @param flash Whether the model parameters are loaded from flash.
+        """
+
+        # Check that at least one of model_path and model_content is provided
+        if not model_path and not model_content:
+            raise ValueError("model_path or model_content must be provided")
+        new_model = self.modelData(
+            model_path,
+            model_content,
+            params_path,
+            params_content,
+            model_index,
+            secondary_memory,
+            flash,
+        )
+        # Replace the existing model for this index, if one was already set
+        tile_found = False
+        for i, model in enumerate(self.models):
+            if model.tile == model_index:
+                self.models[i] = new_model  # assign into the list, not the loop variable
+                tile_found = True
+                break
+        # If the model wasn't previously set, add it to the list
+        if not tile_found:
+            self.models.append(new_model)
+        self.initialise_interpreter(model_index)
+
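A hypothetical usage sketch for set_model; the file names are placeholders, and the params file is only needed for models whose weights live in a separate blob.

    interp.set_model(
        model_path="model.tflite",          # placeholder path
        params_path="model_params.bin",     # optional placeholder weights blob
        model_index=0,
    )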
+    def get_model(self, model_index: int = 0):
+        for model in self.models:
+            if model.tile == model_index:
+                return model
+        raise ValueError(f"No model set at model_index {model_index}")
+
+    class modelData:
+        """! The model data class.
+        A class that holds a model and the data associated with a single model.
+        """
+
+        def __init__(
+            self,
+            model_path: Optional[str],
+            model_content: Optional[bytes],
+            params_path: Optional[str],
+            params_content: Optional[bytes],
+            model_index: int,
+            secondary_memory: bool,
+            flash: bool,
+        ):
+            """! Model data initializer.
+            Sets up variables, generates a list of operators used in the model,
+            and reads model and params paths into byte arrays (content).
+            @param model_path Path to the model file (.tflite).
+            @param model_content Model content (byte array).
+            @param params_path Path to the model parameters file.
+            @param params_content Model parameters content (byte array).
+            @param model_index The model to target, for interpreters that support multiple models
+            running concurrently. Defaults to 0 for use with a single model.
+            @param secondary_memory Whether the model should be placed in secondary memory.
+            @param flash Whether the model parameters are loaded from flash.
+            """
+            self.model_path: Optional[str] = model_path
+            self.model_content: Optional[bytes] = model_content
+            self.params_path: Optional[str] = params_path
+            self.params_content: Optional[bytes] = params_content
+            self.tile: int = model_index
+            self.secondary_memory = secondary_memory
+            self.flash = flash
+            self.opList: List[str] = []
+            self.pathToContent()
+            self.modelToOpList()
+
+        def modelToOpList(self) -> None:
+            """! Generates the operator list from the model."""
+
+            # Load the model flatbuffer
+            buffer = self.model_content
+            model = Model.GetRootAsModel(buffer, 0)
+            self.opList = []
+
+            # Iterate through the operators in the model and collect their names
+            for y in range(model.Subgraphs(0).OperatorsLength()):
+                opcode = model.OperatorCodes(
+                    model.Subgraphs(0).Operators(y).OpcodeIndex()
+                )
+                # 32 is BuiltinOperator.CUSTOM: take the custom opcode string
+                if opcode.BuiltinCode() == 32:
+                    self.opList.append(opcode.CustomCode().decode("utf-8"))
+                # Otherwise decode the builtin opcode to its name
+                else:
+                    self.opList.append(opcode2name(opcode.BuiltinCode()))
+
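A hypothetical sketch of inspecting the generated operator list. The "XC_" prefix test is an assumption about how the XCORE custom operators are named, not something this hunk establishes.

    m = interp.get_model(0)
    print(m.opList)                         # mix of custom and builtin operator names
    custom_ops = [op for op in m.opList if op.startswith("XC_")]   # assumed prefix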
+        def pathToContent(self) -> None:
+            """! Reads model and params paths into content (byte arrays)."""
+
+            # Read the model file if a path was given but no content
+            if self.model_content is None and self.model_path is not None:
+                with open(self.model_path, "rb") as input_fd:
+                    self.model_content = input_fd.read()
+
+            # Read the params file if a path was given but no content
+            if self.params_content is None and self.params_path is not None:
+                with open(self.params_path, "rb") as input_fd2:
+                    self.params_content = input_fd2.read()
+
+            # If params_content is still None, fall back to an empty byte array
+            if self.params_content is None:
+                self.params_content = bytes([])
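A standalone restatement of the precedence rules pathToContent applies, useful for checking edge cases without a real model file: explicit content beats a path, and absent params collapse to empty bytes. The helper name is hypothetical.

    from typing import Optional

    def resolve(path: Optional[str], content: Optional[bytes]) -> Optional[bytes]:
        # Explicit content wins; otherwise read the file at path, if any.
        if content is None and path is not None:
            with open(path, "rb") as fd:
                content = fd.read()
        return content

    params = resolve(None, None) or b""     # params fall back to an empty byte array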