xmos-ai-tools 1.2.1.dev24__py3-none-win_amd64.whl → 1.3.2.dev180__py3-none-win_amd64.whl

This diff shows the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (225)
  1. xmos_ai_tools/__init__.py +7 -7
  2. xmos_ai_tools/io_server/__init__.py +151 -151
  3. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -13
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -8
  5. xmos_ai_tools/runtime/include/flash_server.h +2 -3
  6. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +11 -51
  7. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +3 -0
  8. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +1 -0
  9. xmos_ai_tools/runtime/include/lib_nn/api/version.h +2 -2
  10. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  11. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  12. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +6 -0
  13. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +13 -13
  14. xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h +64 -0
  15. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +1 -1
  16. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +1 -1
  17. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
  18. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
  19. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
  20. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +21 -7
  21. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
  22. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
  23. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
  24. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
  25. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
  26. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
  27. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
  28. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
  29. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
  30. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
  31. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
  32. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
  33. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
  34. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
  35. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
  36. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
  37. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
  38. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
  39. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
  40. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
  41. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
  42. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
  43. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
  44. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
  45. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
  46. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
  47. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
  48. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
  49. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
  50. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
  51. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
  52. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
  53. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
  54. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
  55. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
  56. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
  57. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
  58. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
  59. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
  60. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
  61. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
  62. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
  63. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
  64. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
  65. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
  66. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
  67. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
  68. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
  69. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
  70. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
  71. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
  72. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
  73. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
  74. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
  75. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
  76. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
  77. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
  78. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
  79. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
  80. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
  81. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
  82. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
  83. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
  84. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
  85. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
  86. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
  87. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
  88. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
  89. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
  90. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
  91. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
  92. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
  93. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
  94. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
  95. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
  96. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
  97. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
  98. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
  99. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
  100. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
  101. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
  102. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
  103. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
  104. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
  105. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
  106. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
  107. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
  108. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
  109. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
  110. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
  111. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
  112. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
  113. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
  114. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
  115. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
  116. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
  117. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
  118. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
  119. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +6 -2
  120. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
  121. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
  122. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
  123. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
  124. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
  125. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
  126. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
  127. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
  128. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
  129. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
  130. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
  131. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
  132. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
  133. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
  134. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
  135. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
  136. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
  137. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
  138. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
  139. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
  140. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
  141. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
  142. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
  143. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
  144. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
  145. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
  146. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
  147. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
  148. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
  149. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
  150. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
  151. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
  152. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
  153. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
  154. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
  155. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
  156. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
  157. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
  158. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
  159. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
  160. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
  161. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
  162. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
  163. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
  164. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
  165. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
  166. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
  167. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
  168. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
  169. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
  170. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
  171. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
  172. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
  173. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
  174. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
  175. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
  176. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
  177. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
  178. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
  179. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
  180. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
  181. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
  182. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
  183. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
  184. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
  185. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
  186. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
  187. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
  188. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
  189. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
  190. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
  191. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
  192. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
  193. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
  194. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
  195. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
  196. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
  197. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
  198. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
  199. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
  200. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
  201. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
  202. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
  203. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
  204. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
  205. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
  206. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
  207. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
  208. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
  209. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
  210. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
  211. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
  212. xmos_ai_tools/runtime/lib/host_xtflitemicro.lib +0 -0
  213. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  214. xmos_ai_tools/xformer/__init__.py +64 -60
  215. xmos_ai_tools/xformer/flash.py +190 -190
  216. xmos_ai_tools/xinterpreters/__init__.py +1 -1
  217. xmos_ai_tools/xinterpreters/exceptions.py +38 -38
  218. xmos_ai_tools/xinterpreters/host_interpreter.py +651 -652
  219. xmos_ai_tools/xinterpreters/libs/windows/xtflm_python.dll +0 -0
  220. {xmos_ai_tools-1.2.1.dev24.data → xmos_ai_tools-1.3.2.dev180.data}/data/Scripts/xcore-opt.exe +0 -0
  221. {xmos_ai_tools-1.2.1.dev24.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/METADATA +5 -7
  222. {xmos_ai_tools-1.2.1.dev24.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/RECORD +224 -222
  223. {xmos_ai_tools-1.2.1.dev24.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/WHEEL +1 -1
  224. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +0 -19
  225. {xmos_ai_tools-1.2.1.dev24.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/top_level.txt +0 -0
@@ -22,19 +22,19 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
                          const RuntimeShape& unextended_input_shape,
                          const RuntimeShape& unextended_output_shape,
                          SequentialTensorWriter<T>* writer) {
   ruy::profiler::ScopeLabel label("StridedSlice");
 
   // Note that the output_shape is not used herein.
-  tflite::StridedSliceParams params_copy = op_params;
+  tflite_micro::StridedSliceParams params_copy = op_params;
 
   TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 5);
   TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 5);
@@ -120,7 +120,7 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }
 
 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
                          const RuntimeShape& unextended_input_shape,
                          const T* input_data,
                          const RuntimeShape& unextended_output_shape,
@@ -131,7 +131,7 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }
 
 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
                          const RuntimeShape& unextended_input_shape,
                          const TfLiteTensor* input,
                          const RuntimeShape& unextended_output_shape,
@@ -142,6 +142,6 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_
@@ -26,7 +26,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -460,6 +460,6 @@ inline void SubWithActivation(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/kernels/op_macros.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline void Tanh(const RuntimeShape& input_shape, const float* input_data,
@@ -124,6 +124,6 @@ inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -198,6 +198,6 @@ void Transpose(const TransposeParams& params, const RuntimeShape& input_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_
@@ -20,7 +20,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -220,6 +220,6 @@ inline void TransposeConv(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 template <int N>
 struct Dims {
@@ -163,6 +163,6 @@ inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3,
          i4;
 }
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace strided_slice {
 
 // Use until std::clamp() is available from C++17.
@@ -33,7 +33,7 @@ inline int Clamp(const int v, const int lo, const int hi) {
   return v;
 }
 
-inline void StridedSlicePadIndices(tflite::StridedSliceParams* p,
+inline void StridedSlicePadIndices(tflite_micro::StridedSliceParams* p,
                                    int dim_count) {
   // Add indices and mask bits to fully include extra dimensions
   TFLITE_CHECK_LE(dim_count, 5);
@@ -72,7 +72,7 @@ inline void StridedSlicePadIndices(tflite::StridedSliceParams* p,
 // Return the index for the first element along that axis. This index will be a
 // positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0)
 // that can be used to index directly into the data.
-inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params,
+inline int StridedSliceStartForAxis(const tflite_micro::StridedSliceParams& params,
                                     const RuntimeShape& input_shape,
                                     int32_t axis) {
   const int32_t axis_size = input_shape.Dims(axis);
@@ -97,7 +97,7 @@ inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params,
   return start;
 }
 
-inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params,
+inline int StridedSliceEndForAxis(const tflite_micro::StridedSliceParams& params,
                                   const RuntimeShape& input_shape, int axis,
                                   int start) {
   const auto shrink_axis_mask = params.shrink_axis_mask;
@@ -139,7 +139,7 @@ inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params,
 // Return the index for the first element along that axis. This index will be a
 // positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0)
 // that can be used to index directly into the data.
-inline int StartForAxis(const tflite::StridedSliceParams& params,
+inline int StartForAxis(const tflite_micro::StridedSliceParams& params,
                         const RuntimeShape& input_shape, int axis) {
   const auto begin_mask = params.begin_mask;
   const auto* start_indices = params.start_indices;
@@ -186,7 +186,7 @@ inline int StartForAxis(const tflite::StridedSliceParams& params,
 // element. ie. So if you were iterating through all elements of a 1D array of
 // size 4, this function would return 4 as the stop, because it is one past the
 // "real" indices of 0, 1, 2 & 3.
-inline int StopForAxis(const tflite::StridedSliceParams& params,
+inline int StopForAxis(const tflite_micro::StridedSliceParams& params,
                        const RuntimeShape& input_shape, int axis,
                        int start_for_axis) {
   const auto end_mask = params.end_mask;
@@ -246,11 +246,11 @@ inline bool LoopCondition(int index, int stop, int stride) {
   return stride > 0 ? index >= stop : index <= stop;
 }
 
-inline tflite::StridedSliceParams BuildStridedSliceParams(
+inline tflite_micro::StridedSliceParams BuildStridedSliceParams(
     int begin_mask, int end_mask, int shrink_axis_mask,
     const std::vector<int>& start_indices, const std::vector<int>& stop_indices,
    const std::vector<int>& strides) {
-  tflite::StridedSliceParams op_params{};
+  tflite_micro::StridedSliceParams op_params{};
   const int dims_count = start_indices.size();
 
   op_params.start_indices_count = dims_count;
@@ -273,6 +273,6 @@ inline tflite::StridedSliceParams BuildStridedSliceParams(
 
 } // namespace strided_slice
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_
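The change that repeats across these header hunks is a rename of the library's top-level C++ namespace from tflite to tflite_micro. Below is a minimal sketch, not taken from the package, of how a caller of the strided-slice helper shown above would look against the renamed namespace; the function and include path come from the hunks, while the mask and index values are illustrative placeholders.

    // Sketch only: exercises BuildStridedSliceParams() from the hunk above
    // under the renamed tflite_micro namespace. All values are placeholders.
    #include <vector>

    #include "tensorflow/lite/kernels/internal/strided_slice_logic.h"

    tflite_micro::StridedSliceParams BuildExampleParams() {
      const std::vector<int> start = {0};
      const std::vector<int> stop = {4};
      const std::vector<int> strides = {1};
      // Before this release the helper was reached as
      // tflite::strided_slice::BuildStridedSliceParams().
      return tflite_micro::strided_slice::BuildStridedSliceParams(
          /*begin_mask=*/0, /*end_mask=*/0, /*shrink_axis_mask=*/0,
          start, stop, strides);
    }

The returned params would then be passed to reference_ops::StridedSlice(), whose signature change appears in the first hunks of this diff.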
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/core/macros.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 template <typename T>
 inline T* GetTensorData(TfLiteTensor* tensor) {
@@ -37,6 +37,6 @@ inline const T* GetTensorData(const TfLiteTensor* tensor) {
 TFLITE_NOINLINE RuntimeShape GetTensorShape(const TfLiteTensor* tensor);
 RuntimeShape GetTensorShape(std::vector<int32_t> data);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/runtime_shape.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 enum class FusedActivationFunctionType : uint8_t {
   kNone,
@@ -1091,6 +1091,6 @@ struct is_int32_or_int64
                           std::is_same<T, int64_t>::value> {
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_
@@ -28,7 +28,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/op_macros.h"
 #endif
 
-namespace tflite {
+namespace tflite_micro {
 
 // A fair number of functions in this header have historically been inline.
 // It is ok to change functions to not be inline if the latency with
@@ -336,6 +336,6 @@ bool IsMobilePlatform();
 // Returns whether there is unspecified dimension in the tensor's dim signature.
 bool HasUnspecifiedDimension(const TfLiteTensor* tensor);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/core/c/builtin_op_data.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 inline int ComputePadding(int stride, int dilation_rate, int in_size,
                           int filter_size, int out_size) {
@@ -110,6 +110,6 @@ inline Padding3DValues ComputePadding3DValues(
   padding_values.width_offset = offset;
   return padding_values;
 }
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/c/c_api_types.h"
 
-namespace tflite {
+namespace tflite_micro {
 // Interface classes that the TFLM framework relies on to get buffers it needs.
 // There are two types of buffers that the TFLM framework requires: persistent
 // and non-persistent. Persistent buffers, once allocated, are never freed by
@@ -95,6 +95,6 @@ class INonPersistentBufferAllocator {
   virtual size_t GetAvailableMemory(size_t alignment) const = 0;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Implement INonPersistentBufferAllocator on an arena that is dedicated for
 // non-persistent buffers.
@@ -99,6 +99,6 @@ class NonPersistentArenaBufferAllocator : public INonPersistentBufferAllocator {
   bool resizable_buffer_allocated_ = false;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // PersistentArenaBufferAllocator is an implementatation of
 // IPersistentBufferAllocator interface on an arena that is dedicated for
@@ -53,6 +53,6 @@ class PersistentArenaBufferAllocator : public IPersistentBufferAllocator {
   uint8_t* tail_temp_;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Utility class used to log allocations of a SingleArenaBufferAllocator. Should
 // only be used in debug/evaluation settings or unit tests to evaluate
@@ -58,6 +58,6 @@ class RecordingSingleArenaBufferAllocator : public SingleArenaBufferAllocator {
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // TODO(petewarden): This allocator never frees up or reuses any memory, even
 // though we have enough information about lifetimes of the tensors to do so.
@@ -139,6 +139,6 @@ class SingleArenaBufferAllocator : public INonPersistentBufferAllocator,
   int temp_buffer_count_ = 0;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
@@ -25,7 +25,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_time.h"
 #include "tensorflow/lite/micro/recording_micro_interpreter.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 template <typename inputT>
 class MicroBenchmarkRunner {
@@ -33,7 +33,7 @@ class MicroBenchmarkRunner {
   // The lifetimes of model, op_resolver, tensor_arena, profiler must exceed
   // that of the created MicroBenchmarkRunner object.
   MicroBenchmarkRunner(const uint8_t* model,
-                       const tflite::MicroOpResolver* op_resolver,
+                       const tflite_micro::MicroOpResolver* op_resolver,
                        uint8_t* tensor_arena, int tensor_arena_size,
                        MicroProfilerInterface* profiler,
                        int num_resource_variables = 0)
@@ -63,7 +63,7 @@ class MicroBenchmarkRunner {
 
     // Pre-populate input tensor with random values.
     int input_length = input->bytes / sizeof(inputT);
-    inputT* input_values = tflite::GetTensorData<inputT>(input);
+    inputT* input_values = tflite_micro::GetTensorData<inputT>(input);
     for (int i = 0; i < input_length; i++) {
       // Pre-populate input tensor with a random value based on a constant seed.
      input_values[i] = static_cast<inputT>(
@@ -74,7 +74,7 @@ class MicroBenchmarkRunner {
 
   void SetInput(const inputT* custom_input, int input_index = 0) {
     TfLiteTensor* input = interpreter_.input(input_index);
-    inputT* input_buffer = tflite::GetTensorData<inputT>(input);
+    inputT* input_buffer = tflite_micro::GetTensorData<inputT>(input);
     int input_length = input->bytes / sizeof(inputT);
     for (int i = 0; i < input_length; i++) {
       input_buffer[i] = custom_input[i];
@@ -86,10 +86,10 @@ class MicroBenchmarkRunner {
   }
 
  private:
-  tflite::RecordingMicroAllocator* allocator_;
-  tflite::RecordingMicroInterpreter interpreter_;
+  tflite_micro::RecordingMicroAllocator* allocator_;
+  tflite_micro::RecordingMicroInterpreter interpreter_;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_MICRO_BENCHMARK_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_context.h"
 #include "tensorflow/lite/micro/micro_graph.h"
 
-namespace tflite {
+namespace tflite_micro {
 // A fake of MicroContext for kernel util tests.
 // TODO(b/272759060): FakeMicroContext currently inherits from MicroContext.
 // Which allow tests to use functions from MicroContext that weren't added to
@@ -65,6 +65,6 @@ class FakeMicroContext : public MicroContext {
   TF_LITE_REMOVE_VIRTUAL_DELETE
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 
-namespace tflite {
+namespace tflite_micro {
 // Kernels use flexbuffers::Map to pack their init parameters in a tflite file,
 // with the parameter names as map keys and the parameter values as the
 // corresponding map values.
@@ -60,6 +60,6 @@ TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray(
 TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray(
     const flatbuffers::Vector<float>* flatbuffer_array);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
@@ -24,7 +24,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/max.h"
 #include "tensorflow/lite/kernels/internal/min.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -52,6 +52,6 @@ inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kActivationsInputTensor;
 extern const int kActivationsOutputTensor;
@@ -55,10 +55,14 @@ void Relu6Quantized(int8_t lower, int8_t upper, const RuntimeShape& input_shape,
                     const int8_t* input_data, const RuntimeShape& output_shape,
                     int8_t* output_data);
 
+void Relu6Quantized(int16_t lower, int16_t upper, const RuntimeShape& input_shape,
+                    const int16_t* input_data, const RuntimeShape& output_shape,
+                    int16_t* output_data);
+
 TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node);
 
 TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_
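Besides the namespace rename, the activations.h hunk above adds an int16_t overload of Relu6Quantized next to the existing int8_t one. A hedged usage sketch follows, assuming the caller already holds int16 tensor data; the shape and clamp bounds are placeholders that depend on the tensor's quantization parameters, which this diff does not specify.

    // Sketch only: calls the new int16_t Relu6Quantized() overload declared in
    // the hunk above. Shape and bounds are illustrative placeholders.
    #include <cstdint>

    #include "tensorflow/lite/micro/kernels/activations.h"

    void ClampToRelu6Int16(const int16_t* input, int16_t* output, int length) {
      // RuntimeShape now lives in tflite_micro (see the runtime_shape.h hunk).
      tflite_micro::RuntimeShape shape({1, length});
      const int16_t lower = 0;        // quantized value representing 0.0f
      const int16_t upper = 6 * 256;  // placeholder quantized value for 6.0f
      tflite_micro::Relu6Quantized(lower, upper, shape, input, shape, output);
    }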
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/micro_common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kAddInputTensor1;
 extern const int kAddInputTensor2;
@@ -73,6 +73,6 @@ inline TFLMRegistration Register_ADD_INT8() { return Register_ADD(); }
 
 inline TFLMRegistration Register_ADD_INT16() { return Register_ADD(); }
 #endif
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_
@@ -15,7 +15,7 @@ limitations under the License.
 
 #include "mli_api.h" // NOLINT
 
-namespace tflite {
+namespace tflite_micro {
 
 // Convolution specialized function.
 typedef mli_status (*conv_func_ptr)(const mli_tensor* /*in*/,
@@ -138,4 +138,4 @@ mli_krn_maxpool(const mli_pool_cfg* cfg) {
 }
 #endif
 
-} // namespace tflite
+} // namespace tflite_micro
@@ -18,7 +18,7 @@ limitations under the License.
 
 #include "mli_api.h" // NOLINT
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -70,6 +70,6 @@ class MliTensorInterface {
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_SLICERS_H_
@@ -17,7 +17,7 @@ limitations under the License.
 #define TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_SLICERS_H_
 
 #include "mli_api.h" // NOLINT
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -52,5 +52,5 @@ class TensorSlicer {
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_SLICERS_H_
@@ -25,7 +25,7 @@ limitations under the License.
 
 #define KRNL_C_DIM_NHWC 0 // output channels
 
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -118,7 +118,7 @@ inline void MliTensorAttachBuffer<int8_t>(const TfLiteEvalTensor* tfT,
   // non-const mli_tensor. This is required by current implementation of MLI
   // backend and planned for redesign due to this and some other aspects.
   mliT->SetData<int8_t>(
-      const_cast<int8_t*>(tflite::micro::GetTensorData<int8_t>(tfT)),
+      const_cast<int8_t*>(tflite_micro::micro::GetTensorData<int8_t>(tfT)),
       *mliT->DataCapacity());
 }
 
@@ -129,7 +129,7 @@ inline void MliTensorAttachBuffer<int32_t>(const TfLiteEvalTensor* tfT,
   // non-const mli_tensor. This is required by current implementation of MLI
   // backend and planned for redesign due to this and some other aspects.
   mliT->SetData<int32_t>(
-      const_cast<int32_t*>(tflite::micro::GetTensorData<int32_t>(tfT)),
+      const_cast<int32_t*>(tflite_micro::micro::GetTensorData<int32_t>(tfT)),
       *mliT->DataCapacity());
 }
 
@@ -305,6 +305,6 @@ inline void permute_weights(const mli_tensor* weights_src,
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_TF_UTILS_H_
@@ -20,7 +20,7 @@ limitations under the License.
 #include "mli_interface.h"
 #include "tensorflow/lite/c/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -140,6 +140,6 @@ TfLiteStatus arc_scratch_buffer_calc_slice_size_weights(
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUF_MGR_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "mli_api.h" // NOLINT
 #include "tensorflow/lite/c/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace ops {
 namespace micro {
 
@@ -73,6 +73,6 @@ static inline bool inside_arc_ccm(void* p) {
 
 } // namespace micro
 } // namespace ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUFFERS_H_