xmos-ai-tools 1.2.1.dev20__py3-none-win_amd64.whl → 1.3.2.dev180__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (229)
  1. xmos_ai_tools/__init__.py +7 -7
  2. xmos_ai_tools/io_server/__init__.py +151 -151
  3. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -13
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -8
  5. xmos_ai_tools/runtime/include/flash_server.h +2 -3
  6. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +2 -1
  7. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +2 -1
  8. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +4 -2
  9. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +2 -0
  10. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +11 -51
  11. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +3 -0
  12. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +1 -0
  13. xmos_ai_tools/runtime/include/lib_nn/api/version.h +2 -2
  14. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  15. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  16. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +6 -0
  17. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +13 -13
  18. xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h +64 -0
  19. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +1 -1
  20. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +1 -1
  21. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
  22. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
  23. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
  24. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +21 -7
  25. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
  26. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
  27. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
  28. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
  29. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
  30. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
  31. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
  32. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
  33. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
  34. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
  35. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
  36. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
  37. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
  38. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
  39. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
  40. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
  41. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
  42. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
  43. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
  44. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
  45. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
  46. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
  47. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
  48. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
  49. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
  50. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
  51. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
  52. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
  53. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
  54. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
  55. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
  56. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
  57. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
  58. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
  59. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
  60. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
  61. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
  62. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
  63. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
  64. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
  65. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
  66. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
  67. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
  68. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
  69. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
  70. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
  71. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
  72. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
  73. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
  74. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
  75. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
  76. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
  77. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
  78. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
  79. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
  80. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
  81. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
  82. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
  83. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
  84. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
  85. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
  86. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
  87. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
  88. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
  89. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
  90. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
  91. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
  92. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
  93. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
  94. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
  95. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
  96. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
  97. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
  98. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
  99. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
  100. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
  101. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
  102. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
  103. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
  104. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
  105. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
  106. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
  107. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
  108. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
  109. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
  110. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
  111. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
  112. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
  113. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
  114. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
  115. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
  116. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
  117. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
  118. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
  119. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
  120. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
  121. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
  122. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
  123. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +6 -2
  124. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
  125. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
  126. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
  127. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
  128. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
  129. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
  130. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
  131. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
  132. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
  133. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
  134. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
  135. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
  136. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
  137. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
  138. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
  139. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
  140. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
  141. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
  142. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
  143. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
  144. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
  145. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
  146. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
  147. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
  148. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
  149. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
  150. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
  151. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
  152. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
  153. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
  154. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
  155. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
  156. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
  157. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
  158. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
  159. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
  160. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
  161. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
  162. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
  163. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
  164. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
  165. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
  166. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
  167. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
  168. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
  169. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
  170. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
  171. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
  172. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
  173. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
  174. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
  175. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
  176. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
  177. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
  178. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
  179. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
  180. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
  181. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
  182. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
  183. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
  184. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
  185. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
  186. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
  187. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
  188. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
  189. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
  190. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
  191. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
  192. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
  193. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
  194. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
  195. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
  196. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
  197. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
  198. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
  199. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
  200. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
  201. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
  202. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
  203. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
  204. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
  205. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
  206. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
  207. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
  208. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
  209. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
  210. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
  211. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
  212. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
  213. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
  214. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
  215. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
  216. xmos_ai_tools/runtime/lib/host_xtflitemicro.lib +0 -0
  217. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  218. xmos_ai_tools/xformer/__init__.py +64 -60
  219. xmos_ai_tools/xformer/flash.py +190 -190
  220. xmos_ai_tools/xinterpreters/__init__.py +1 -1
  221. xmos_ai_tools/xinterpreters/exceptions.py +38 -38
  222. xmos_ai_tools/xinterpreters/host_interpreter.py +651 -652
  223. xmos_ai_tools/xinterpreters/libs/windows/xtflm_python.dll +0 -0
  224. {xmos_ai_tools-1.2.1.dev20.data → xmos_ai_tools-1.3.2.dev180.data}/data/Scripts/xcore-opt.exe +0 -0
  225. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/METADATA +5 -7
  226. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/RECORD +228 -226
  227. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/WHEEL +1 -1
  228. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +0 -19
  229. {xmos_ai_tools-1.2.1.dev20.dist-info → xmos_ai_tools-1.3.2.dev180.dist-info}/top_level.txt +0 -0
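Most of the header hunks that follow show a single mechanical change: the vendored TensorFlow Lite Micro headers in this wheel are renamed from namespace tflite to namespace tflite_micro. The C++ sketch below is a hypothetical illustration, not code shipped in this package, of what that rename means for application code built against these headers; it assumes the usual TFLM entry points (GetModel, MicroMutableOpResolver, MicroInterpreter) are otherwise unchanged apart from the namespace.

// Hypothetical downstream usage, shown only to illustrate the namespace rename.
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

constexpr size_t kArenaSize = 16 * 1024;
static uint8_t tensor_arena[kArenaSize];

TfLiteStatus RunOnce(const void* model_data) {
  // Previously tflite::GetModel / tflite::MicroMutableOpResolver / tflite::MicroInterpreter.
  const tflite_micro::Model* model = tflite_micro::GetModel(model_data);
  tflite_micro::MicroMutableOpResolver<2> resolver;
  resolver.AddFullyConnected();
  resolver.AddSoftmax();
  tflite_micro::MicroInterpreter interpreter(model, resolver, tensor_arena, kArenaSize);
  if (interpreter.AllocateTensors() != kTfLiteOk) return kTfLiteError;
  return interpreter.Invoke();
}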
@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <cmath>
 
-namespace tflite {
+namespace tflite_micro {
 
 #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
 inline float TfLiteMax(const float& x, const float& y) {
@@ -30,6 +30,6 @@ inline T TfLiteMax(const T& x, const T& y) {
 }
 #endif
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <cmath>
 
-namespace tflite {
+namespace tflite_micro {
 
 #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
 inline float TfLiteMin(const float& x, const float& y) {
@@ -30,6 +30,6 @@ inline T TfLiteMin(const T& x, const T& y) {
 }
 #endif
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // A list of tensors in a format that can be used by kernels like split and
 // concatenation.
@@ -136,6 +136,6 @@ class SequentialTensorWriter {
 T* output_ptr_;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
@@ -27,7 +27,7 @@ limitations under the License.
 #define __restrict__ __restrict
 #endif
 
-namespace tflite {
+namespace tflite_micro {
 
 // Not all backends support CpuBackendContext usage, so forward declare to avoid
 // pulling in its implementation. Use of CpuBackendContext in method
@@ -618,6 +618,6 @@ void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements,
 
 } // namespace tensor_utils
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Given the min and max values of a float array, return
 // reasonable quantization parameters to use for this array.
@@ -287,6 +287,6 @@ void QuantizeMultiplierArray(const double* effective_scales, size_t size,
 int32_t* effective_scale_significand,
 int* effective_shift);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_
@@ -24,7 +24,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -479,7 +479,7 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
 
 const bool use_unswitched =
 unswitched_params.broadcast_category ==
-tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
+tflite_micro::BroadcastableOpCategory::kFirstInputBroadcastsFast;
 
 const ArithmeticParams& params =
 use_unswitched ? unswitched_params : switched_params;
@@ -556,6 +556,6 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 // T is expected to be either float or int.
@@ -81,6 +81,6 @@ inline void AddN(const ArithmeticParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -83,6 +83,6 @@ void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 namespace batch_matmul {
 
@@ -270,6 +270,6 @@ inline void BatchMatMul(const FullyConnectedParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
@@ -20,7 +20,7 @@ limitations under the License.
 #include "ruy/profiler/instrumentation.h" // from @ruy
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 // TODO(b/135760455): Move this method anonymous namespace in a cc file.
@@ -96,6 +96,6 @@ inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -86,6 +86,6 @@ inline void BinaryFunction(const RuntimeShape& input1_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -51,6 +51,6 @@ void BroadcastArgs(const RuntimeShape& input1_shape, const T* input1_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 template <int N>
 void BroadcastImpl(const NdArrayDesc<N>& input_desc, const char* input_data,
@@ -93,5 +93,5 @@ inline void BroadcastTo(const RuntimeShape& unextended_input_shape,
 last_broadcast_dim, TfLiteTypeGetSize(data_type));
 }
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -33,5 +33,5 @@ inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -266,6 +266,6 @@ TFLITE_COMPARISON_OP(LessEqual)
 #undef TFLITE_COMPARISON_OP
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename Scalar>
@@ -123,7 +123,7 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
 const float scale = input_scale[i] * inverse_output_scale;
 const float bias = -input_zeropoint[i] * scale;
 for (int j = 0; j < copy_size; ++j) {
-const int32_t value = static_cast<int32_t>(tflite::TfLiteRound(
+const int32_t value = static_cast<int32_t>(tflite_micro::TfLiteRound(
 input_ptr[j] * scale + bias)) +
 output_zeropoint;
 output_ptr[j] = static_cast<uint8_t>(
@@ -136,6 +136,6 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
@@ -20,7 +20,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -284,6 +284,6 @@ inline void HybridConvPerChannel(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -170,6 +170,6 @@ inline void CumSum(const ArithmeticParams& params, const int8_t* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
@@ -17,11 +17,11 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
-inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
+inline void DepthToSpace(const tflite_micro::DepthToSpaceParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const T* input_data,
 const RuntimeShape& unextended_output_shape,
@@ -74,6 +74,6 @@ inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline void DepthwiseConv(
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Used in tests and template parameters to control which version of depthwise
 // convolution is called. Primarily for reference code, and specializations
@@ -22,13 +22,13 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
 // Dequantizes into a float without rounding.
 template <typename InputT, typename OutputT>
-inline void Dequantize(const tflite::DequantizationParams& op_params,
+inline void Dequantize(const tflite_micro::DequantizationParams& op_params,
 const RuntimeShape& input_shape,
 const InputT* input_data,
 const RuntimeShape& output_shape, OutputT* output_data) {
@@ -46,7 +46,7 @@ inline void Dequantize(const tflite::DequantizationParams& op_params,
 // Dequantizes per-channel quantized tensor to float.
 template <typename T>
 inline void PerChannelDequantize(
-const tflite::PerChannelDequantizationParams& op_params,
+const tflite_micro::PerChannelDequantizationParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const RuntimeShape& output_shape, float* output_data) {
 // Ensure flat size is same.
@@ -74,5 +74,5 @@ inline void PerChannelDequantize(
 
 } // namespace reference_ops
 
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -242,6 +242,6 @@ inline void Div(const ArithmeticParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -32,6 +32,6 @@ inline void Elu(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
@@ -20,7 +20,7 @@ limitations under the License.
 #include "ruy/profiler/instrumentation.h" // from @ruy
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -33,6 +33,6 @@ inline void Exp(const T* input_data, const size_t num_elements,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -33,6 +33,6 @@ void Fill(const RuntimeShape& value_shape, const T* value_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -34,6 +34,6 @@ inline void Floor(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -30,6 +30,6 @@ T FloorDiv(T input1, T input2) {
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
@@ -18,7 +18,7 @@ limitations under the License.
 #include <cmath>
 #include <functional>
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -39,6 +39,6 @@ T FloorMod(T input1, T input2) {
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline void FullyConnected(
@@ -318,6 +318,6 @@ inline void ShuffledFullyConnected(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline int16_t SaturatingLeftShift(int16_t value, int amount) {
@@ -163,6 +163,6 @@ inline void HardSwish(const HardSwishParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void CheckArithmeticParams(const ArithmeticParams& params) {
@@ -245,6 +245,6 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // Fixed-point per-channel-quantization convolution reference kernel.
@@ -236,6 +236,6 @@ inline void ConvPerChannel(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 inline void DepthwiseConvPerChannel(
 const DepthwiseParams& params, const int32_t* output_multiplier,
@@ -286,6 +286,6 @@ inline void DepthwiseConvHybridPerChannel(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // For per-channel functions, since it is defined in quantization spec that
@@ -121,6 +121,6 @@ void FullyConnected(const FullyConnectedParams& params,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
@@ -62,6 +62,6 @@ inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
 }
 }
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_