xmos_ai_tools-1.3.2.dev80-py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +74 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +60 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h
@@ -0,0 +1,310 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_TF_UTILS_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_TF_UTILS_H_
+
+ #include "mli_api.h" // NOLINT
+ #include "mli_interface.h"
+ #include "tensorflow/lite/kernels/internal/common.h"
+ #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+ #include "tensorflow/lite/micro/kernels/kernel_util.h"
+ #include "tensorflow/lite/micro/micro_log.h"
+
+ #define KRNL_C_DIM_NHWC 0 // output channels
+
+ namespace tflite_micro {
+ namespace ops {
+ namespace micro {
+
+ inline void ConvertToMliTensorData(const TfLiteTensor* tfT,
+ MliTensorInterface* mliT,
+ bool is_bias_tensor) {
+ // Data is NULL until MliTensorAttachBuffer is called.
+ mliT->SetElType(tfT->type);
+ if (tfT->type == kTfLiteInt8) {
+ mliT->SetData<int8_t>(nullptr, tfT->bytes);
+ } else if (tfT->type == kTfLiteInt32) {
+ mliT->SetData<int32_t>(nullptr, tfT->bytes);
+ } else {
+ MicroPrintf("Wrong data type. Expected int8_t or int32_t.");
+ TFLITE_ABORT;
+ }
+ const int32_t dims_count = GetTensorShape(tfT).DimensionsCount();
+ *mliT->Rank() = is_bias_tensor ? 1 : dims_count;
+
+ int mli_tensor_memstride = 1;
+ if (is_bias_tensor) {
+ mliT->Shape()[0] = GetTensorShape(tfT).Dims(dims_count - 1);
+ mliT->MemStride()[0] = mli_tensor_memstride;
+ } else {
+ for (int i = dims_count - 1; i >= 0; --i) {
+ mliT->Shape()[i] = GetTensorShape(tfT).Dims(i);
+ mliT->MemStride()[i] = mli_tensor_memstride;
+ mli_tensor_memstride *= GetTensorShape(tfT).Dims(i);
+ }
+ }
+ }
+
+ inline void ConvertToMliQuantParams(const TfLiteTensor* tfT,
+ MliTensorInterface* mliT) {
+ *mliT->Dim() = -1;
+ #ifdef MLI_2_0
+ *mliT->ZeroPointCapacity() = 0;
+ #endif
+ *mliT->ZeroPoint<int16_t*>() = tfT->params.zero_point;
+ float fscale = tfT->params.scale;
+ mliT->SetScale(fscale);
+ }
+
+ inline void ConvertToMliQuantParamsPerChannel(const TfLiteTensor* tfT,
+ MliTensorInterface* mliT,
+ bool is_bias_tensor) {
+ // mli tensor scale and zero_point arrays should be allocated at this point
+ #ifdef MLI_2_0
+ TFLITE_DCHECK_NE(*mliT->Scale<int16_t**>(), 0);
+ TFLITE_DCHECK_NE(*mliT->ZeroPoint<int16_t**>(), 0);
+ #else
+ TFLITE_DCHECK_NE(*mliT->Scale<int32_t**>(), 0);
+ TFLITE_DCHECK_NE(*mliT->ZeroPoint<int16_t**>(), 0);
+ #endif
+
+ // get per channel quantization parameters
+ const auto* affine_quantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfT->quantization.params);
+ int32_t quantized_dimension =
+ is_bias_tensor ? 0 : affine_quantization->quantized_dimension;
+ const int num_channels = mliT->Shape()[quantized_dimension];
+
+ *mliT->Dim() = quantized_dimension;
+
+ // set capacities
+ #ifdef MLI_2_0
+ *mliT->ScaleFracBitsCapacity() = num_channels * sizeof(int8_t);
+ *mliT->ScaleCapacity() = num_channels * sizeof(int16_t);
+ *mliT->ZeroPointCapacity() = num_channels * sizeof(int16_t);
+ #endif
+ float* fscale = affine_quantization->scale->data;
+ mliT->SetScalePerChannel(fscale, num_channels);
+
+ #ifdef MLI_2_0
+ int16_t* zero_point = *mliT->ZeroPoint<int16_t**>();
+ for (int i = 0; i < num_channels; i++) {
+ zero_point[i] = tfT->params.zero_point;
+ }
+ #endif
+ }
+
+ template <typename datatype>
+ inline void MliTensorAttachBuffer(const TfLiteEvalTensor*,
+ const MliTensorInterface*);
+
+ template <>
+ inline void MliTensorAttachBuffer<int8_t>(const TfLiteEvalTensor* tfT,
+ const MliTensorInterface* mliT) {
+ // "const_cast" here used to attach const data buffer to the initially
+ // non-const mli_tensor. This is required by current implementation of MLI
+ // backend and planned for redesign due to this and some other aspects.
+ mliT->SetData<int8_t>(
+ const_cast<int8_t*>(tflite_micro::micro::GetTensorData<int8_t>(tfT)),
+ *mliT->DataCapacity());
+ }
+
+ template <>
+ inline void MliTensorAttachBuffer<int32_t>(const TfLiteEvalTensor* tfT,
+ const MliTensorInterface* mliT) {
+ // "const_cast" here used to attach const data buffer to the initially
+ // non-const mli_tensor. This is required by current implementation of MLI
+ // backend and planned for redesign due to this and some other aspects.
+ mliT->SetData<int32_t>(
+ const_cast<int32_t*>(tflite_micro::micro::GetTensorData<int32_t>(tfT)),
+ *mliT->DataCapacity());
+ }
+
+ inline void ConvertToMliTensor(const TfLiteTensor* tfT,
+ MliTensorInterface* mliT) {
+ ConvertToMliTensorData(tfT, mliT, false);
+ ConvertToMliQuantParams(tfT, mliT);
+ }
+
+ inline void ConvertToMliTensorPerChannel(const TfLiteTensor* tfT,
+ MliTensorInterface* mliT,
+ bool is_bias_tensor) {
+ ConvertToMliTensorData(tfT, mliT, is_bias_tensor);
+ ConvertToMliQuantParamsPerChannel(tfT, mliT, is_bias_tensor);
+ }
+
+ inline void PrepareLocalTensor(mli_tensor* tensor, mli_tensor* tensor_local) {
+ #ifdef MLI_2_0
+ int8_t* local_data = tensor_local->data.mem.pi8;
+ *tensor_local = *tensor;
+ tensor_local->data.mem.pi8 = local_data;
+ #else
+ int8_t* local_data = static_cast<int8_t*>(tensor_local->data);
+ *tensor_local = *tensor;
+ tensor_local->data = local_data;
+ #endif
+ }
+
+ inline void AdjustBiasTensor(MliTensorInterface* bias, MliTensorInterface* in,
+ MliTensorInterface* weights) {
+ int32_t quantized_dimension = *bias->Dim();
+ const int num_channels =
+ quantized_dimension < 0 ? 1 : bias->Shape()[quantized_dimension];
+ for (int i = 0; i < num_channels; i++) {
+ int32_t adjusted_bias_scale =
+ (*in->Scale<int16_t*>()) * (*weights->Scale<int16_t**>())[i];
+ int in_shift = *in->ScaleFracBits<int8_t*>();
+ int w_shift = (*weights->ScaleFracBits<int8_t**>())[i];
+ int b_shift = (*bias->ScaleFracBits<int8_t**>())[i];
+ int bias_shift = in_shift + w_shift - b_shift;
+ (*bias->Scale<int16_t**>())[i] =
+ (int16_t)(adjusted_bias_scale >> bias_shift);
+ }
+ }
+
+ #ifdef MLI_2_0_KRNL_TEST
+ // Reorder an array according to given indexes. If backward is true, order of
+ // index array must be reversed.
+ inline static void reorder(uint32_t* arr, const uint8_t index[],
+ bool backward) {
+ uint32_t temp[MLI_MAX_RANK];
+ for (int8_t i = 0; i < MLI_MAX_RANK; i++) {
+ if (backward)
+ temp[index[i]] = arr[i];
+ else
+ temp[i] = arr[index[i]];
+ }
+ for (int8_t i = 0; i < MLI_MAX_RANK; i++) {
+ arr[i] = temp[i];
+ }
+ }
+
+ // Change shape of mli tensor and recalculate mem strides.
+ inline void change_shape(mli_tensor* mliT, const uint8_t dim_order[]) {
+ reorder(mliT->shape, dim_order, false);
+
+ // Calculate strides for new layout
+ int mli_tensor_memstride = 1;
+ for (int shape_idx = mliT->rank - 1; shape_idx >= 0; --shape_idx) {
+ mliT->mem_stride[shape_idx] = mli_tensor_memstride;
+ mli_tensor_memstride *= mliT->shape[shape_idx];
+ }
+ }
+
+ inline void permute_weights(const mli_tensor* weights_src,
+ const mli_permute_cfg* permute_cfg,
+ mli_tensor* weights_dst,
+ mli_data_container* buffer_data) {
+ mli_tensor buffer = {};
+ buffer.el_params = weights_dst->el_params;
+ buffer.data = *buffer_data;
+ // Compare weights tensor size and avaliable buffer capacity.
+ int buffer_size = buffer_data->capacity;
+ int weights_size = mli_hlp_count_elem_num(weights_src, 0) *
+ mli_hlp_tensor_element_size(weights_src);
+
+ // Need to change shape of distanation weights buffer according to permute
+ // dimensions order to calculate slice sizes
+ change_shape(weights_dst, permute_cfg->perm_dim);
+
+ if (buffer_size >= weights_size) {
+ mli_mov_cfg_t copy_config;
+ mli_mov_cfg_for_copy(&copy_config);
+ mli_mov_tensor_sync(weights_src, &copy_config, &buffer);
+ mli_krn_permute_sa8(&buffer, permute_cfg, weights_dst);
+ } else {
+ // Weights shape is NHWC and output (buffer) shape is HWC where N_w = C_o.
+ // Buffer size (H_o * W_o) must be more or equal then the weights size (H_w
+ // * W_w * C_w). So, this is the reason, why buffer size (output tensor) is
+ // divided by channel shape.
+ uint32_t slice_size = buffer_size / weights_src->shape[KRNL_C_DIM_NHWC];
+
+ mli_mov_cfg_t copy_config = {};
+ uint32_t src_offsets[] = {0, 0, 0, 0};
+ uint32_t src_sizes[] = {0, 0, 0, 0};
+ int dst_mem_stride[] = {0, 0, 0, 0};
+
+ mli_tensor weights_dst_sub_tensor;
+ mli_sub_tensor_cfg sub_tensor_cfg = {};
+ sub_tensor_cfg.sub_tensor_rank = weights_src->rank;
+
+ // Calculate dimensions for slice accroding to buffer capacity.
+ // Now, after calling change_shape() function, dst weights buffer has the
+ // MLI layout (HWCN). This means, the innermost dimension (N) of dst weights
+ // tensor is equal to the innermost dimension of output tensor (N).
+ sub_tensor_cfg.size[weights_dst->rank - 1] =
+ src_sizes[weights_dst->rank - 1] = weights_src->shape[KRNL_C_DIM_NHWC];
+ // Now need to calculate other shapes for weights slice. Total slice size is
+ // H*W*C*N, so to calculate sizes for each axis, avaliable slice size is
+ // divided by shape for each axis.
+ uint32_t slice_size_left = slice_size;
+ for (uint32_t i = 0; i < weights_dst->rank - 1; i++) {
+ sub_tensor_cfg.size[i] = src_sizes[i] =
+ slice_size_left / weights_dst->shape[i] > 0 ? weights_dst->shape[i]
+ : slice_size_left;
+ slice_size_left /= weights_dst->shape[i];
+ slice_size_left = slice_size_left > 0 ? slice_size_left : 1;
+ }
+ // Need to reorder src tensor sizes because it is still in TFLM format
+ // (NHWC) and src_sizes array calculated as (HWCN).
+ reorder(src_sizes, permute_cfg->perm_dim, true);
+
+ sub_tensor_cfg.offset[KRNL_C_DIM_HWCN] = src_offsets[KRNL_H_DIM_HWCN] = 0;
+ sub_tensor_cfg.offset[KRNL_H_DIM_HWCN] = src_offsets[KRNL_W_DIM_HWCN] = 0;
+ sub_tensor_cfg.offset[KRNL_W_DIM_HWCN] = src_offsets[KRNL_D_DIM_HWCN] = 0;
+ sub_tensor_cfg.offset[KRNL_D_DIM_HWCN] = src_offsets[KRNL_C_DIM_HWCN] = 0;
+ do {
+ do {
+ do {
+ do {
+ mli_mov_cfg_for_slice(&copy_config, (int*)src_offsets,
+ (int*)src_sizes, dst_mem_stride);
+ mli_mov_tensor_sync(weights_src, &copy_config, &buffer);
+
+ mli_hlp_create_subtensor(weights_dst, &sub_tensor_cfg,
+ &weights_dst_sub_tensor);
+ mli_krn_permute_sa8(&buffer, permute_cfg, &weights_dst_sub_tensor);
+
+ // For each axis, it is necessary to recalculate the offsets and
+ // slice sizes.
+ sub_tensor_cfg.offset[2] = src_offsets[3] += src_sizes[3];
+ src_sizes[3] =
+ std::min(src_sizes[3], weights_src->shape[3] - src_offsets[3]);
+ } while (src_offsets[3] < weights_src->shape[3]);
+
+ sub_tensor_cfg.offset[1] = src_offsets[2] += src_sizes[2];
+ src_sizes[2] =
+ std::min(src_sizes[2], weights_src->shape[2] - src_offsets[2]);
+ } while (src_offsets[2] < weights_src->shape[2]);
+
+ sub_tensor_cfg.offset[0] = src_offsets[1] += src_sizes[1];
+ src_sizes[1] =
+ std::min(src_sizes[1], weights_src->shape[1] - src_offsets[1]);
+ } while (src_offsets[1] < weights_src->shape[1]);
+
+ sub_tensor_cfg.offset[3] = src_offsets[0] += src_sizes[0];
+ src_sizes[0] =
+ std::min(src_sizes[0], weights_src->shape[0] - src_offsets[0]);
+ } while (src_offsets[0] < weights_src->shape[0]);
+ }
+ }
+ #endif
+
+ } // namespace micro
+ } // namespace ops
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_TF_UTILS_H_
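
A minimal, hypothetical sketch (not taken from the package sources) of the two-step pattern the header above documents: convert the TfLite tensor description first, then attach the data buffer ("Data is NULL until MliTensorAttachBuffer is called"). The helper name ExampleConvertAndAttach and the tensor names are placeholders assumed for illustration.

#include "tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h"

namespace tflite_micro {
namespace ops {
namespace micro {

// Hypothetical helper pairing the two declared steps for an int8 tensor.
inline void ExampleConvertAndAttach(const TfLiteTensor* tf_tensor,
                                    const TfLiteEvalTensor* tf_eval_tensor,
                                    MliTensorInterface* mli_in) {
  // Step 1: copy element type, shape, mem strides and quant params; the MLI
  // tensor's data pointer is left NULL.
  ConvertToMliTensor(tf_tensor, mli_in);
  // Step 2: attach the actual int8 data buffer from the eval tensor.
  MliTensorAttachBuffer<int8_t>(tf_eval_tensor, mli_in);
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite_micro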
xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h
@@ -0,0 +1,145 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUF_MGR_H_
+ #define TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUF_MGR_H_
+
+ #include "mli_api.h" // NOLINT
+ #include "mli_interface.h"
+ #include "tensorflow/lite/c/common.h"
+
+ namespace tflite_micro {
+ namespace ops {
+ namespace micro {
+
+ /**
+ * @brief Function to allocate scratch buffers for the convolution tensors
+ *
+ * @detail This function will update the data pointers in the 4 tensors with
+ * pointers to scratch buffers in fast local memory.
+ *
+ * @param context [I] pointer to TfLite context (needed for error handling)
+ * @param in [IO] pointer to the input tensor
+ * @param weights [IO] pointer to the weights tensor
+ * @param bias [IO] pointer to the bias tensor
+ * @param output [IO] pointer to the output tensor
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus get_arc_scratch_buffer_for_conv_tensors(
+ TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights,
+ MliTensorInterface* bias, MliTensorInterface* out);
+
+ /**
+ * @brief Function to allocate scratch buffers for pooling kernels with only
+ * input and output buffers
+ *
+ * @detail This function will update the data pointers in the 2 tensors with
+ * pointers to scratch buffers in fast local memory.
+ *
+ * @param context [I] pointer to TfLite context (needed for error handling)
+ * @param in [IO] pointer to the input tensor
+ * @param output [IO] pointer to the output tensor
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors(
+ TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* out);
+
+ /**
+ * @brief Function to allocate scratch buffers for the fully connect tensors
+ *
+ * @detail This function will update the data pointers in the 4 tensors with
+ * pointers to scratch buffers in fast local memory.
+ *
+ * @param context [I] pointer to TfLite context (needed for error handling)
+ * @param in [IO] pointer to the input tensor
+ * @param weights [IO] pointer to the weights tensor
+ * @param bias [IO] pointer to the bias tensor
+ * @param output [IO] pointer to the output tensor
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus get_arc_scratch_buffer_for_fully_connect_tensors(
+ TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights,
+ MliTensorInterface* bias, MliTensorInterface* out);
+
+ /**
+ * @brief Function to allocate scratch buffers for the eltwise function tensors
+ *
+ * @detail This function will update the data pointers in the 3 tensors with
+ * pointers to scratch buffers in fast local memory.
+ *
+ * @param context [I] pointer to TfLite context (needed for error handling)
+ * @param in1 [IO] pointer to the first input tensor
+ * @param in2 [IO] pointer to the second input tensor
+ * @param output [IO] pointer to the output tensor
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus get_arc_scratch_buffer_for_eltwise_tensors(
+ TfLiteContext* context, MliTensorInterface* in1, MliTensorInterface* in2,
+ MliTensorInterface* out);
+
+ /**
+ * @brief Function to calculate slice size for io tensors
+ *
+ * @detail This function will calculate the slice size in the height dimension
+ * for input and output tensors. it takes into account the kernel size and the
+ * padding. the function will look at the capacity filed in the in and out
+ * tensor to determine the available buffersize.
+ *
+ * @param in [I] pointer to the input tensor
+ * @param out [I] pointer to the output tensor
+ * @param kernelHeight [I] size of the kernel in height dimension
+ * @param strideHeight [I] input stride in height dimension
+ * @param padding_top [I] number of lines with zeros at the top
+ * @param padding_bot [I] number of lines with zeros at the bottom
+ * @param inSliceHeight [O] slice size in height dimension for the input
+ * tensor
+ * @param outSliceHeight [O] slice size in height dimension for the output
+ * tensor
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus arc_scratch_buffer_calc_slice_size_io(
+ const MliTensorInterface* in, const MliTensorInterface* out,
+ const int kernelHeight, const int strideHeight, const int padding_top,
+ const int padding_bot, int* in_slice_height, int* out_slice_height);
+
+ /**
+ * @brief Function to calculate slice size for weight slicing
+ *
+ * @detail This function will calculate the slice size in the output channel
+ * dimension for weight and bias tensors. the function will look at the capacity
+ * filed in the weights and bias tensor to determine the available buffersize.
+ *
+ * @param weights [I] pointer to the input tensor
+ * @param bias [I] pointer to the output tensor
+ * @param weightOutChDimension [I] dimension of the output channels in the
+ * weights tensor
+ * @param sliceChannels [O] slice size in output channel dimension
+ *
+ * @return Tf Lite status code
+ */
+ TfLiteStatus arc_scratch_buffer_calc_slice_size_weights(
+ const MliTensorInterface* weights, const MliTensorInterface* bias,
+ const int weight_out_ch_dimension, int* slice_channels);
+
+ } // namespace micro
+ } // namespace ops
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUF_MGR_H_
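
A hedged illustration of how these declarations fit together (not code from the package; ExamplePrepareConvScratch and its parameter names are placeholders): a convolution kernel's prepare step would typically move the tensors into scratch memory first and then compute the per-slice heights.

#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h"

namespace tflite_micro {
namespace ops {
namespace micro {

// Hypothetical prepare-time helper. The MliTensorInterface wrappers are
// assumed to have been converted with the mli_tf_utils.h helpers beforehand.
inline TfLiteStatus ExamplePrepareConvScratch(
    TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights,
    MliTensorInterface* bias, MliTensorInterface* out, int kernel_height,
    int stride_height, int padding_top, int padding_bot) {
  // Point the four tensors at scratch buffers in fast local memory.
  TfLiteStatus status =
      get_arc_scratch_buffer_for_conv_tensors(context, in, weights, bias, out);
  if (status != kTfLiteOk) return status;

  // Work out how many input/output rows fit per slice in those buffers.
  int in_slice_height = 0;
  int out_slice_height = 0;
  return arc_scratch_buffer_calc_slice_size_io(
      in, out, kernel_height, stride_height, padding_top, padding_bot,
      &in_slice_height, &out_slice_height);
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite_micro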
xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h
@@ -0,0 +1,78 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUFFERS_H_
+ #define TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUFFERS_H_
+
+ #include "mli_api.h" // NOLINT
+ #include "tensorflow/lite/c/common.h"
+
+ namespace tflite_micro {
+ namespace ops {
+ namespace micro {
+
+ void init_arc_scratch_buffers(void);
+ void* get_arc_scratch_buffer(int size); // Function to assign fast memory
+ // from one of 3 scratch buffers.
+
+ void get_arc_scratch_buffer_max_size(int* size);
+ void get_arc_scratch_buffer_two_max_sizes(int* size1, int* size2);
+
+ static inline bool inside_arc_dccm(void* p) {
+ #if core_config_dccm_present
+ return ((unsigned)p >= core_config_dccm_base) &&
+ ((unsigned)p < core_config_dccm_base + core_config_dccm_size);
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool inside_arc_xccm(void* p) {
+ #if core_config_xy
+ return ((unsigned)p >= core_config_xy_x_base) &&
+ ((unsigned)p < core_config_xy_x_base + core_config_xy_size);
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool inside_arc_yccm(void* p) {
+ #if core_config_xy_size
+ return ((unsigned)p >= core_config_xy_y_base) &&
+ ((unsigned)p < core_config_xy_y_base + core_config_xy_size);
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool inside_arc_vccm(void* p) {
+ #if core_config_vec_mem_size
+ return ((unsigned)p >= core_config_vec_mem_base) &&
+ ((unsigned)p < core_config_vec_mem_base + core_config_vec_mem_size);
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool inside_arc_ccm(void* p) {
+ return inside_arc_dccm(p) || inside_arc_xccm(p) || inside_arc_yccm(p) ||
+ inside_arc_vccm(p);
+ }
+
+ } // namespace micro
+ } // namespace ops
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUFFERS_H_
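
A small hypothetical sketch of the allocation helpers above (not from the package; the NULL-on-failure behaviour and ExampleGetCcmScratch are assumptions for illustration): request a scratch block and confirm it sits in one of the ARC closely-coupled memory regions.

#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"

namespace tflite_micro {
namespace ops {
namespace micro {

// Hypothetical helper; init_arc_scratch_buffers() is assumed to have been
// called once beforehand.
inline void* ExampleGetCcmScratch(int size_bytes) {
  void* buf = get_arc_scratch_buffer(size_bytes);
  // Assumption: a NULL return means no scratch bank could satisfy the request.
  if (buf == nullptr || !inside_arc_ccm(buf)) {
    return nullptr;  // Not usable as fast local memory for MLI kernels.
  }
  return buf;
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite_micro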
xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h
@@ -0,0 +1,24 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_CEVA_CEVA_COMMON_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_CEVA_CEVA_COMMON_H_
+
+ #if defined(CEVA_BX1) || defined(CEVA_SP500)
+ extern int32_t CEVA_TFLM_KERNELS_SCRATCH[];
+ extern int32_t CEVA_TFLM_KERNELS_SCRATCH_SIZE_VAL;
+ #endif
+
+ #endif