xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +74 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +60 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,150 @@
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_
+
+ #include "tensorflow/lite/c/builtin_op_data.h"
+ #include "tensorflow/lite/kernels/internal/types.h"
+
+ namespace tflite_micro {
+
+ // Input Tensors of size {n_batch, n_input}
+ constexpr int kLstmInputTensor = 0;
+
+ // Input weight tensors of size: {n_cell, n_input}
+ constexpr int kLstmInputToInputWeightsTensor = 1; // Optional
+ constexpr int kLstmInputToForgetWeightsTensor = 2;
+ constexpr int kLstmInputToCellWeightsTensor = 3;
+ constexpr int kLstmInputToOutputWeightsTensor = 4;
+
+ // Recurrent weight tensors of size {n_cell, n_output}
+ constexpr int kLstmRecurrentToInputWeightsTensor = 5; // Optional
+ constexpr int kLstmRecurrentToForgetWeightsTensor = 6;
+ constexpr int kLstmRecurrentToCellWeightsTensor = 7;
+ constexpr int kLstmRecurrentToOutputWeightsTensor = 8;
+
+ // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
+ constexpr int kLstmCellToInputWeightsTensor = 9; // Optional
+ constexpr int kLstmCellToForgetWeightsTensor = 10; // Optional
+ constexpr int kLstmCellToOutputWeightsTensor = 11; // Optional
+
+ // Gates bias tensors of size {n_cell}
+ constexpr int kLstmInputGateBiasTensor = 12; // Optional
+ constexpr int kLstmForgetGateBiasTensor = 13;
+ constexpr int kLstmCellGateBiasTensor = 14;
+ constexpr int kLstmOutputGateBiasTensor = 15;
+
+ // Projection weight tensor of size {n_output, n_cell}
+ constexpr int kLstmProjectionWeightsTensor = 16; // Optional
+ // Projection bias tensor of size {n_output}
+ constexpr int kLstmProjectionBiasTensor = 17; // Optional
+
+ // These state tensors are defined as variable tensors, and will be modified by
+ // this op.
+ constexpr int kLstmOutputStateTensor = 18;
+ constexpr int kLstmCellStateTensor = 19;
+
+ // Layer norm coefficient tensors of size {n_cell}, representing a diagonal
+ // matrix.
+ constexpr int kLstmInputLayerNormCoefficientsTensor = 20; // Optional
+ constexpr int kLstmForgetLayerNormCoefficientsTensor = 21; // Optional
+ constexpr int kLstmCellLayerNormCoefficientsTensor = 22; // Optional
+ constexpr int kLstmOutputLayerNormCoefficientsTensor = 23; // Optional
+
+ // Output tensors.
+ constexpr int kLstmOutputTensor = 0;
+
+ // Parameters for the two fully connected computations inside each gate
+ struct GateParameters {
+   FullyConnectedParams input_fc_params;
+   FullyConnectedParams recurrent_fc_params;
+ };
+
+ // Parameters for the element-wise multiplications between gate outputs
+ struct InterGateParameters {
+   ArithmeticParams forget_cell_mul_params;
+   ArithmeticParams input_mul_params;
+   ArithmeticParams output_mul_params;
+ };
+
+ // Size information about the LSTM kernel, which is deduced from tensors stored
+ // in the flat buffer file.
+ struct LstmSizeInfo {
+   bool time_major;
+   int batch_size;
+   int time_steps;
+   int input_dimension;
+   int state_dimension;
+ };
+
+ // Contains information about the cell state tensor
+ struct CellStateInfo {
+   float cell_clip;
+   // Clipping range for the cell state; only a 16-bit cell state is supported
+   // (could be generalized through templatization).
+   int16_t quantized_cell_clip;
+   // 2^-cell_state_scale_power = cell state scale, required by integer tanh
+   // computation
+   int32_t cell_state_scale_power;
+ };
+
+ // Contains required computation information for LSTM kernel evaluation.
+ // Specifically, it includes shape and quantization settings for the LSTM
+ // internal operations. Formatted to support operations defined in the
+ // tensorflow/lite/kernels/internal/reference/integer_ops
+ // Should be constructed during the preparation phase
+ struct OpDataLSTM {
+   LstmSizeInfo size_info;
+   CellStateInfo cell_state_info;
+   TfLiteFusedActivation cell_gate_nonlinear_type;
+   GateParameters forget_gate_parameters;
+   GateParameters input_gate_parameters;
+   GateParameters cell_gate_parameters;
+   GateParameters output_gate_parameters;
+   InterGateParameters inter_gate_parameters;
+   int buffer_indices[4]; // TFLM only
+ };
+
+ // Provides an interface to access the internal tensors and buffers used for
+ // LSTM invocation. Constructed during the invocation phase.
+ struct LSTMKernelContents {
+  public:
+   // Internal tensors, fixed (const); see lstm_shared.h for tensor names.
+   const TfLiteEvalTensor* GetInternalTensor(const int tensor_index) const {
+     return internal_tensors[tensor_index];
+   }
+   // Variable tensors (will be changed, cannot be const)
+   TfLiteEvalTensor* HiddenStateTensor() {
+     return internal_tensors[kLstmOutputStateTensor];
+   }
+   TfLiteEvalTensor* CellStateTensor() {
+     return internal_tensors[kLstmCellStateTensor];
+   }
+   // Node internal tensors with indexes defined at the beginning of the file
+   TfLiteEvalTensor* internal_tensors[24];
+   TfLiteEvalTensor* output_tensor;
+ };
+
+ template <typename CellType>
+ struct LSTMBuffers {
+   // TFLM buffers require buffer indices from OpDataLSTM.
+   CellType* buffer0;
+   CellType* buffer1;
+   CellType* buffer2;
+   CellType* buffer3;
+ };
+
+ } // namespace tflite_micro
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_
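The constants above are the tensor indices the shared LSTM kernels use to look up node inputs, and the structs bundle the shape and quantization data computed during Prepare. A minimal sketch (not part of this wheel) of how a kernel routine might read tensors through LSTMKernelContents, assuming the struct has already been populated by the LSTM kernel's Invoke path; the helper name is made up:

#include "tensorflow/lite/micro/kernels/lstm_shared.h"

namespace tflite_micro {

// Hypothetical helper: fetches the input-to-forget weights and the variable
// hidden-state tensor through the indices and accessors declared above.
inline void InspectLstmState(const LSTMKernelContents& contents,
                             const TfLiteEvalTensor** forget_weights,
                             const TfLiteEvalTensor** hidden_state) {
  // kLstmInputToForgetWeightsTensor == 2, per the index table in this header.
  *forget_weights = contents.GetInternalTensor(kLstmInputToForgetWeightsTensor);
  // The hidden (output) state is one of the two variable tensors.
  *hidden_state = contents.GetInternalTensor(kLstmOutputStateTensor);
}

}  // namespace tflite_micro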
@@ -0,0 +1,158 @@
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
+
+ #include "signal/micro/kernels/irfft.h"
+ #include "signal/micro/kernels/rfft.h"
+ #include "tensorflow/lite/c/common.h"
+
+ // Forward declaration of all micro op kernel registration methods. These
+ // registrations are included with the standard `BuiltinOpResolver`.
+ //
+ // This header is particularly useful in cases where only a subset of ops are
+ // needed. In such cases, the client can selectively add only the registrations
+ // their model requires, using a custom `(Micro)MutableOpResolver`. Selective
+ // registration in turn allows the linker to strip unused kernels.
+
+ namespace tflite_micro {
+
+ // TFLM is incrementally moving towards a flat tflite namespace
+ // (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops) should
+ // have their Register function declarations in the tflite namespace.
+
+ TFLMRegistration Register_ABS();
+ TFLMRegistration Register_ADD();
+ TFLMRegistration Register_ADD_N();
+ TFLMRegistration Register_ARG_MAX();
+ TFLMRegistration Register_ARG_MIN();
+ TFLMRegistration Register_ASSIGN_VARIABLE();
+ TFLMRegistration Register_AVERAGE_POOL_2D();
+ TFLMRegistration Register_BATCH_MATMUL();
+ TFLMRegistration Register_BATCH_TO_SPACE_ND();
+ TFLMRegistration Register_BROADCAST_ARGS();
+ TFLMRegistration Register_BROADCAST_TO();
+ TFLMRegistration Register_CALL_ONCE();
+ TFLMRegistration Register_CAST();
+ TFLMRegistration Register_CEIL();
+ // TODO(b/160234179): Change custom OPs to also return by value.
+ TFLMRegistration* Register_CIRCULAR_BUFFER();
+ TFLMRegistration Register_CONCATENATION();
+ TFLMRegistration Register_CONV_2D();
+ TFLMRegistration Register_COS();
+ TFLMRegistration Register_CUMSUM();
+ TFLMRegistration Register_DEPTH_TO_SPACE();
+ TFLMRegistration Register_DEPTHWISE_CONV_2D();
+ TFLMRegistration Register_DEQUANTIZE();
+ TFLMRegistration Register_DIV();
+ TFLMRegistration Register_ELU();
+ TFLMRegistration Register_EMBEDDING_LOOKUP();
+ TFLMRegistration Register_EQUAL();
+ TFLMRegistration* Register_ETHOSU();
+ TFLMRegistration Register_EXP();
+ TFLMRegistration Register_EXPAND_DIMS();
+ TFLMRegistration Register_FILL();
+ TFLMRegistration Register_FLOOR();
+ TFLMRegistration Register_FLOOR_DIV();
+ TFLMRegistration Register_FLOOR_MOD();
+ TFLMRegistration Register_FULLY_CONNECTED();
+ TFLMRegistration Register_GATHER();
+ TFLMRegistration Register_GATHER_ND();
+ TFLMRegistration Register_GREATER();
+ TFLMRegistration Register_GREATER_EQUAL();
+ TFLMRegistration Register_HARD_SWISH();
+ TFLMRegistration Register_IF();
+ TFLMRegistration Register_L2_NORMALIZATION();
+ TFLMRegistration Register_L2_POOL_2D();
+ TFLMRegistration Register_LEAKY_RELU();
+ TFLMRegistration Register_LESS();
+ TFLMRegistration Register_LESS_EQUAL();
+ TFLMRegistration Register_LOG();
+ TFLMRegistration Register_LOG_SOFTMAX();
+ TFLMRegistration Register_LOGICAL_AND();
+ TFLMRegistration Register_LOGICAL_NOT();
+ TFLMRegistration Register_LOGICAL_OR();
+ TFLMRegistration Register_LOGISTIC();
+ TFLMRegistration Register_MAX_POOL_2D();
+ TFLMRegistration Register_MAXIMUM();
+ TFLMRegistration Register_MEAN();
+ TFLMRegistration Register_MINIMUM();
+ TFLMRegistration Register_MIRROR_PAD();
+ TFLMRegistration Register_MUL();
+ TFLMRegistration Register_NEG();
+ TFLMRegistration Register_NOT_EQUAL();
+ TFLMRegistration Register_PACK();
+ TFLMRegistration Register_PAD();
+ TFLMRegistration Register_PADV2();
+ TFLMRegistration Register_PRELU();
+ TFLMRegistration Register_QUANTIZE();
+ TFLMRegistration Register_READ_VARIABLE();
+ TFLMRegistration Register_REDUCE_MAX();
+ TFLMRegistration Register_RELU();
+ TFLMRegistration Register_RELU6();
+ TFLMRegistration Register_RESHAPE();
+ TFLMRegistration Register_RESIZE_BILINEAR();
+ TFLMRegistration Register_RESIZE_NEAREST_NEIGHBOR();
+ TFLMRegistration Register_ROUND();
+ TFLMRegistration Register_RSQRT();
+ TFLMRegistration Register_SELECT_V2();
+ TFLMRegistration Register_SHAPE();
+ TFLMRegistration Register_SIN();
+ TFLMRegistration Register_SLICE();
+ TFLMRegistration Register_SOFTMAX();
+ TFLMRegistration Register_SPACE_TO_BATCH_ND();
+ TFLMRegistration Register_SPACE_TO_DEPTH();
+ TFLMRegistration Register_SPLIT();
+ TFLMRegistration Register_SPLIT_V();
+ TFLMRegistration Register_SQRT();
+ TFLMRegistration Register_SQUARE();
+ TFLMRegistration Register_SQUARED_DIFFERENCE();
+ TFLMRegistration Register_SQUEEZE();
+ TFLMRegistration Register_STRIDED_SLICE();
+ TFLMRegistration Register_SUB();
+ TFLMRegistration Register_SUM();
+ TFLMRegistration Register_SVDF();
+ TFLMRegistration Register_TANH();
+ TFLMRegistration Register_TRANSPOSE();
+ TFLMRegistration Register_TRANSPOSE_CONV();
+ // TODO(b/230666079): resolve conflict with xtensa implementation
+ TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
+ TFLMRegistration Register_UNPACK();
+ TFLMRegistration Register_VAR_HANDLE();
+ TFLMRegistration Register_WHILE();
+ TFLMRegistration Register_ZEROS_LIKE();
+
+ // TODO(b/295174388): Add the rest of inference only registration functions.
+ TFLMInferenceRegistration RegisterInference_FULLY_CONNECTED();
+
+ // TODO(b/160234179): Change custom OPs to also return by value.
+ namespace tflm_signal {
+ TFLMRegistration* Register_DELAY();
+ TFLMRegistration* Register_FFT_AUTO_SCALE();
+ TFLMRegistration* Register_FILTER_BANK();
+ TFLMRegistration* Register_FILTER_BANK_LOG();
+ TFLMRegistration* Register_FILTER_BANK_SPECTRAL_SUBTRACTION();
+ TFLMRegistration* Register_FILTER_BANK_SQUARE_ROOT();
+ TFLMRegistration* Register_ENERGY();
+ TFLMRegistration* Register_FRAMER();
+ TFLMRegistration* Register_OVERLAP_ADD();
+ TFLMRegistration* Register_PCAN();
+ TFLMRegistration* Register_STACKER();
+ TFLMRegistration* Register_WINDOW();
+ } // namespace tflm_signal
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
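As the header comment notes, these declarations exist so a client can register only the kernels its model actually uses. A brief sketch of that pattern, assuming the MicroMutableOpResolver template and its Add* helpers from micro_mutable_op_resolver.h (file 355 in this wheel); the three-op set is made up for illustration:

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Registers just three kernels; the linker can then strip every other kernel
// declared in micro_ops.h from the final image.
TfLiteStatus RegisterSelectedOps(
    tflite_micro::MicroMutableOpResolver<3>& resolver) {
  TF_LITE_ENSURE_STATUS(resolver.AddConv2D());
  TF_LITE_ENSURE_STATUS(resolver.AddFullyConnected());
  TF_LITE_ENSURE_STATUS(resolver.AddSoftmax());
  return kTfLiteOk;
}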
@@ -0,0 +1,56 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ // This file and the associated .cc file are branched from
+ // tensorflow/lite/kernels/internal/reference/portable_tensor_utils*
+ // TFLM needs to create its own because the original files are coupled with
+ // the tensor_utils module, which we cannot reuse due to its use of the
+ // Eigen library.
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_
+
+ #include <algorithm>
+ #include <cmath>
+ #include <cstdint>
+
+ #include "tensorflow/lite/c/builtin_op_data.h"
+ #include "tensorflow/lite/c/common.h"
+ #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
+
+ #if defined(_MSC_VER)
+ #define __restrict__ __restrict
+ #endif
+
+ namespace tflite_micro {
+
+ // Not all backends support CpuBackendContext usage, so forward declare to avoid
+ // pulling in its implementation.
+ // TODO(b/230666277): consider removing this since micro does not utilize it
+ class CpuBackendContext;
+
+ // Apply sigmoid to elements of a vector.
+ void PortableApplySigmoidToVector(const float* vector, int v_size,
+                                   float* result);
+ // Apply tanh to elements of a vector.
+ void PortableApplyTanhToVector(const float* vector, int v_size, float* result);
+ // Apply appropriate activation function to elements of a vector.
+ void PortableApplyActivationToVector(const float* vector, int v_size,
+                                      TfLiteFusedActivation activation,
+                                      float* result);
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_
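The three Portable* functions apply a float activation over a whole vector. A small usage sketch against the signatures declared above; the wrapper and buffer names are illustrative only:

#include "tensorflow/lite/micro/kernels/micro_tensor_utils.h"

// Applies the fused activation recorded in an op's builtin data to a float
// vector, writing the result to a separate output buffer.
void ApplyFusedActivation(TfLiteFusedActivation activation, const float* input,
                          int size, float* output) {
  tflite_micro::PortableApplyActivationToVector(input, size, activation,
                                                output);
}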
@@ -0,0 +1,74 @@
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
+
+ #include <cstdint>
+
+ #include "tensorflow/lite/c/builtin_op_data.h"
+ #include "tensorflow/lite/micro/micro_common.h"
+
+ namespace tflite_micro {
+
+ extern const int kMulInput1Tensor;
+ extern const int kMulInput2Tensor;
+ extern const int kMulOutputTensor;
+
+ struct OpDataMul {
+   int32_t input1_zero_point;
+   int32_t input2_zero_point;
+
+   int32_t output_activation_min;
+   int32_t output_activation_max;
+   int32_t output_zero_point;
+   int32_t output_multiplier;
+   int output_shift;
+
+   float output_activation_min_f32;
+   float output_activation_max_f32;
+ };
+
+ void* MulInit(TfLiteContext* context, const char* buffer, size_t length);
+
+ TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node,
+                                 TfLiteMulParams* params, OpDataMul* data);
+
+ TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node);
+
+ TfLiteStatus EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node,
+                                        const OpDataMul* data,
+                                        const TfLiteEvalTensor* input1,
+                                        const TfLiteEvalTensor* input2,
+                                        TfLiteEvalTensor* output);
+
+ void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node,
+                            TfLiteMulParams* params, const OpDataMul* data,
+                            const TfLiteEvalTensor* input1,
+                            const TfLiteEvalTensor* input2,
+                            TfLiteEvalTensor* output);
+
+ // The generic kernel must define the registration function.
+ TFLMRegistration Register_MUL();
+
+ #if defined(CMSIS_NN)
+ TFLMRegistration Register_MUL_INT8();
+ #else
+ // Fallback registration
+ inline TFLMRegistration Register_MUL_INT8() { return Register_MUL(); }
+ #endif
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
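The #if block above is the usual TFLM specialization pattern: an optimized build (here CMSIS-NN) provides a type-specific registration, while reference builds alias it to the generic kernel, so call sites never need their own #ifdefs. A sketch of a call site relying on that guarantee; the helper name is made up:

#include "tensorflow/lite/micro/kernels/mul.h"

// Picks the int8-specialized MUL kernel when one is compiled in; on reference
// builds the inline fallback above makes this identical to Register_MUL().
tflite_micro::TFLMRegistration GetMulRegistration() {
  return tflite_micro::Register_MUL_INT8();
}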
@@ -0,0 +1,27 @@
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_
+
+ #include "tensorflow/lite/c/common.h"
+
+ namespace tflite_micro {
+
+ TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node);
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_
@@ -0,0 +1,142 @@
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
+
+ #include <cstdint>
+
+ #include "tensorflow/lite/c/builtin_op_data.h"
+ #include "tensorflow/lite/c/common.h"
+ #include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
+ #include "tensorflow/lite/kernels/internal/reference/pooling.h"
+ #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+ #include "tensorflow/lite/kernels/kernel_util.h"
+ #include "tensorflow/lite/kernels/padding.h"
+ #include "tensorflow/lite/micro/kernels/kernel_util.h"
+ #include "tensorflow/lite/micro/kernels/micro_ops.h"
+ #include "tensorflow/lite/micro/micro_log.h"
+
+ namespace tflite_micro {
+
+ extern const int kPoolingInputTensor;
+ extern const int kPoolingOutputTensor;
+
+ struct OpDataPooling {
+   TfLitePaddingValues padding;
+   int32_t activation_min;
+   int32_t activation_max;
+   float activation_min_f32;
+   float activation_max_f32;
+ };
+
+ TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context,
+                                     const TfLitePoolParams* params,
+                                     const TfLiteTensor* input,
+                                     const TfLiteTensor* output,
+                                     OpDataPooling* data);
+
+ TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node);
+
+ void AveragePoolingEvalFloat(const TfLiteContext* context,
+                              const TfLiteNode* node,
+                              const TfLitePoolParams* params,
+                              const OpDataPooling* data,
+                              const TfLiteEvalTensor* input,
+                              TfLiteEvalTensor* output);
+
+ template <typename T>
+ void AveragePoolingEvalQuantized(TfLiteContext* context, const TfLiteNode* node,
+                                  const TfLitePoolParams* params,
+                                  const OpDataPooling* data,
+                                  const TfLiteEvalTensor* input,
+                                  TfLiteEvalTensor* output) {
+   TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
+
+   PoolParams op_params;
+   op_params.stride_height = params->stride_height;
+   op_params.stride_width = params->stride_width;
+   op_params.filter_height = params->filter_height;
+   op_params.filter_width = params->filter_width;
+   op_params.padding_values.height = data->padding.height;
+   op_params.padding_values.width = data->padding.width;
+   op_params.quantized_activation_min = data->activation_min;
+   op_params.quantized_activation_max = data->activation_max;
+
+   reference_integer_ops::AveragePool(op_params,
+                                      tflite_micro::micro::GetTensorShape(input),
+                                      tflite_micro::micro::GetTensorData<T>(input),
+                                      tflite_micro::micro::GetTensorShape(output),
+                                      tflite_micro::micro::GetTensorData<T>(output));
+ }
+
+ void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node,
+                          TfLitePoolParams* params, const OpDataPooling* data,
+                          const TfLiteEvalTensor* input,
+                          TfLiteEvalTensor* output);
+
+ template <typename T>
+ void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node,
+                              TfLitePoolParams* params,
+                              const OpDataPooling* data,
+                              const TfLiteEvalTensor* input,
+                              TfLiteEvalTensor* output) {
+   TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
+
+   tflite_micro::PoolParams op_params;
+   op_params.stride_height = params->stride_height;
+   op_params.stride_width = params->stride_width;
+   op_params.filter_height = params->filter_height;
+   op_params.filter_width = params->filter_width;
+   op_params.padding_values.height = data->padding.height;
+   op_params.padding_values.width = data->padding.width;
+   op_params.quantized_activation_min = data->activation_min;
+   op_params.quantized_activation_max = data->activation_max;
+
+   reference_integer_ops::MaxPool(op_params,
+                                  tflite_micro::micro::GetTensorShape(input),
+                                  tflite_micro::micro::GetTensorData<T>(input),
+                                  tflite_micro::micro::GetTensorShape(output),
+                                  tflite_micro::micro::GetTensorData<T>(output));
+ }
+
+ #if defined(CMSIS_NN) || defined(XTENSA)
+ TFLMRegistration Register_AVERAGE_POOL_2D_INT8();
+
+ TFLMRegistration Register_MAX_POOL_2D_INT8();
+
+ TFLMRegistration Register_AVERAGE_POOL_2D_INT16();
+
+ TFLMRegistration Register_MAX_POOL_2D_INT16();
+ #else
+ inline TFLMRegistration Register_AVERAGE_POOL_2D_INT8() {
+   return tflite_micro::Register_AVERAGE_POOL_2D();
+ }
+
+ inline TFLMRegistration Register_MAX_POOL_2D_INT8() {
+   return tflite_micro::Register_MAX_POOL_2D();
+ }
+
+ inline TFLMRegistration Register_AVERAGE_POOL_2D_INT16() {
+   return tflite_micro::Register_AVERAGE_POOL_2D();
+ }
+
+ inline TFLMRegistration Register_MAX_POOL_2D_INT16() {
+   return tflite_micro::Register_MAX_POOL_2D();
+ }
+ #endif
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
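AveragePoolingEvalQuantized and MaxPoolingEvalQuantized are templated on the cell type, so the per-type work is resolved at compile time and a caller only branches on the tensor's runtime type. A minimal sketch (not from the package) of that dispatch, using only names declared above; the wrapper name is hypothetical:

#include "tensorflow/lite/micro/kernels/pooling.h"

namespace tflite_micro {

// Hypothetical Eval helper: routes int8/int16 inputs to the matching template
// instantiation of the quantized average-pool path.
TfLiteStatus AveragePoolEvalQuantizedDispatch(
    TfLiteContext* context, TfLiteNode* node, const TfLitePoolParams* params,
    const OpDataPooling* data, const TfLiteEvalTensor* input,
    TfLiteEvalTensor* output) {
  if (input->type == kTfLiteInt8) {
    AveragePoolingEvalQuantized<int8_t>(context, node, params, data, input,
                                        output);
  } else {
    AveragePoolingEvalQuantized<int16_t>(context, node, params, data, input,
                                         output);
  }
  return kTfLiteOk;
}

}  // namespace tflite_micro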
@@ -0,0 +1,39 @@
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_
+
+ #include "tensorflow/lite/c/common.h"
+ #include "tensorflow/lite/kernels/internal/types.h"
+
+ namespace tflite_micro {
+
+ TfLiteStatus CalculatePreluParams(const TfLiteTensor* input,
+                                   const TfLiteTensor* alpha,
+                                   TfLiteTensor* output, PreluParams* params);
+
+ void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape,
+                                const float* input1_data,
+                                const RuntimeShape& unextended_input2_shape,
+                                const float* input2_data,
+                                const RuntimeShape& unextended_output_shape,
+                                float* output_data);
+
+ TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node);
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_
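For reference, the element-wise operation behind BroadcastPrelu4DSlowFloat is output = x when x >= 0 and alpha * x otherwise, with alpha broadcast across the input shape. A scalar sketch, illustrative only:

// PReLU for a single element; the declared kernel applies this with alpha
// broadcast over 4-D shapes.
inline float PreluScalarSketch(float x, float alpha) {
  return x >= 0.0f ? x : alpha * x;
}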
@@ -0,0 +1,37 @@
+ /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_
+ #define TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_
+
+ #include "tensorflow/lite/c/common.h"
+ #include "tensorflow/lite/kernels/internal/types.h"
+
+ namespace tflite_micro {
+
+ struct OpDataQuantizeReference {
+   tflite_micro::QuantizationParams quantization_params;
+   // The scaling factor from input to output (aka the 'real multiplier') can
+   // be represented as a fixed point multiplier plus a left shift.
+   int32_t requantize_output_multiplier;
+   int requantize_output_shift;
+
+   int32_t input_zero_point;
+ };
+
+ TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node);
+ TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TfLiteNode* node);
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_
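The struct comment above describes the standard requantization trick: the effective scale input_scale / output_scale is stored as a 32-bit fixed-point multiplier plus a shift so that Invoke never needs floating point. A sketch of how those two fields are typically derived during Prepare, assuming QuantizeMultiplier from quantization_util.h (file 184 in this wheel); the helper name is made up:

#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/micro/kernels/quantize.h"

namespace tflite_micro {

// Decomposes input_scale / output_scale into the multiplier/shift pair stored
// in OpDataQuantizeReference.
void FillRequantizeParams(float input_scale, float output_scale,
                          OpDataQuantizeReference* data) {
  const double effective_scale =
      static_cast<double>(input_scale) / static_cast<double>(output_scale);
  QuantizeMultiplier(effective_scale, &data->requantize_output_multiplier,
                     &data->requantize_output_shift);
}

}  // namespace tflite_micro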