xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395) hide show
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +74 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +60 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,579 @@
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_LSTM_TEST_DATA_H_
17
+ #define TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_LSTM_TEST_DATA_H_
18
+ #include <string>
19
+
20
+ #include "tensorflow/lite/c/common.h"
21
+ #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
22
+ #include "tensorflow/lite/micro/kernels/lstm_shared.h"
23
+ #include "tensorflow/lite/micro/test_helpers.h"
24
+
25
+ namespace tflite_micro {
26
+ namespace testing {
27
// Data structure to store all the data used to check output of internal gates
// of one time step
// input_size = batch_size*input_dimension (size of the input array)
// gate_output_size = batch_size*state_dimension (size of the gate output)
template <int input_size, int gate_output_size>
struct GateOutputCheckData {
  // Inputs fed into the single step under test: layer input plus the
  // previous hidden and cell states.
  float input_data[input_size];
  float hidden_state[gate_output_size];
  float cell_state[gate_output_size];
  // Golden (expected) activations of each internal gate for that step.
  float expected_forget_gate_output[gate_output_size];
  float expected_input_gate_output[gate_output_size];
  float expected_output_gate_output[gate_output_size];
  float expected_cell_gate_output[gate_output_size];
  // Golden post-step states: cell state after the gate update and the
  // resulting hidden state.
  float expected_updated_cell[gate_output_size];
  float expected_updated_hidden[gate_output_size];
};
43
+
44
// Data structure to store all the data used to check the output of the kernel
// of multiple batch, multiple timesteps
// input_size = batch_size*time_steps*input_dimension (size of the input array)
// gate_output_size = batch_size*state_dimension (size of the gate output)
// output_size = time_steps*gate_output_size (size of the output from the
// kernel)
template <int input_size, int gate_output_size, int output_size>
struct LstmEvalCheckData {
  // Full multi-timestep input sequence and the initial hidden state.
  float input_data[input_size];
  float hidden_state[gate_output_size];
  // Golden kernel output over all timesteps.
  float expected_output[output_size];
  // Golden final states after the whole sequence has been processed.
  float expected_hidden_state[gate_output_size];
  float expected_cell_state[gate_output_size];
};
58
+
59
// Struct that holds the weight/bias information for a standard gate (i.e. no
// modification such as layer normalization, peephole, etc.)
// Every gate is defined by the type and size of the weights (bias included)
// inside.
// Specifically, types are weight type and bias type (normally the same
// type of MatMul accumulator).
// activation_weight has shape (hidden state dimension * input tensor dimension)
// recurrent_weight has shape (hidden state dimension * hidden state dimension)
// bias has shape (hidden state dimension, 1)
template <typename WeightType, typename BiasType, int input_dimension,
          int state_dimension>
struct GateData {
  // Weights applied to the layer input (row-major, state x input).
  WeightType activation_weight[state_dimension * input_dimension];
  // Weights applied to the previous hidden state (row-major, state x state).
  WeightType recurrent_weight[state_dimension * state_dimension];
  // Per-output-channel bias.
  BiasType fused_bias[state_dimension];
  // Quantized model folded the zero point of activations into biases:
  // bias + zero_point * weight.
  // Note: folded bias is only required for the legacy 8x8->16 pass. Therefore
  // the data type is fixed here to avoid compilation errors (the computation of
  // folding does not support other types)
  int32_t activation_zp_folded_bias[state_dimension];
  int32_t recurrent_zp_folded_bias[state_dimension];
};
82
+
83
// A struct that holds quantization parameters for a LSTM Tensor
struct TensorQuantizationParameters {
  double scale;     // quantization scale: real = (quantized - zero_point) * scale
  int zero_point;   // quantization zero point
  bool symmetry;    // true when the tensor is symmetrically quantized (zero_point == 0)
};
89
+
90
// A struct that holds quantization parameters for an internal gate, which is
// defined by activation/recurrent weight and bias (assuming no internal layer
// normalization)
struct GateQuantizationParameters {
  TensorQuantizationParameters activation_weight;  // input-to-gate weights
  TensorQuantizationParameters recurrent_weight;   // hidden-to-gate weights
  TensorQuantizationParameters bias;               // gate bias
};
98
+
99
// A struct that holds the quantization settings for the LSTM node. Data
// members can be grouped into five parts.
// 1. Data types (activation,weight, cell, bias)
// 2. Non-linear activation (i.e., tanh and sigmoid) fixed point
// calculation settings
// 3. Input/output tensor quantization settings
// 4. Internal state (hidden and cell) quantization settings
// 5. Internal gate (forget, input, cell, output) settings
struct NodeQuantizationParameters {
  // 1. Tensor data types.
  TfLiteType activation_type;
  TfLiteType weight_type;
  TfLiteType cell_type;
  TfLiteType bias_type;
  // 2. Fixed point setting for integer nonlinear activation calculation
  double nonlinear_activation_input_scale;
  double nonlinear_activation_output_scale;
  // 3. Quantization parameters for input/output
  TensorQuantizationParameters input;
  TensorQuantizationParameters output;
  // 4. Quantization parameters for internal states
  TensorQuantizationParameters hidden_state;
  TensorQuantizationParameters cell_state;
  // 5. Quantization parameters for gates
  GateQuantizationParameters forget_gate;
  GateQuantizationParameters input_gate;
  GateQuantizationParameters cell_gate;
  GateQuantizationParameters output_gate;
};
127
+
128
// Data structure that holds all the information to evaluate a LSTM kernel
// (mimic the LSTM node).
// Tensor Types:
// ActivationType defines the data type of input/output of the layer. The hidden
// state has the ActivationType as well since it is the layer output of the
// previous time.
// WeightType defines the weight data type inside the internal gates.
// BiasType defines the bias data type inside the internal gates. (normally the
// same type of MatMul accumulator).
// Tensor Shapes:
// The input to the layer has shape (batch_size,time_steps,input_dimension).
// Both the hidden state and cell state has shape (state_dimension, 1)
// The output of the layer has shape (batch_size,time_steps,state_dimension)
// Note: state values can change through calls (stateful)
template <typename ActivationType, typename WeightType, typename BiasType,
          typename CellType, int batch_size, int time_steps,
          int input_dimension, int state_dimension>
class LstmNodeContent {
 public:
  LstmNodeContent(const LstmNodeContent& other) = default;
  LstmNodeContent& operator=(const LstmNodeContent& other) = default;
  // Use the general model setting (builtin data) and the four gates data to
  // construct the node content. Note the input, hidden state, and cell state
  // data is provided later for flexible testing (initialize as zero now)
  LstmNodeContent(
      const TfLiteUnidirectionalSequenceLSTMParams builtin_data,
      const GateData<WeightType, BiasType, input_dimension, state_dimension>
          forget_gate_params,
      const GateData<WeightType, BiasType, input_dimension, state_dimension>
          input_gate_params,
      const GateData<WeightType, BiasType, input_dimension, state_dimension>
          cell_gate_params,
      const GateData<WeightType, BiasType, input_dimension, state_dimension>
          output_gate_params)
      : builtin_data_(builtin_data),
        forget_gate_data_(forget_gate_params),
        input_gate_data_(input_gate_params),
        cell_gate_data_(cell_gate_params),
        output_gate_data_(output_gate_params) {
    InitializeTensors();
  }

  // Add quantization parameters (scale, zero point) to tensors
  // Only required for the integer kernel
  void AddQuantizationParameters(
      const NodeQuantizationParameters& quantization_params) {
    quantization_settings_ = quantization_params;
    // Input Tensor
    SetTensorQuantizationParam(kLstmInputTensor, quantization_params.input);
    // Forget Gate Tensors
    const auto& forget_gate_quant_param = quantization_params.forget_gate;
    SetTensorQuantizationParam(kLstmInputToForgetWeightsTensor,
                               forget_gate_quant_param.activation_weight);
    SetTensorQuantizationParam(kLstmRecurrentToForgetWeightsTensor,
                               forget_gate_quant_param.recurrent_weight);
    SetTensorQuantizationParam(kLstmForgetGateBiasTensor,
                               forget_gate_quant_param.bias);
    // Input Gate Tensors
    const auto& input_gate_quant_param = quantization_params.input_gate;
    SetTensorQuantizationParam(kLstmInputToInputWeightsTensor,
                               input_gate_quant_param.activation_weight);
    SetTensorQuantizationParam(kLstmRecurrentToInputWeightsTensor,
                               input_gate_quant_param.recurrent_weight);
    SetTensorQuantizationParam(kLstmInputGateBiasTensor,
                               input_gate_quant_param.bias);
    // Cell Gate Tensors
    const auto& cell_gate_quant_param = quantization_params.cell_gate;
    SetTensorQuantizationParam(kLstmInputToCellWeightsTensor,
                               cell_gate_quant_param.activation_weight);
    SetTensorQuantizationParam(kLstmRecurrentToCellWeightsTensor,
                               cell_gate_quant_param.recurrent_weight);
    SetTensorQuantizationParam(kLstmCellGateBiasTensor,
                               cell_gate_quant_param.bias);
    // Output Gate Tensors
    const auto& output_gate_quant_param = quantization_params.output_gate;
    SetTensorQuantizationParam(kLstmInputToOutputWeightsTensor,
                               output_gate_quant_param.activation_weight);
    SetTensorQuantizationParam(kLstmRecurrentToOutputWeightsTensor,
                               output_gate_quant_param.recurrent_weight);
    SetTensorQuantizationParam(kLstmOutputGateBiasTensor,
                               output_gate_quant_param.bias);
    // State Tensors
    SetTensorQuantizationParam(kLstmOutputStateTensor,
                               quantization_params.hidden_state);
    SetTensorQuantizationParam(kLstmCellStateTensor,
                               quantization_params.cell_state);
    // Output Tensor (index 24: the single output slot one past the 24 LSTM
    // input tensors, matching tensors_[24 + 1] below)
    SetTensorQuantizationParam(24, quantization_params.output);
  }

  // Provide interface to set the input tensor values for flexible testing.
  // Copies batch_size*input_dimension*time_steps elements from `data`.
  void SetInputData(const ActivationType* data) {
    std::memcpy(
        input_, data,
        batch_size * input_dimension * time_steps * sizeof(ActivationType));
    SetTensor(kLstmInputTensor, input_, input_size_);
  }
  const ActivationType* GetInputData() const { return input_; }

  // Provide interface to set the hidden state tensor values for flexible
  // testing. Copies batch_size*state_dimension elements from `data`.
  void SetHiddenStateData(const ActivationType* data) {
    std::memcpy(hidden_state_, data,
                batch_size * state_dimension * sizeof(ActivationType));
  }
  ActivationType* GetHiddenStateData() { return hidden_state_; }

  // Provide interface to set the cell state tensor values for flexible
  // testing. Copies batch_size*state_dimension elements from `data`.
  void SetCellStateData(const CellType* data) {
    std::memcpy(cell_state_, data,
                batch_size * state_dimension * sizeof(CellType));
  }
  CellType* GetCellStateData() { return cell_state_; }
  ActivationType* GetOutputData() { return output_; }

  // Internal tensors, see lstm_shared.h for tensor names.
  // Returns nullptr when the tensor at `tensor_index` was never registered
  // (its slot in input_tensor_indices_ is still kTfLiteOptionalTensor).
  TfLiteEvalTensor* GetEvalTensor(const int tensor_index) {
    // +1 skips the leading element, which stores the array size (24).
    auto valid_index = input_tensor_indices_[tensor_index + 1];
    if (valid_index < 0) {
      return nullptr;
    }
    return &eval_tensors_[tensor_index];
  }

  TfLiteTensor* GetTensors() { return tensors_; }

  // Required by the kernel runner
  TfLiteIntArray* KernelInputs() {
    return IntArrayFromInts(input_tensor_indices_);
  }
  // Required by the kernel runner
  TfLiteIntArray* KernelOutputs() {
    return IntArrayFromInts(output_tensor_indices_);
  }

  // Variable tensors (will be changed, can not be const)
  TfLiteEvalTensor* HiddenStateEvalTensor() {
    return &eval_tensors_[kLstmOutputStateTensor];
  }
  TfLiteEvalTensor* CellStateEvalTensor() {
    return &eval_tensors_[kLstmCellStateTensor];
  }
  TfLiteEvalTensor* OutputEvalTensor() { return &eval_tensors_[24]; }

  // Read-only access to the per-gate weight/bias data.
  const GateData<WeightType, BiasType, input_dimension, state_dimension>&
  ForgetGateData() const {
    return forget_gate_data_;
  }
  const GateData<WeightType, BiasType, input_dimension, state_dimension>&
  InputGateData() const {
    return input_gate_data_;
  }
  const GateData<WeightType, BiasType, input_dimension, state_dimension>&
  CellGateData() const {
    return cell_gate_data_;
  }
  const GateData<WeightType, BiasType, input_dimension, state_dimension>&
  OutputGateData() const {
    return output_gate_data_;
  }

  const TfLiteUnidirectionalSequenceLSTMParams& BuiltinData() const {
    return builtin_data_;
  }

  const NodeQuantizationParameters& QuantizationSettings() const {
    return quantization_settings_;
  }

 private:
  // Registers every tensor slot (Lite + Eval) and records its index. Called
  // once from the constructor.
  void InitializeTensors() {
    // Invalidate all the input tensor slots until they are set below.
    input_tensor_indices_[0] = 24;  // total number of elements that follow
    for (size_t i = 1; i < 25; i++) {
      input_tensor_indices_[i] = kTfLiteOptionalTensor;
    }
    // Input Tensor
    SetTensor(kLstmInputTensor, input_, input_size_);
    // Forget Gate Tensors
    SetTensor(kLstmInputToForgetWeightsTensor,
              forget_gate_data_.activation_weight, activation_weight_size_);
    SetTensor(kLstmRecurrentToForgetWeightsTensor,
              forget_gate_data_.recurrent_weight, recurrent_weight_size_);
    SetTensor(kLstmForgetGateBiasTensor, forget_gate_data_.fused_bias,
              bias_size_);
    // Input Gate Tensors
    SetTensor(kLstmInputToInputWeightsTensor,
              input_gate_data_.activation_weight, activation_weight_size_);
    SetTensor(kLstmRecurrentToInputWeightsTensor,
              input_gate_data_.recurrent_weight, recurrent_weight_size_);
    SetTensor(kLstmInputGateBiasTensor, input_gate_data_.fused_bias,
              bias_size_);
    // Cell Gate Tensors
    SetTensor(kLstmInputToCellWeightsTensor, cell_gate_data_.activation_weight,
              activation_weight_size_);
    SetTensor(kLstmRecurrentToCellWeightsTensor,
              cell_gate_data_.recurrent_weight, recurrent_weight_size_);
    SetTensor(kLstmCellGateBiasTensor, cell_gate_data_.fused_bias, bias_size_);
    // Output Gate Tensors
    SetTensor(kLstmInputToOutputWeightsTensor,
              output_gate_data_.activation_weight, activation_weight_size_);
    SetTensor(kLstmRecurrentToOutputWeightsTensor,
              output_gate_data_.recurrent_weight, recurrent_weight_size_);
    SetTensor(kLstmOutputGateBiasTensor, output_gate_data_.fused_bias,
              bias_size_);
    // State Tensors (mutated by the kernel, hence is_variable)
    SetTensor(kLstmOutputStateTensor, hidden_state_, state_size_,
              /*is_variable=*/true);
    SetTensor(kLstmCellStateTensor, cell_state_, state_size_,
              /*is_variable=*/true);
    // Output Tensor
    SetTensor(24, output_, output_size_, /*is_variable=*/true);
  }

  // Populates both the TfLiteTensor and TfLiteEvalTensor at `index` with the
  // given data/dims/type, and marks the slot as present in
  // input_tensor_indices_ (for indices below the output slot, 24).
  template <typename T>
  void SetTensor(const int index, const T* data, int* dims,
                 const bool is_variable = false) {
    // Lite tensors for kernel level testing
    tensors_[index].data.data = const_cast<T*>(data);
    tensors_[index].dims = IntArrayFromInts(dims);
    tensors_[index].type = typeToTfLiteType<T>();
    tensors_[index].is_variable = is_variable;
    // Eval tensors for internal computation testing
    eval_tensors_[index].data.data = const_cast<T*>(data);
    eval_tensors_[index].dims = IntArrayFromInts(dims);
    eval_tensors_[index].type = typeToTfLiteType<T>();
    // update the index (slot 0 holds the count, so offset by one)
    if (index < 24) {
      input_tensor_indices_[index + 1] = index;
    }
  }

  // Writes scale/zero_point onto the Lite tensor at `index`.
  void SetTensorQuantizationParam(
      const int index, const TensorQuantizationParameters& quant_param) {
    tensors_[index].params.scale = quant_param.scale;
    tensors_[index].params.zero_point = quant_param.zero_point;
  }

  const TfLiteUnidirectionalSequenceLSTMParams builtin_data_;
  GateData<WeightType, BiasType, input_dimension, state_dimension>
      forget_gate_data_;
  GateData<WeightType, BiasType, input_dimension, state_dimension>
      input_gate_data_;
  GateData<WeightType, BiasType, input_dimension, state_dimension>
      cell_gate_data_;
  GateData<WeightType, BiasType, input_dimension, state_dimension>
      output_gate_data_;

  // Keep to ease the testing process (although all quantization information can
  // be obtained from individual tensors, they are well organized here and light
  // weighted)
  NodeQuantizationParameters quantization_settings_;

  // Not const since IntArrayFromInts takes int *; the first element of the
  // array must be the size of the array
  int input_size_[4] = {3, batch_size, time_steps, input_dimension};
  int output_size_[4] = {3, batch_size, time_steps, state_dimension};
  // weight tensor has C-style "row-major" memory ordering
  int activation_weight_size_[3] = {2, state_dimension, input_dimension};
  int recurrent_weight_size_[3] = {2, state_dimension, state_dimension};
  int bias_size_[2] = {1, state_dimension};
  int state_size_[3] = {2, batch_size, state_dimension};

  // see lstm_shared.h for tensor names, the last tensor is the output tensor
  TfLiteTensor tensors_[24 + 1];
  // Use for internal kernel testing
  TfLiteEvalTensor eval_tensors_[24 + 1];
  // indices for the tensors inside the node (required by kernel runner);
  // element 0 is the count (24), elements 1..24 are indices or
  // kTfLiteOptionalTensor
  int input_tensor_indices_[1 + 24] = {};
  // single output (last in the tensors array): {count, index}
  int output_tensor_indices_[2] = {1, 24};

  // tensor data
  // states are initialized to zero
  ActivationType hidden_state_[batch_size * state_dimension] = {};
  CellType cell_state_[batch_size * state_dimension] = {};
  // input is defined in the ModelContent (const across all derived models)
  ActivationType input_[batch_size * input_dimension * time_steps] = {};
  ActivationType output_[batch_size * state_dimension * time_steps] = {};
};
409
+
410
+ // Converts floating point gate parameters to the corresponding quantized
411
+ // version
412
+ template <typename WeightType, typename BiasType, int input_dimension,
413
+ int state_dimension>
414
+ GateData<WeightType, BiasType, input_dimension, state_dimension>
415
+ CreateQuantizedGateData(
416
+ const GateData<float, float, input_dimension, state_dimension>&
417
+ gate_parameters,
418
+ const TensorQuantizationParameters& input_quantization_params,
419
+ const TensorQuantizationParameters& output_quantization_params,
420
+ const GateQuantizationParameters& gate_quantization_params,
421
+ const bool fold_zero_point) {
422
+ GateData<WeightType, BiasType, input_dimension, state_dimension>
423
+ quantized_gate_params;
424
+ tflite_micro::SymmetricQuantize(gate_parameters.activation_weight,
425
+ quantized_gate_params.activation_weight,
426
+ state_dimension * input_dimension,
427
+ gate_quantization_params.activation_weight.scale);
428
+ tflite_micro::SymmetricQuantize(gate_parameters.recurrent_weight,
429
+ quantized_gate_params.recurrent_weight,
430
+ state_dimension * state_dimension,
431
+ gate_quantization_params.recurrent_weight.scale);
432
+ tflite_micro::SymmetricQuantize(gate_parameters.fused_bias,
433
+ quantized_gate_params.fused_bias, state_dimension,
434
+ gate_quantization_params.bias.scale);
435
+ // Note: steps below are not required for the generalized LSTM evaluation
436
+ // (e.g., 16bits activation)
437
+ if (fold_zero_point) {
438
+ // Copy the bias values to prepare zero_point folded
439
+ // bias precomputation. bias has same scale as
440
+ // input_scale*input_weight_scale)
441
+ std::memcpy(quantized_gate_params.activation_zp_folded_bias,
442
+ quantized_gate_params.fused_bias, 2 * sizeof(int32_t));
443
+ // Pre-calculate bias - zero_point * weight (a constant).
444
+ tflite_micro::tensor_utils::MatrixScalarMultiplyAccumulate(
445
+ quantized_gate_params.activation_weight,
446
+ -1 * input_quantization_params.zero_point, 2, 2,
447
+ quantized_gate_params.activation_zp_folded_bias);
448
+
449
+ // Initialize the folded bias to zeros for accumulation
450
+ for (size_t i = 0; i < 2; i++) {
451
+ quantized_gate_params.recurrent_zp_folded_bias[i] = 0;
452
+ }
453
+ // Calculate : -zero_point * weight since it is a constant
454
+ tflite_micro::tensor_utils::MatrixScalarMultiplyAccumulate(
455
+ quantized_gate_params.recurrent_weight,
456
+ -1 * output_quantization_params.zero_point, 2, 2,
457
+ quantized_gate_params.recurrent_zp_folded_bias);
458
+ }
459
+ return quantized_gate_params;
460
+ }
461
+
462
+ // Create integer LSTM node content from the float node contents and
463
+ // quantization settings
464
+ // Note: fold_zero_point folds the zero point into the bias (precomputation),
465
+ // which is not required for the generalized integer inference (16 bits act
466
+ // LSTM).
467
+ template <typename ActivationType, typename WeightType, typename BiasType,
468
+ typename CellType, int batch_size, int time_steps,
469
+ int input_dimension, int state_dimension>
470
+ LstmNodeContent<ActivationType, WeightType, BiasType, CellType, batch_size,
471
+ time_steps, input_dimension, state_dimension>
472
+ CreateIntegerNodeContents(
473
+ const NodeQuantizationParameters& quantization_settings,
474
+ const bool fold_zero_point,
475
+ LstmNodeContent<float, float, float, float, batch_size, time_steps,
476
+ input_dimension, state_dimension>& float_node_contents) {
477
+ const auto quantized_forget_gate_data =
478
+ CreateQuantizedGateData<WeightType, BiasType, input_dimension,
479
+ state_dimension>(
480
+ float_node_contents.ForgetGateData(), quantization_settings.input,
481
+ quantization_settings.output, quantization_settings.forget_gate,
482
+ fold_zero_point);
483
+ const auto quantized_input_gate_data =
484
+ CreateQuantizedGateData<WeightType, BiasType, input_dimension,
485
+ state_dimension>(
486
+ float_node_contents.InputGateData(), quantization_settings.input,
487
+ quantization_settings.output, quantization_settings.input_gate,
488
+ fold_zero_point);
489
+ const auto quantized_cell_gate_data =
490
+ CreateQuantizedGateData<WeightType, BiasType, input_dimension,
491
+ state_dimension>(
492
+ float_node_contents.CellGateData(), quantization_settings.input,
493
+ quantization_settings.output, quantization_settings.cell_gate,
494
+ fold_zero_point);
495
+ const auto quantized_output_gate_params =
496
+ CreateQuantizedGateData<WeightType, BiasType, input_dimension,
497
+ state_dimension>(
498
+ float_node_contents.OutputGateData(), quantization_settings.input,
499
+ quantization_settings.output, quantization_settings.output_gate,
500
+ fold_zero_point);
501
+ LstmNodeContent<ActivationType, WeightType, BiasType, CellType, batch_size,
502
+ time_steps, input_dimension, state_dimension>
503
+ quantized_node_content(
504
+ float_node_contents.BuiltinData(), quantized_forget_gate_data,
505
+ quantized_input_gate_data, quantized_cell_gate_data,
506
+ quantized_output_gate_params);
507
+
508
+ // Quantize the floating point input
509
+ ActivationType quantized_input[batch_size * input_dimension * time_steps] =
510
+ {};
511
+ Quantize(float_node_contents.GetInputData(), quantized_input,
512
+ batch_size * input_dimension * time_steps,
513
+ quantization_settings.input.scale,
514
+ quantization_settings.input.zero_point);
515
+ quantized_node_content.SetInputData(quantized_input);
516
+ // Quantize the floating point hidden state
517
+ ActivationType quantized_hidden_state[batch_size * state_dimension] = {};
518
+ Quantize(float_node_contents.GetHiddenStateData(), quantized_hidden_state,
519
+ batch_size * state_dimension,
520
+ quantization_settings.hidden_state.scale,
521
+ quantization_settings.hidden_state.zero_point);
522
+ quantized_node_content.SetHiddenStateData(quantized_hidden_state);
523
+ // Quantize the floating point cell state
524
+ CellType quantized_cell_state[batch_size * state_dimension] = {};
525
+ Quantize(float_node_contents.GetCellStateData(), quantized_cell_state,
526
+ batch_size * state_dimension, quantization_settings.cell_state.scale,
527
+ quantization_settings.cell_state.zero_point);
528
+ quantized_node_content.SetCellStateData(quantized_cell_state);
529
+
530
+ // Add scale and zero point to tensors
531
+ quantized_node_content.AddQuantizationParameters(quantization_settings);
532
+ return quantized_node_content;
533
+ }
534
+
535
+ // Get the gate output data (one time step) for a simple 2X2 model
536
+ // batch_size = 2; time_steps = 1; input_dimension = 2; state_dimension = 2
537
+ // input_size = batch_size*time_steps*input_dimension = 4
538
+ // gate_output_size = batch_size*state_dimension = 4
539
+ GateOutputCheckData<4, 4> Get2X2GateOutputCheckData();
540
+
541
+ // Get the kernel output data for a simple 2X2 model
542
+ // batch_size = 2; time_steps = 3; input_dimension = 2; state_dimension = 2
543
+ // input_size = batch_size*time_steps*input_dimension = 12
544
+ // gate_output_size = batch_size*state_dimension = 4
545
+ // output_size = time_steps*gate_output_size = 12
546
+ LstmEvalCheckData<12, 4, 12> Get2X2LstmEvalCheckData();
547
+
548
+ // Create a 2x2 float node content
549
+ // batch_size = 2; time_steps = 3; input_dimension = 2; state_dimension = 2
550
+ LstmNodeContent<float, float, float, float, 2, 3, 2, 2>
551
+ Create2x3x2X2FloatNodeContents(const float* input_data = nullptr,
552
+ const float* hidden_state = nullptr,
553
+ const float* cell_state = nullptr);
554
+
555
+ // Get the quantization settings for the 2X2 model
556
+ NodeQuantizationParameters Get2X2Int8LstmQuantizationSettings();
557
+
558
+ // Create int8 (activation) x int8 (weight) -> int16 (cell) node
559
+ // batch_size = 2; time_steps = 3; input_dimension = 2; state_dimension = 2
560
+ // input is in float format since the source of truth is always the float
561
+ // configuration
562
+ LstmNodeContent<int8_t, int8_t, int32_t, int16_t, 2, 3, 2, 2>
563
+ Create2x3x2X2Int8NodeContents(const float* input_data = nullptr,
564
+ const float* hidden_state = nullptr,
565
+ const float* cell_state = nullptr);
566
+
567
+ // Create int16 (activation) x int8 (weight) -> int16 (cell) node
568
+ // batch_size = 2; time_steps = 3; input_dimension = 2; state_dimension = 2
569
+ // input is in float format since the source of truth is always the float
570
+ // configuration
571
+ LstmNodeContent<int16_t, int8_t, int64_t, int16_t, 2, 3, 2, 2>
572
+ Create2x3x2X2Int16NodeContents(const float* input_data = nullptr,
573
+ const float* hidden_state = nullptr,
574
+ const float* cell_state = nullptr);
575
+
576
+ } // namespace testing
577
+ } // namespace tflite_micro
578
+
579
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_LSTM_TEST_DATA_H_
@@ -0,0 +1,47 @@
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_
17
+ #define TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_
18
+
19
+ #include <cstdint>
20
+
21
+ #include "tensorflow/lite/c/builtin_op_data.h"
22
+ #include "tensorflow/lite/c/common.h"
23
+ #include "tensorflow/lite/kernels/internal/types.h"
24
+
25
+ namespace tflite_micro {
26
+
27
+ // This is the most generic TFLMRegistration. The actual supported types
28
+ // may still be target dependent. The only requirement is that every
29
+ // implementation (reference or optimized) must define this function.
30
+ // TODO(b/230666079): resolve conflict with xtensa implementation
31
+ TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
32
+
33
#if defined(CMSIS_NN)
// Returns a TFLMRegistration struct for kernel variant that only supports
// int8 activations and int8 weights and uses the latency optimized
// implementations.
TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8();

#else
// Without CMSIS-NN there is no specialized int8 kernel, so the int8
// registration simply forwards to the generic one declared above.
inline TFLMRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM_INT8() {
  return Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
}
#endif
44
+
45
+ } // namespace tflite_micro
46
+
47
+ #endif // TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_H_