xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +74 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +60 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h
@@ -0,0 +1,1096 @@
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <cstring>
+ #include <initializer_list>
+
+ #include "tensorflow/lite/kernels/internal/compatibility.h"
+ #include "tensorflow/lite/kernels/internal/runtime_shape.h"
+
+ namespace tflite_micro {
+
+ enum class FusedActivationFunctionType : uint8_t {
+   kNone,
+   kRelu6,
+   kRelu1,
+   kRelu
+ };
+ enum class PaddingType : uint8_t { kNone, kSame, kValid };
+
+ struct PaddingValues {
+   int16_t width;
+   int16_t height;
+   // offset is used for calculating "remaining" padding, for example, `width`
+   // is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is
+   // 1 + 1 = 2.
+   int16_t width_offset;
+   // Same as width_offset except it's over the height dimension.
+   int16_t height_offset;
+ };
+
+ struct Padding3DValues {
+   int16_t width;
+   int16_t height;
+   int16_t depth;
+   // offset is used for calculating "remaining" padding, for example, `width`
+   // is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is
+   // 1 + 1 = 2.
+   int16_t width_offset;
+   // Same as width_offset except it's over the height dimension.
+   int16_t height_offset;
+   // Same as width_offset except it's over the depth dimension.
+   int16_t depth_offset;
+ };
+
+ // This enumeration allows for non-default formats for the weights array
+ // of a fully-connected operator, allowing the use of special optimized
+ // runtime paths.
+ enum class FullyConnectedWeightsFormat : uint8_t {
+   // Default format (flat 2D layout, the inner contiguous dimension
+   // is input_depth, the outer non-contiguous dimension is output_depth)
+   kDefault,
+   // Summary: optimized layout for fast CPU runtime implementation,
+   // aimed specifically at ARM CPUs at the moment, and specialized for
+   // 8-bit quantized layers.
+   //
+   // The use case we're concerned with here is: 8-bit quantization,
+   // large weights matrix that doesn't fit in cache (e.g. 4096x2048 in
+   // a key application that drove this), very small batch size (e.g. 1 -- 4).
+   //
+   // Even with 8-bit quantization of weights, the performance of memory
+   // accesses to the weights can become the dominant issue when
+   // the batch size is small, so each weight value is used in only a few
+   // arithmetic ops, i.e. the fully-connected node has a low arithmetic
+   // intensity. The specific issues that arise are of three kinds:
+   // (1) One may, ideally, max out DRAM bandwidth, i.e. be truly memory
+   //     bound. That's the "good" issue to run into.
+   // (2) One may run into sub-optimal pre-fetching: the data hasn't been
+   //     prefetched into the cache by the time we need it.
+   // (3) One may run into cache aliasing: multiple values that are
+   //     pre-fetched, alias each other in the L1 cache (which typically
+   //     has only 4-way set associativity in ARM CPUs) and thus evict
+   //     each other before we get to using them.
+   //
+   // The point of this shuffling is to avoid issues (2) and (3) so that
+   // we get as fast as possible given only the hard constraint (1).
+   // This is achieved by turning the difficulty into a solution: the
+   // difficulty, that each value loaded from memory is used only in
+   // one kernel iteration, making this operation memory-intensive, hints at
+   // the solution, of shuffling the weights so that they are stored in the
+   // exact order as the kernel needs to load them, so that the memory
+   // accesses made by the kernel are trivial. This solves (2) because the
+   // trivial memory access pattern allows the CPU's automatic prefetching
+   // to perform very well (no need even for preload instructions), and this
+   // solves (3) because the values being loaded concurrently are now
+   // contiguous in the address space, thus don't alias each other in the cache.
+   //
+   // On ARM, we typically want our kernel to process a 4x16 block of weights
+   // at a time, because:
+   //   - 16 is the number of bytes in a NEON register.
+   //   - 4 is how many rows we need to handle concurrently in the kernel in
+   //     order to have sufficient mutual independence of instructions to
+   //     maximize arithmetic throughput.
+   //
+   // Finally, the 'Int8' part in the name refers to the fact that this
+   // weights format has each weights value encoded as a signed int8_t value,
+   // even if the data type of the weights buffer is uint8_t. This is intended
+   // to save runtime kernels the effort to have to XOR the top bit of these
+   // bytes before using them in signed arithmetic, see this file for more
+   // explanations on the 'signed int8_t trick' in matrix multiplication kernels:
+   //
+   //   tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc
+   //
+   kShuffled4x16Int8,
+ };
+
+ // Quantization parameters, determining the mapping of quantized values
+ // to real values (i.e. determining how quantized values are mathematically
+ // interpreted).
+ //
+ // The correspondence is as follows:
+ //
+ //   real_value = scale * (quantized_value - zero_point);
+ //
+ // In other words, zero_point designates which quantized value corresponds to
+ // the real 0 value, and scale designates the difference between the real values
+ // corresponding to consecutive quantized values differing by 1.
+ struct QuantizationParams {
+   int32_t zero_point = 0;
+   double scale = 0.0;
+ };
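The affine mapping documented above is easy to sanity-check by hand. A minimal sketch (editorial illustration, not part of the header; the helper names are made up, only QuantizationParams comes from this file):

    #include <cmath>
    #include <cstdint>

    // With zero_point = 10 and scale = 0.5, the quantized value 14
    // represents 0.5 * (14 - 10) = 2.0, and 2.0 quantizes back to 14.
    float Dequantize(const tflite_micro::QuantizationParams& qp, int32_t q) {
      return static_cast<float>(qp.scale * (q - qp.zero_point));
    }

    int32_t Quantize(const tflite_micro::QuantizationParams& qp, float real) {
      return qp.zero_point + static_cast<int32_t>(std::round(real / qp.scale));
    }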
+
+ inline bool operator==(const QuantizationParams& qp1,
+                        const QuantizationParams& qp2) {
+   return qp1.zero_point == qp2.zero_point && qp1.scale == qp2.scale;
+ }
+
+ // Quantization parameters for each channel, determining the mapping of
+ // quantized values to real values. See QuantizationParams for a single set of
+ // parameters per tensor. This has one parameters set per each channel.
+ //
+ // The correspondence is as follows:
+ //
+ //   real_value = scale[channel] * (quantized_value - zero_point[channel]);
+ //
+ struct PerChannelQuantizationParams {
+   // The following members typically point to the corresponding members of a
+   // TfLiteAffineQuantization struct.
+   const float* scale;
+   const int32_t* zero_point;
+   int32_t quantized_dimension;
+ };
+
+ // Gets next index to iterate through a multidimensional array.
+ template <typename IndexType = int>
+ inline bool NextIndex(const int num_dims, const int* dims, IndexType* current) {
+   if (num_dims == 0) {
+     return false;
+   }
+   TFLITE_DCHECK(dims != nullptr);
+   TFLITE_DCHECK(current != nullptr);
+   int carry = 1;
+   for (int idx = num_dims - 1; idx >= 0; --idx) {
+     IndexType current_val = current[idx] + carry;
+     TFLITE_DCHECK_GE(dims[idx], current_val);
+     if (dims[idx] == current_val) {
+       current[idx] = 0;
+     } else {
+       current[idx] = current_val;
+       carry = 0;
+       break;
+     }
+   }
+   return (carry == 0);
+ }
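A minimal usage sketch of NextIndex (editorial illustration, not part of the header): starting from an all-zero index, each call advances the index in row-major order and returns false once every position has been visited.

    #include <cstdio>

    void VisitAll2x3() {
      int dims[] = {2, 3};
      int index[] = {0, 0};
      do {
        // Prints (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
        std::printf("(%d, %d)\n", index[0], index[1]);
      } while (tflite_micro::NextIndex(2, dims, index));
    }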
+
+ // Gets offset of index if reducing on axis. When reducing, the flattened offset
+ // will not change, if the input index changes on the given axis. For example,
+ // if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
+ // then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
+ // offset.
+ // TODO(kanlig): uses Dims to represent dimensions.
+ inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
+                                   const int* index, const int num_axis,
+                                   const int* axis) {
+   if (num_dims == 0) {
+     return 0;
+   }
+   TFLITE_DCHECK(dims != nullptr);
+   TFLITE_DCHECK(index != nullptr);
+   size_t offset = 0;
+   for (int idx = 0; idx < num_dims; ++idx) {
+     // if we need to skip this axis
+     bool is_axis = false;
+     if (axis != nullptr) {
+       for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) {
+         if (idx == axis[axis_idx]) {
+           is_axis = true;
+           break;
+         }
+       }
+     }
+     if (!is_axis) {
+       offset = offset * static_cast<size_t>(dims[idx]) +
+                static_cast<size_t>(index[idx]);
+     }
+   }
+   return offset;
+ }
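Worked example for ReducedOutputOffset (editorial sketch, not part of the header), matching the comment above: reducing a {2, 3, 4} tensor over axis 0 collapses the first index, so (0, 1, 2) and (1, 1, 2) land on the same slot of the 3x4 output.

    #include <cstddef>

    void ReducedOffsetExample() {
      int dims[] = {2, 3, 4};
      int axis[] = {0};
      int a[] = {0, 1, 2};
      int b[] = {1, 1, 2};
      const size_t off_a = tflite_micro::ReducedOutputOffset(3, dims, a, 1, axis);
      const size_t off_b = tflite_micro::ReducedOutputOffset(3, dims, b, 1, axis);
      // Both offsets equal 1 * 4 + 2 == 6.
      (void)off_a;
      (void)off_b;
    }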
+
+ // Since tensors with '0' in their shape are valid in TF, these offset functions
+ // allow that as long as the corresponding index is also 0. It is upto the
+ // calling ops to ensure that they perform verification checks on tensor shapes
+ // if they don't support a particular behavior.
+
+ inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) {
+   TFLITE_DCHECK((i0 == 0 && dims.sizes[0] == 0) ||
+                 (i0 >= 0 && i0 < dims.sizes[0]));
+   TFLITE_DCHECK((i1 == 0 && dims.sizes[1] == 0) ||
+                 (i1 >= 0 && i1 < dims.sizes[1]));
+   TFLITE_DCHECK((i2 == 0 && dims.sizes[2] == 0) ||
+                 (i2 >= 0 && i2 < dims.sizes[2]));
+   TFLITE_DCHECK((i3 == 0 && dims.sizes[3] == 0) ||
+                 (i3 >= 0 && i3 < dims.sizes[3]));
+   return i0 * dims.strides[0] + i1 * dims.strides[1] + i2 * dims.strides[2] +
+          i3 * dims.strides[3];
+ }
+
+ inline int Offset(const Dims<4>& dims, int* index) {
+   return Offset(dims, index[0], index[1], index[2], index[3]);
+ }
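Offset() is plain stride arithmetic over the legacy Dims<4> layout. A minimal sketch (editorial illustration; Dims<4> itself is declared in another runtime header, but its sizes[]/strides[] members are exactly the ones read above):

    void OffsetExample() {
      tflite_micro::Dims<4> dims;
      const int sizes[] = {8, 4, 4, 1};       // Innermost (fastest-varying) dimension first.
      const int strides[] = {1, 8, 32, 128};  // Packed strides for those sizes.
      for (int d = 0; d < 4; ++d) {
        dims.sizes[d] = sizes[d];
        dims.strides[d] = strides[d];
      }
      // 3 * 1 + 2 * 8 + 1 * 32 + 0 * 128 == 51.
      const int offset = tflite_micro::Offset(dims, 3, 2, 1, 0);
      (void)offset;
    }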
237
+
238
+ // Get array size, DCHECKing that the dim index is in range.
239
+ //
240
+ // Note that this will be phased out with Dims<4>, since RuntimeShape::Dims()
241
+ // already performs this check.
242
+ template <int N>
243
+ int ArraySize(const Dims<N>& array, int index) {
244
+ TFLITE_DCHECK(index >= 0 && index < N);
245
+ return array.sizes[index];
246
+ }
247
+
248
+ // Get common array size, DCHECKing that they all agree.
249
+ template <typename ArrayType1, typename ArrayType2>
250
+ int MatchingArraySize(const ArrayType1& array1, int index1,
251
+ const ArrayType2& array2, int index2) {
252
+ TFLITE_DCHECK_EQ(ArraySize(array1, index1), ArraySize(array2, index2));
253
+ return ArraySize(array1, index1);
254
+ }
255
+
256
+ template <typename ArrayType1, typename ArrayType2, typename... Args>
257
+ int MatchingArraySize(const ArrayType1& array1, int index1,
258
+ const ArrayType2& array2, int index2, Args... args) {
259
+ TFLITE_DCHECK_EQ(ArraySize(array1, index1), ArraySize(array2, index2));
260
+ return MatchingArraySize(array1, index1, args...);
261
+ }
262
+
263
+ // Get common shape dim, DCHECKing that they all agree.
264
+ inline int MatchingDim(const RuntimeShape& shape1, int index1,
265
+ const RuntimeShape& shape2, int index2) {
266
+ TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2));
267
+ return std::min(shape1.Dims(index1), shape2.Dims(index2));
268
+ }
269
+
270
+ template <typename... Args>
271
+ int MatchingDim(const RuntimeShape& shape1, int index1,
272
+ const RuntimeShape& shape2, int index2, Args... args) {
273
+ TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2));
274
+ return MatchingDim(shape1, index1, args...);
275
+ }
276
+
277
+ // Will be phased out with Dims<4>, replaced by RuntimeShape::FlatSize().
278
+ template <int N>
279
+ inline int FlatSize(const Dims<N>& dims) {
280
+ int flat_size = 1;
281
+ for (int i = 0; i < N; ++i) {
282
+ flat_size *= dims.sizes[i];
283
+ }
284
+ return flat_size;
285
+ }
286
+
287
+ TFLITE_DEPRECATED("Prefer FlatSize.")
288
+ inline int RequiredBufferSizeForDims(const Dims<4>& dims) {
289
+ return FlatSize(dims);
290
+ }
291
+
292
+ inline int MatchingElementsSize(const RuntimeShape& shape,
293
+ const RuntimeShape& check_shape_0) {
294
+ const int size_1 = shape.FlatSize();
295
+ const int size_2 = check_shape_0.FlatSize();
296
+ TFLITE_CHECK_EQ(size_1, size_2);
297
+ return size_1;
298
+ }
299
+
300
+ inline int MatchingElementsSize(const RuntimeShape& shape,
301
+ const RuntimeShape& check_shape_0,
302
+ const RuntimeShape& check_shape_1) {
303
+ const int size_1 = shape.FlatSize();
304
+ const int size_2 = check_shape_0.FlatSize();
305
+ const int size_3 = check_shape_1.FlatSize();
306
+ TFLITE_CHECK_EQ(size_1, size_2);
307
+ TFLITE_CHECK_EQ(size_2, size_3);
308
+ return size_1;
309
+ }
310
+
311
+ // Flat size calculation, checking that dimensions match with one or more other
312
+ // arrays.
313
+ inline int MatchingFlatSize(const RuntimeShape& shape,
314
+ const RuntimeShape& check_shape_0) {
315
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
316
+ const int dims_count = shape.DimensionsCount();
317
+ for (int i = 0; i < dims_count; ++i) {
318
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
319
+ }
320
+ return shape.FlatSize();
321
+ }
322
+
323
+ inline int MatchingFlatSize(const RuntimeShape& shape,
324
+ const RuntimeShape& check_shape_0,
325
+ const RuntimeShape& check_shape_1) {
326
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
327
+ const int dims_count = shape.DimensionsCount();
328
+ for (int i = 0; i < dims_count; ++i) {
329
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
330
+ }
331
+ return MatchingFlatSize(shape, check_shape_1);
332
+ }
333
+
334
+ inline int MatchingFlatSize(const RuntimeShape& shape,
335
+ const RuntimeShape& check_shape_0,
336
+ const RuntimeShape& check_shape_1,
337
+ const RuntimeShape& check_shape_2) {
338
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
339
+ const int dims_count = shape.DimensionsCount();
340
+ for (int i = 0; i < dims_count; ++i) {
341
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
342
+ }
343
+ return MatchingFlatSize(shape, check_shape_1, check_shape_2);
344
+ }
345
+
346
+ inline int MatchingFlatSize(const RuntimeShape& shape,
347
+ const RuntimeShape& check_shape_0,
348
+ const RuntimeShape& check_shape_1,
349
+ const RuntimeShape& check_shape_2,
350
+ const RuntimeShape& check_shape_3) {
351
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
352
+ const int dims_count = shape.DimensionsCount();
353
+ for (int i = 0; i < dims_count; ++i) {
354
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
355
+ }
356
+ return MatchingFlatSize(shape, check_shape_1, check_shape_2, check_shape_3);
357
+ }
358
+
359
+ // Flat size calculation, checking that dimensions match with one or more other
360
+ // arrays.
361
+ template <int N>
362
+ inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0) {
363
+ for (int i = 0; i < N; ++i) {
364
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
365
+ }
366
+ return FlatSize(dims);
367
+ }
368
+
369
+ template <int N>
370
+ inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
371
+ const Dims<N>& check_dims_1) {
372
+ for (int i = 0; i < N; ++i) {
373
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
374
+ }
375
+ return MatchingFlatSize(dims, check_dims_1);
376
+ }
377
+
378
+ template <int N>
379
+ inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
380
+ const Dims<N>& check_dims_1,
381
+ const Dims<N>& check_dims_2) {
382
+ for (int i = 0; i < N; ++i) {
383
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
384
+ }
385
+ return MatchingFlatSize(dims, check_dims_1, check_dims_2);
386
+ }
387
+
388
+ template <int N>
389
+ inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
390
+ const Dims<N>& check_dims_1,
391
+ const Dims<N>& check_dims_2,
392
+ const Dims<N>& check_dims_3) {
393
+ for (int i = 0; i < N; ++i) {
394
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
395
+ }
396
+ return MatchingFlatSize(dims, check_dims_1, check_dims_2, check_dims_3);
397
+ }
398
+
399
+ // Flat size calculation, checking if their extended shapes match.
400
+ inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
401
+ const RuntimeShape& check_shape_0) {
402
+ const int shape_dims = shape.DimensionsCount();
403
+ const int check_shape_0_dims = check_shape_0.DimensionsCount();
404
+ const int min_dims = std::min(shape_dims, check_shape_0_dims);
405
+
406
+ for (int i = 0; i < min_dims; ++i) {
407
+ TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i),
408
+ check_shape_0.Dims(check_shape_0_dims - 1 - i));
409
+ }
410
+ for (int i = min_dims; i < shape_dims; ++i) {
411
+ TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), 1);
412
+ }
413
+ for (int i = min_dims; i < check_shape_0_dims; ++i) {
414
+ TFLITE_DCHECK_EQ(check_shape_0.Dims(check_shape_0_dims - 1 - i), 1);
415
+ }
416
+ return shape.FlatSize();
417
+ }
418
+
419
+ inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
420
+ const RuntimeShape& check_shape_0,
421
+ const RuntimeShape& check_shape_1) {
422
+ const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
423
+ TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1),
424
+ flat_size);
425
+ return flat_size;
426
+ }
427
+
428
+ inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
429
+ const RuntimeShape& check_shape_0,
430
+ const RuntimeShape& check_shape_1,
431
+ const RuntimeShape& check_shape_2) {
432
+ const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
433
+ TFLITE_DCHECK_EQ(
434
+ MatchingExtendedShapeFlatSize(shape, check_shape_1, check_shape_2),
435
+ flat_size);
436
+ return flat_size;
437
+ }
438
+
439
+ inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
440
+ const RuntimeShape& check_shape_0,
441
+ const RuntimeShape& check_shape_1,
442
+ const RuntimeShape& check_shape_2,
443
+ const RuntimeShape& check_shape_3) {
444
+ const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
445
+ TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1,
446
+ check_shape_2, check_shape_3),
447
+ flat_size);
448
+ return flat_size;
449
+ }
450
+
451
+ // Data is required to be contiguous, and so many operators can use either the
452
+ // full array flat size or the flat size with one dimension skipped (commonly
453
+ // the depth).
454
+ template <int N>
455
+ inline int FlatSizeSkipDim(const Dims<N>& dims, int skip_dim) {
456
+ TFLITE_DCHECK(skip_dim >= 0 && skip_dim < N);
457
+ int flat_size = 1;
458
+ for (int i = 0; i < N; ++i) {
459
+ flat_size *= (i == skip_dim) ? 1 : dims.sizes[i];
460
+ }
461
+ return flat_size;
462
+ }
463
+
464
+ // A combination of MatchingFlatSize() and FlatSizeSkipDim().
465
+ template <int N>
466
+ inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
467
+ const Dims<N>& check_dims_0) {
468
+ for (int i = 0; i < N; ++i) {
469
+ if (i != skip_dim) {
470
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
471
+ }
472
+ }
473
+ return FlatSizeSkipDim(dims, skip_dim);
474
+ }
475
+
476
+ template <int N>
477
+ inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
478
+ const Dims<N>& check_dims_0,
479
+ const Dims<N>& check_dims_1) {
480
+ for (int i = 0; i < N; ++i) {
481
+ if (i != skip_dim) {
482
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
483
+ }
484
+ }
485
+ return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1);
486
+ }
487
+
488
+ template <int N>
489
+ inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
490
+ const Dims<N>& check_dims_0,
491
+ const Dims<N>& check_dims_1,
492
+ const Dims<N>& check_dims_2) {
493
+ for (int i = 0; i < N; ++i) {
494
+ if (i != skip_dim) {
495
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
496
+ }
497
+ }
498
+ return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1, check_dims_2);
499
+ }
500
+
501
+ template <int N>
502
+ inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
503
+ const Dims<N>& check_dims_0,
504
+ const Dims<N>& check_dims_1,
505
+ const Dims<N>& check_dims_2,
506
+ const Dims<N>& check_dims_3) {
507
+ for (int i = 0; i < N; ++i) {
508
+ if (i != skip_dim) {
509
+ TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i));
510
+ }
511
+ }
512
+ return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1, check_dims_2,
513
+ check_dims_3);
514
+ }
515
+
+ // Data is required to be contiguous, and so many operators can use either the
+ // full array flat size or the flat size with one dimension skipped (commonly
+ // the depth).
+ inline int FlatSizeSkipDim(const RuntimeShape& shape, int skip_dim) {
+ const int dims_count = shape.DimensionsCount();
+ TFLITE_DCHECK(skip_dim >= 0 && skip_dim < dims_count);
+ const auto* dims_data = shape.DimsData();
+ int flat_size = 1;
+ for (int i = 0; i < dims_count; ++i) {
+ flat_size *= (i == skip_dim) ? 1 : dims_data[i];
+ }
+ return flat_size;
+ }
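As a concrete illustration, a minimal usage sketch (the shape values are hypothetical, and it assumes RuntimeShape's initializer-list constructor): collapsing everything except the last dimension lets an operator walk the buffer as contiguous depth-sized rows.

    RuntimeShape shape({2, 3, 4, 8});                  // hypothetical {batch, height, width, depth}
    const int depth = shape.Dims(3);                   // 8
    const int outer_size = FlatSizeSkipDim(shape, 3);  // 2 * 3 * 4 = 24
    // Because the data is contiguous, outer_size rows of depth elements
    // cover the whole buffer: row i starts at offset i * depth.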
+
+ // A combination of MatchingFlatSize() and FlatSizeSkipDim().
+ inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
+ const RuntimeShape& check_shape_0) {
+ const int dims_count = shape.DimensionsCount();
+ for (int i = 0; i < dims_count; ++i) {
+ if (i != skip_dim) {
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
+ }
+ }
+ return FlatSizeSkipDim(shape, skip_dim);
+ }
+
+ inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
+ const RuntimeShape& check_shape_0,
+ const RuntimeShape& check_shape_1) {
+ const int dims_count = shape.DimensionsCount();
+ for (int i = 0; i < dims_count; ++i) {
+ if (i != skip_dim) {
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
+ }
+ }
+ return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1);
+ }
+
+ inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
+ const RuntimeShape& check_shape_0,
+ const RuntimeShape& check_shape_1,
+ const RuntimeShape& check_shape_2) {
+ const int dims_count = shape.DimensionsCount();
+ for (int i = 0; i < dims_count; ++i) {
+ if (i != skip_dim) {
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
+ }
+ }
+ return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1, check_shape_2);
+ }
+
+ inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
+ const RuntimeShape& check_shape_0,
+ const RuntimeShape& check_shape_1,
+ const RuntimeShape& check_shape_2,
+ const RuntimeShape& check_shape_3) {
+ const int dims_count = shape.DimensionsCount();
+ for (int i = 0; i < dims_count; ++i) {
+ if (i != skip_dim) {
+ TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
+ }
+ }
+ return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1, check_shape_2,
+ check_shape_3);
+ }
+
+ template <int N>
+ bool IsPackedWithoutStrides(const Dims<N>& dims) {
+ int expected_stride = 1;
+ for (int d = 0; d < N; d++) {
+ if (dims.strides[d] != expected_stride) return false;
+ expected_stride *= dims.sizes[d];
+ }
+ return true;
+ }
+
+ template <int N>
+ void ComputeStrides(Dims<N>* dims) {
+ dims->strides[0] = 1;
+ for (int d = 1; d < N; d++) {
+ dims->strides[d] = dims->strides[d - 1] * dims->sizes[d - 1];
+ }
+ }
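A small worked example of the two helpers above, with invented sizes: in the legacy Dims<N> layout, dimension 0 is the fastest-varying one, so the strides grow from index 0 upward.

    Dims<4> dims;
    dims.sizes[0] = 8;   // depth (fastest varying)
    dims.sizes[1] = 4;   // width
    dims.sizes[2] = 3;   // height
    dims.sizes[3] = 2;   // batches
    ComputeStrides(&dims);                       // strides == {1, 8, 32, 96}
    bool packed = IsPackedWithoutStrides(dims);  // true: every stride matches its size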
+
+ enum class BroadcastableOpCategory : uint8_t {
+ kNone,
+ kNonBroadcast, // Matching input shapes.
+ kFirstInputBroadcastsFast, // Fivefold nested loops.
+ kSecondInputBroadcastsFast, // Fivefold nested loops.
+ kGenericBroadcast, // Fall-back.
+ };
+
+ struct MinMax {
+ float min;
+ float max;
+ };
+ static_assert(sizeof(MinMax) == 8, "");
+
+ struct ActivationParams {
+ FusedActivationFunctionType activation_type;
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ };
+
+ struct ReluParams : public ActivationParams {
+ int32_t input_offset;
+ int32_t output_offset;
+ int32_t output_multiplier;
+ int output_shift;
+ };
+
+ // Styles of resizing op usages. For example, kImageStyle can be used with a Pad
+ // op for pattern-specific optimization.
+ enum class ResizingCategory : uint8_t {
+ kNone,
+ kImageStyle, // 4D, operating on inner dimensions, say {0, a, b, 0}.
+ kGenericResize,
+ };
+
+ // For Add, Sub, Mul ops.
+ struct ArithmeticParams {
+ // Shape dependent / common to data / op types.
+ BroadcastableOpCategory broadcast_category;
+ // uint8_t inference params.
+ int32_t input1_offset;
+ int32_t input2_offset;
+ int32_t output_offset;
+ int32_t output_multiplier;
+ int output_shift;
+ // Add / Sub, not Mul, uint8_t inference params.
+ int left_shift;
+ int32_t input1_multiplier;
+ int input1_shift;
+ int32_t input2_multiplier;
+ int input2_shift;
+
+ // TODO(b/158622529): Union the following activation params.
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ // int64_t activation params.
+ int64_t int64_activation_min;
+ int64_t int64_activation_max;
+ // int16_t activation params.
+ int16_t int16_activation_min;
+ int16_t int16_activation_max;
+
+ // Processed output dimensions.
+ // Let input "a" be the one that broadcasts in the faster-changing dimension.
+ // Then, after coalescing, for shapes {a0, a1, a2, a3, a4} and
+ // {b0, b1, b2, b3, b4},
+ // broadcast_shape[4] = b0 = a0.
+ // broadcast_shape[3] = b1; a1 = 1.
+ // broadcast_shape[2] = b2 = a2.
+ // broadcast_shape[1] = a3; b3 = 1.
+ // broadcast_shape[0] = b4 = a4.
+ int broadcast_shape[5];
+ };
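Instantiating the coalescing scheme described above with two hypothetical coalesced shapes, a = {2, 1, 3, 4, 5} (the input that broadcasts in the faster-changing dimension) and b = {2, 7, 3, 1, 5}:

    // broadcast_shape[4] = b0 = a0 = 2
    // broadcast_shape[3] = b1 = 7   (a1 == 1, so input "a" broadcasts here)
    // broadcast_shape[2] = b2 = a2 = 3
    // broadcast_shape[1] = a3 = 4   (b3 == 1, so input "b" broadcasts here)
    // broadcast_shape[0] = b4 = a4 = 5
    // i.e. broadcast_shape == {5, 4, 3, 7, 2}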
+
+ struct ConcatenationParams {
+ int8_t axis;
+ const int32_t* input_zeropoint;
+ const float* input_scale;
+ uint16_t inputs_count;
+ int32_t output_zeropoint;
+ float output_scale;
+ };
+
+ struct ComparisonParams {
+ // uint8_t inference params.
+ int left_shift;
+ int32_t input1_offset;
+ int32_t input1_multiplier;
+ int input1_shift;
+ int32_t input2_offset;
+ int32_t input2_multiplier;
+ int input2_shift;
+ // Shape dependent / common to inference types.
+ bool is_broadcast;
+ };
+
+ struct ConvParams {
+ PaddingType padding_type;
+ PaddingValues padding_values;
+ // TODO(starka): This was just "stride", so check that width+height is OK.
+ int16_t stride_width;
+ int16_t stride_height;
+ int16_t dilation_width_factor;
+ int16_t dilation_height_factor;
+ // uint8_t inference params.
+ // TODO(b/65838351): Use smaller types if appropriate.
+ int32_t input_offset;
+ int32_t weights_offset;
+ int32_t output_offset;
+ int32_t output_multiplier;
+ int output_shift;
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ };
+
+ struct Conv3DParams {
+ Padding3DValues padding_values;
+ int stride_width;
+ int stride_height;
+ int stride_depth;
+ int dilation_width;
+ int dilation_height;
+ int dilation_depth;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ };
+
+ typedef Conv3DParams Conv3DTransposeParams;
+
+ struct DepthToSpaceParams {
+ int32_t block_size;
+ };
+
+ struct DepthwiseParams {
+ PaddingType padding_type;
+ PaddingValues padding_values;
+ int16_t stride_width;
+ int16_t stride_height;
+ int16_t dilation_width_factor;
+ int16_t dilation_height_factor;
+ int16_t depth_multiplier;
+ // uint8_t inference params.
+ // TODO(b/65838351): Use smaller types if appropriate.
+ int32_t input_offset;
+ int32_t weights_offset;
+ int32_t output_offset;
+ int32_t output_multiplier;
+ int output_shift;
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ const int32_t* output_multiplier_per_channel;
+ const int32_t* output_shift_per_channel;
+ };
+
+ struct DequantizationParams {
+ double scale;
+ int32_t zero_point;
+ };
+
+ struct PerChannelDequantizationParams {
+ const float* scale;
+ const int32_t* zero_point;
+ int32_t quantized_dimension;
+ };
+
+ struct FakeQuantParams {
+ MinMax minmax;
+ int32_t num_bits;
+ };
+
+ struct FullyConnectedParams {
+ // uint8_t inference params.
+ // TODO(b/65838351): Use smaller types if appropriate.
+ int32_t input_offset;
+ int32_t weights_offset;
+ int32_t output_offset;
+ int32_t output_multiplier;
+ int output_shift;
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ // Mark the operands as cacheable if they are unchanging, e.g. weights.
+ bool lhs_cacheable;
+ bool rhs_cacheable;
+ FullyConnectedWeightsFormat weights_format;
+ };
+
+ struct GatherParams {
+ int16_t axis;
+ int16_t batch_dims;
+ };
+
+ struct L2NormalizationParams {
+ // uint8_t inference params.
+ int32_t input_zero_point;
+ };
+
+ struct LocalResponseNormalizationParams {
+ int32_t range;
+ double bias;
+ double alpha;
+ double beta;
+ };
+
+ struct HardSwishParams {
+ // zero_point of the input activations.
+ int16_t input_zero_point;
+ // zero_point of the output activations.
+ int16_t output_zero_point;
+ // 16bit fixed-point component of the multiplier to apply to go from the
+ // "high-res input scale", which is the input scale multiplied by 2^7, to the
+ // "relu-ish scale", which is 3.0/32768.
+ // See the implementation of HardSwishPrepare.
+ int16_t reluish_multiplier_fixedpoint_int16;
+ // exponent/bit-shift component of the aforementioned multiplier.
+ int reluish_multiplier_exponent;
+ // 16bit fixed-point component of the multiplier to apply to go from the
+ // "high-res input scale", which is the input scale multiplied by 2^7, to the
+ // output scale.
+ // See the implementation of HardSwishPrepare.
+ int16_t output_multiplier_fixedpoint_int16;
+ // exponent/bit-shift component of the aforementioned multiplier.
+ int output_multiplier_exponent;
+ };
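Assuming the usual TFLite Q0.15 convention for int16 fixed-point multipliers (an assumption based on the reference HardSwish kernels, not stated in this header), each fixed-point/exponent pair above encodes a real-valued scale roughly as:

    // real_multiplier ~= (multiplier_fixedpoint_int16 / 32768.0) * 2^multiplier_exponent
    //
    // e.g. a hypothetical reluish_multiplier_fixedpoint_int16 == 16384 with
    // reluish_multiplier_exponent == -3 would encode 0.5 * 2^-3 = 0.0625.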
+
+ struct LogisticParams {
+ // uint8_t inference params.
+ int32_t input_zero_point;
+ int32_t input_range_radius;
+ int32_t input_multiplier;
+ int input_left_shift;
+ };
+
+ struct LstmCellParams {
+ int32_t weights_zero_point;
+ int32_t accum_multiplier;
+ int accum_shift;
+ int state_integer_bits;
+ };
+
+ struct MeanParams {
+ int8_t axis_count;
+ int16_t axis[4];
+ };
+
+ struct PackParams {
+ int8_t axis;
+ const int32_t* input_zeropoint;
+ const float* input_scale;
+ uint16_t inputs_count;
+ int32_t output_zeropoint;
+ float output_scale;
+ };
+
+ struct PadParams {
+ int8_t left_padding_count;
+ int32_t left_padding[5];
+ int8_t right_padding_count;
+ int32_t right_padding[5];
+ ResizingCategory resizing_category;
+ };
+
+ struct PreluParams {
+ int32_t input_offset;
+ int32_t alpha_offset;
+ int32_t output_offset;
+ int32_t output_multiplier_1;
+ int output_shift_1;
+ int32_t output_multiplier_2;
+ int output_shift_2;
+ };
+
+ struct PoolParams {
+ FusedActivationFunctionType activation;
+ PaddingType padding_type;
+ PaddingValues padding_values;
+ int stride_height;
+ int stride_width;
+ int filter_height;
+ int filter_width;
+ // uint8_t, etc, activation params.
+ int32_t quantized_activation_min;
+ int32_t quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+ };
+
+ struct ReshapeParams {
+ int8_t shape_count;
+ int32_t shape[4];
+ };
+
+ struct ResizeBilinearParams {
+ bool align_corners;
+ // half_pixel_centers assumes pixels are of half the actual dimensions, and
+ // yields more accurate resizes. Corresponds to the same argument for the
+ // original TensorFlow op in TF2.0.
+ bool half_pixel_centers;
+ };
+
+ struct ResizeNearestNeighborParams {
+ bool align_corners;
+ bool half_pixel_centers;
+ };
+
+ struct SliceParams {
+ int8_t begin_count;
+ int32_t begin[5];
+ int8_t size_count;
+ int32_t size[5];
+ };
+
+ struct SoftmaxParams {
+ // beta is not really used (not a TensorFlow parameter) and not implemented
+ // for LogSoftmax.
+ double beta;
+ // uint8_t inference params. Used even when beta defaults to 1.0.
+ int32_t input_multiplier;
+ int32_t input_left_shift;
+ // Reverse scaling is only used by LogSoftmax.
+ int32_t reverse_scaling_divisor;
+ int32_t reverse_scaling_right_shift;
+ int diff_min;
+ int32_t zero_point;
+ float scale;
+ float* table;
+ // int16 LUT for exp(x), where x is uniformly distributed in [-10.0, 0.0].
+ int16_t* exp_lut;
+ // int16 LUT for 1 / (1 + x), where x is uniformly distributed in [0.0, 1.0].
+ int16_t* one_over_one_plus_x_lut;
+ uint8_t* uint8_table1;
+ uint8_t* uint8_table2;
+ };
+
+ struct SpaceToBatchParams {
+ // "Zero" padding for uint8_t means padding with the output offset.
+ int32_t output_offset;
+ };
+
+ struct SpaceToDepthParams {
+ int32_t block_size;
+ };
+
+ struct SplitParams {
+ // Graphs that split into, say, 2000 nodes are encountered. The indices in
+ // OperatorEdges are of type uint16_t.
+ uint16_t num_split;
+ int16_t axis;
+ };
+
+ struct SqueezeParams {
+ int8_t squeeze_dims_count;
+ int32_t squeeze_dims[4];
+ };
+
+ struct StridedSliceParams {
+ int8_t start_indices_count;
+ int32_t start_indices[5];
+ int8_t stop_indices_count;
+ int32_t stop_indices[5];
+ int8_t strides_count;
+ int32_t strides[5];
+
+ uint16_t begin_mask;
+ uint16_t ellipsis_mask;
+ uint16_t end_mask;
+ uint16_t new_axis_mask;
+ uint16_t shrink_axis_mask;
+ bool offset;
+ };
+
+ struct TanhParams {
+ int32_t input_zero_point;
+ int32_t input_range_radius;
+ int32_t input_multiplier;
+ int input_left_shift;
+ };
+
+ constexpr int kTransposeMaxDimensions = 6;
+
+ struct TransposeParams {
+ int8_t perm_count;
+ int32_t perm[kTransposeMaxDimensions];
+ };
+
+ struct UnpackParams {
+ uint16_t num_split;
+ int16_t axis;
+ };
+
+ struct LeakyReluParams {
+ float alpha;
+ int32_t input_offset;
+ int32_t output_offset;
+ int32_t output_multiplier_alpha;
+ int32_t output_shift_alpha;
+ int32_t output_multiplier_identity;
+ int32_t output_shift_identity;
+ };
+
+ template <typename P>
+ inline void SetActivationParams(float min, float max, P* params) {
+ params->float_activation_min = min;
+ params->float_activation_max = max;
+ }
+
+ template <typename P>
+ inline void SetActivationParams(int32_t min, int32_t max, P* params) {
+ params->quantized_activation_min = min;
+ params->quantized_activation_max = max;
+ }
+
+ template <typename P>
+ inline void SetActivationParams(uint32_t min, uint32_t max, P* params) {
+ params->quantized_activation_min = min;
+ params->quantized_activation_max = max;
+ }
+
+ template <typename P>
+ inline void SetActivationParams(int16_t min, int16_t max, P* params) {
+ params->int16_activation_min = min;
+ params->int16_activation_max = max;
+ }
+
+ template <typename P>
+ inline void SetActivationParams(int64_t min, int64_t max, P* params) {
+ params->int64_activation_min = min;
+ params->int64_activation_max = max;
+ }
+
+ template <typename P>
+ inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) {
+ *min = params.quantized_activation_min;
+ *max = params.quantized_activation_max;
+ }
+
+ template <typename P>
+ inline void GetActivationParams(const P& params, uint32_t* min, uint32_t* max) {
+ *min = params.quantized_activation_min;
+ *max = params.quantized_activation_max;
+ }
+
+ template <typename P>
+ inline void GetActivationParams(const P& params, int16_t* min, int16_t* max) {
+ *min = params.int16_activation_min;
+ *max = params.int16_activation_max;
+ }
+
+ template <typename P>
+ inline void GetActivationParams(const P& params, float* min, float* max) {
+ *min = params.float_activation_min;
+ *max = params.float_activation_max;
+ }
+
+ template <typename P>
+ inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) {
+ *min = params.int64_activation_min;
+ *max = params.int64_activation_max;
+ }
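A brief usage sketch, with invented clamp values: the static type of min and max picks the overload, and therefore which pair of fields in the params struct is written or read.

    ArithmeticParams op_params;
    SetActivationParams(int32_t{-128}, int32_t{127}, &op_params);  // fills quantized_activation_{min,max}
    SetActivationParams(0.0f, 6.0f, &op_params);                   // fills float_activation_{min,max}

    float act_min = 0.0f;
    float act_max = 0.0f;
    GetActivationParams(op_params, &act_min, &act_max);            // reads the float pair back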
+
+ // Type trait to check if a given type has a size smaller than 4 bytes.
+ template <typename T>
+ struct is_small_integer
+ : public std::integral_constant<bool,
+ std::is_same<T, int8_t>::value ||
+ std::is_same<T, uint8_t>::value ||
+ std::is_same<T, int16_t>::value ||
+ std::is_same<T, uint16_t>::value> {};
+
+ // Type trait to check if a given type is int32 or int64.
+ template <typename T>
+ struct is_int32_or_int64
+ : public std::integral_constant<bool, std::is_same<T, int32_t>::value ||
+ std::is_same<T, int64_t>::value> {
+ };
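A few compile-time checks of what the two traits above report (illustrative only):

    static_assert(is_small_integer<uint8_t>::value, "");
    static_assert(is_small_integer<int16_t>::value, "");
    static_assert(!is_small_integer<int32_t>::value, "");
    static_assert(is_int32_or_int64<int64_t>::value, "");
    static_assert(!is_int32_or_int64<int16_t>::value, "");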
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_