xmos-ai-tools 1.3.2.dev180__py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395):
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +73 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +307 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +153 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +83 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +53 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h +64 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +79 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +68 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +64 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +651 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev180.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev180.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev180.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev180.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev180.dist-info/top_level.txt +1 -0
@@ -0,0 +1,244 @@
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
16
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
17
+
18
+ #include <algorithm>
19
+ #include <cstdint>
20
+
21
+ #if defined(_MSC_VER)
22
+ #define __restrict__ __restrict
23
+ #endif
24
+
25
+ namespace tflite_micro {
26
+
27
+ // Not all backends support CpuBackendContext usage, so forward declare to avoid
28
+ // pulling in its implementation.
29
+ class CpuBackendContext;
30
+
31
+ namespace tensor_utils {
32
+
33
+ template <typename T>
34
+ bool PortableIsZeroVector(const T* vector, int v_size) {
35
+ for (int i = 0; i < v_size; ++i) {
36
+ if (vector[i] != 0) {
37
+ return false;
38
+ }
39
+ }
40
+ return true;
41
+ }
42
+
43
+ void PortableSymmetricQuantizeFloats(const float* values, const int size,
44
+ int8_t* quantized_values, float* min_value,
45
+ float* max_value, float* scaling_factor);
46
+
47
+ void PortableSymmetricQuantizeFloats(const float* values, const int size,
48
+ int8_t* quantized_values, float min_value,
49
+ float max_value, float* scaling_factor);
50
+
51
+ void PortableAsymmetricQuantizeFloats(const float* values, const int size,
52
+ int8_t* quantized_values,
53
+ float* scaling_factor, int32_t* offset);
54
+
55
+ // Multiply a matrix by a batch vector, and store results in a batch-size
56
+ // vector.
57
+ void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
58
+ int m_rows, int m_cols,
59
+ const float* vector,
60
+ int n_batch, float* result);
61
+
62
+ void PortableMatrixBatchVectorMultiplyAccumulate(
63
+ const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
64
+ const int8_t* __restrict__ vectors, const float* scaling_factors,
65
+ int n_batch, float* __restrict__ result);
66
+
67
+ void PortableMatrixBatchVectorMultiplyAccumulate(
68
+ const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
69
+ const int8_t* __restrict__ vectors, const float* scaling_factors,
70
+ int n_batch, float* __restrict__ result, const float* per_channel_scale,
71
+ const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
72
+ bool* compute_row_sums, CpuBackendContext* context);
73
+
74
+ void PortableMatrixBatchVectorMultiplyAccumulate(
75
+ const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
76
+ const int8_t* __restrict__ vector, const float* scaling_factors,
77
+ int n_batch, int32_t* scratch, float* __restrict__ result,
78
+ CpuBackendContext* context);
79
+
80
+ void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
81
+ const float* __restrict__ matrix, const int32_t* __restrict__ segments,
82
+ const int32_t* __restrict__ indices, int m_rows, int m_cols,
83
+ const float* __restrict__ vector, int n_batch, float* __restrict__ result);
84
+
85
+ void PortableSparseMatrixBatchVectorMultiplyAccumulate(
86
+ const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
87
+ int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
88
+ float* __restrict__ result);
89
+
90
+ void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16(
91
+ const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments,
92
+ const int32_t* __restrict__ indices, int m_rows, int m_cols,
93
+ const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
94
+ int n_batch, const int32_t input_offset, const int32_t output_multiplier,
95
+ const int32_t output_shift, const int32_t output_offset,
96
+ const int32_t output_activation_min, const int32_t output_activation_max,
97
+ int8_t* __restrict__ result);
98
+
99
+ void PortableSparseMatrixBatchVectorMultiplyAccumulate(
100
+ const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
101
+ const int m_cols, const int8_t* __restrict__ vectors,
102
+ const float* scaling_factors, int n_batch, float* __restrict__ result);
103
+
104
+ // Dot product of two vectors.
105
+ float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
106
+ int v_size);
107
+
108
+ void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1,
109
+ const int16_t* vector2,
110
+ int v_size, int n_batch,
111
+ int32_t* result);
112
+
113
+ void PortableVectorBatchVectorCwiseProductAccumulate(
114
+ const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
115
+ int32_t multiplier, int shift, int16_t* result);
116
+
117
+ void PortableMatrixBatchVectorMultiplyAccumulate(
118
+ const int8_t* input, const int32_t* bias,
119
+ const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
120
+ int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
121
+ int32_t* scratch, int16_t* output, CpuBackendContext* context);
122
+
123
+ void PortableMatrixBatchVectorMultiplyAccumulate(
124
+ const int8_t* input, const int32_t* bias,
125
+ const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
126
+ int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
127
+ int32_t* scratch, int8_t* output, CpuBackendContext* context);
128
+
129
+ void PortableMatrixBatchVectorMultiply(const int8_t* input,
130
+ int32_t input_zeropoint,
131
+ const int8_t* input_to_gate_weights,
132
+ int32_t input_to_gate_effective_scale_a,
133
+ int32_t input_to_gate_effective_scale_b,
134
+ int32_t n_batch, int32_t n_input,
135
+ int32_t n_cell, int8_t* gate_output,
136
+ int8_t gate_output_zp);
137
+
138
+ void PortableMatrixBatchVectorMultiply(
139
+ const int16_t* hidden, const int8_t* hidden_to_output_weights,
140
+ int32_t proj_effective_scale_a, int32_t proj_effective_scale_b,
141
+ const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden,
142
+ int32_t n_output, int32_t output_zp, int8_t* proj_output);
143
+
144
+ void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix,
145
+ int32_t scalar, int32_t n_row,
146
+ int32_t n_col, int32_t* output);
147
+
148
+ void PortableApplyLayerNorm(const int16_t* input,
149
+ const int16_t* layer_norm_weights,
150
+ const int32_t* bias, int32_t layer_norm_scale_a,
151
+ int32_t layer_norm_scale_b, int32_t variance_limit,
152
+ int n_batch, int n_input, int16_t* output);
153
+
154
+ void PortableApplyLayerNormFloat(const int16_t* input,
155
+ const int16_t* layer_norm_weights,
156
+ int32_t layer_norm_scale_a,
157
+ int32_t layer_norm_scale_b,
158
+ const int32_t* bias, int n_batch, int n_input,
159
+ int16_t* output);
160
+
161
+ void PortableApplySigmoid(const int16_t* input, int32_t n_batch,
162
+ int32_t n_input, int16_t* output);
163
+
164
+ void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch,
165
+ int32_t n_input, int16_t* output);
166
+
167
+ void PortableApplyTanh(int32_t integer_bits, const int16_t* input,
168
+ int32_t n_batch, int32_t n_input, int16_t* output);
169
+
170
+ void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch,
171
+ int32_t n_input, int32_t integer_bits,
172
+ int16_t* output);
173
+
174
+ void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
175
+ int n_batch, int n_input, int shift, int16_t* output);
176
+
177
+ void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
178
+ int32_t multiplier, int32_t shift, int32_t n_batch,
179
+ int32_t n_input, int32_t output_zp, int8_t* output);
180
+
181
+ void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2,
182
+ int n_batch, int n_input, int16_t* output);
183
+
184
+ template <typename T>
185
+ void PortableCwiseClipping(T* vector, const int v_size,
186
+ const T& clipping_value) {
187
+ for (int i = 0; i < v_size; i++) {
188
+ vector[i] = std::max(std::min(clipping_value, vector[i]),
189
+ static_cast<T>(-clipping_value));
190
+ }
191
+ }
192
+
193
+ // Batch vector initialization with another vector.
194
+ void PortableVectorBatchVectorAssign(const float* vector, int v_size,
195
+ int n_batch, float* batch_vector);
196
+
197
+ // Compute "1.0f - elements of vector" (used in CIFG).
198
+ void PortableSub1Vector(const float* vector, int v_size, float* result);
199
+
200
+ void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result);
201
+
202
+ // Multiply all elements of vector with a scalar.
203
+ void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
204
+ float* result);
205
+
206
+ // Reduce-sum on a vector:
207
+ // input_vector: pointer to input vector.
208
+ // output_vector: pointer to vector.
209
+ // output_size: output vector size.
210
+ // reduction_size: number of consecutive elements from input vector which are
211
+ // added to get one element of output.
212
+ template <typename INPUT, typename OUTPUT>
213
+ void PortableReductionSumVector(const INPUT* input_vector,
214
+ OUTPUT* output_vector, int output_size,
215
+ int reduction_size) {
216
+ for (int o = 0; o < output_size; o++) {
217
+ OUTPUT result = 0;
218
+ for (int r = 0; r < reduction_size; r++) {
219
+ result += input_vector[r];
220
+ }
221
+ output_vector[o] = result;
222
+ input_vector += reduction_size;
223
+ }
224
+ }
225
+
226
+ // Layer norm for each batch.
227
+ void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
228
+ float* __restrict__ output_vector,
229
+ int v_size, int n_batch);
230
+
231
+ // Saturate Add.
232
+ void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
233
+ const int8_t* recurrent, int8_t recurrent_zp,
234
+ int32_t input_effective_scale_a,
235
+ int32_t input_effective_scale_b,
236
+ int32_t recurrent_effective_scale_a,
237
+ int32_t recurrent_effective_scale_b,
238
+ int32_t n_batch, int32_t n_cell,
239
+ int16_t* output);
240
+
241
+ } // namespace tensor_utils
242
+ } // namespace tflite_micro
243
+
244
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
@@ -0,0 +1,111 @@
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
16
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
17
+
18
+ #include <algorithm>
19
+
20
+ #include "tensorflow/lite/kernels/internal/common.h"
21
+ #include "tensorflow/lite/kernels/internal/compatibility.h"
22
+ #include "tensorflow/lite/kernels/internal/types.h"
23
+
24
+ namespace tflite_micro {
25
+
26
+ namespace reference_ops {
27
+
28
// Broadcast prelu to output_shape for quantized uint8_t/int8_t data.
//
// PReLU semantics as implemented below: values at or above the (zero-point
// adjusted) zero pass through rescaled by output_multiplier_1/output_shift_1;
// negative values are multiplied by the broadcast alpha tensor and rescaled
// by output_multiplier_2/output_shift_2. All shapes are at most 4-D.
template <typename T>
inline void BroadcastPrelu4DSlow(
    const PreluParams& params, const RuntimeShape& input_shape,
    const T* input_data, const RuntimeShape& alpha_shape, const T* alpha_data,
    const RuntimeShape& output_shape, T* output_data) {
  TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 4);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);
  // Descriptors mapping each 4-D output subscript back to the (possibly
  // smaller, broadcast) input and alpha element it reads.
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input_shape, alpha_shape, &desc1, &desc2);

  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          int output_index = Offset(extended_output_shape, b, y, x, c);
          int input_index = SubscriptToIndex(desc1, b, y, x, c);
          // Work in int32 after removing the input zero point.
          const int32_t input_value =
              params.input_offset + input_data[input_index];
          int32_t output_value;
          if (input_value >= 0) {
            output_value = MultiplyByQuantizedMultiplier(
                input_value, params.output_multiplier_1, params.output_shift_1);
          } else {
            // Alpha is only read on the negative branch.
            auto alpha_index = SubscriptToIndex(desc2, b, y, x, c);
            const int32_t alpha_value =
                params.alpha_offset + alpha_data[alpha_index];

            output_value = MultiplyByQuantizedMultiplier(
                input_value * alpha_value, params.output_multiplier_2,
                params.output_shift_2);
          }
          // Re-apply the output zero point, then clamp to T's representable
          // range before narrowing.
          output_value += params.output_offset;

          const int32_t quantized_min = std::numeric_limits<T>::min();
          const int32_t quantized_max = std::numeric_limits<T>::max();
          const int32_t clamped_output =
              std::min(quantized_max, std::max(quantized_min, output_value));
          output_data[output_index] = static_cast<T>(clamped_output);
        }
      }
    }
  }
}
76
+
77
+ template <typename T>
78
+ inline void Prelu(const PreluParams& params, const RuntimeShape& input_shape,
79
+ const T* input_data, const RuntimeShape& alpha_shape,
80
+ const T* alpha_data, const RuntimeShape& output_shape,
81
+ T* output_data) {
82
+ const int32_t quantized_min = std::numeric_limits<T>::min();
83
+ const int32_t quantized_max = std::numeric_limits<T>::max();
84
+
85
+ const int flat_size =
86
+ MatchingElementsSize(input_shape, alpha_shape, output_shape);
87
+ for (int i = 0; i < flat_size; ++i) {
88
+ const int32_t input_value = params.input_offset + input_data[i];
89
+ int32_t output_value;
90
+ if (input_value >= 0) {
91
+ output_value = MultiplyByQuantizedMultiplier(
92
+ input_value, params.output_multiplier_1, params.output_shift_1);
93
+ } else {
94
+ const int32_t alpha_value = params.alpha_offset + alpha_data[i];
95
+
96
+ output_value = MultiplyByQuantizedMultiplier(input_value * alpha_value,
97
+ params.output_multiplier_2,
98
+ params.output_shift_2);
99
+ }
100
+ output_value += params.output_offset;
101
+
102
+ const int32_t clamped_output =
103
+ std::min(quantized_max, std::max(quantized_min, output_value));
104
+ output_data[i] = static_cast<T>(clamped_output);
105
+ }
106
+ }
107
+
108
+ } // namespace reference_ops
109
+ } // namespace tflite_micro
110
+
111
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
@@ -0,0 +1,140 @@
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
16
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
17
+
18
+ #include <algorithm>
19
+
20
+ #include "tensorflow/lite/kernels/internal/types.h"
21
+
22
+ namespace tflite_micro {
23
+
24
+ namespace reference_ops {
25
+
26
+ // Consolidates dimensions in broadcast inputs, checks for five-fold pattern.
27
+ //
28
+ // For example, if sequence of dimensions of one input is
29
+ // ..., 1, 3, 1, 7, 9, 5,... and the other is ..., 2, 3, 1, 7, 1, 1, ...
30
+ // we can consolidate these as
31
+ // ..., 1, 3*7, 9*5, ... and 2, 3*7, 1.
32
+ //
33
+ // The category is updated in the less-frequent case of shapes that are
34
+ // not suited to a fivefold-loop broadcast.
35
+ //
36
+ // Falls back to generic pattern when it does not know how to process properly.
37
+ //
38
+ // Returns true iff there is some sort of broadcast, which includes five-fold
39
+ // patterns and falling back to generic broadcast.
40
+ inline bool ProcessBroadcastShapes(const RuntimeShape& shape0,
41
+ const RuntimeShape& shape1,
42
+ tflite_micro::ArithmeticParams* params) {
43
+ const int dims_count =
44
+ std::max(shape0.DimensionsCount(), shape1.DimensionsCount());
45
+
46
+ params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast;
47
+ RuntimeShape scalar_shape(dims_count, 1);
48
+
49
+ auto extended_shape0 = RuntimeShape::ExtendedShape(dims_count, shape0);
50
+ auto extended_shape1 = RuntimeShape::ExtendedShape(dims_count, shape1);
51
+
52
+ // Check for "exact" match, implicitly accepting any scalar shapes.
53
+ if (extended_shape0 == extended_shape1) {
54
+ params->broadcast_category = BroadcastableOpCategory::kNonBroadcast;
55
+ return false;
56
+ }
57
+
58
+ for (int i = dims_count - 1; i >= 0; --i) {
59
+ if (extended_shape0.Dims(i) == extended_shape1.Dims(i)) {
60
+ continue;
61
+ } else if (extended_shape0.Dims(i) == 1) {
62
+ params->broadcast_category =
63
+ BroadcastableOpCategory::kFirstInputBroadcastsFast;
64
+ break;
65
+ } else if (extended_shape1.Dims(i) == 1) {
66
+ params->broadcast_category =
67
+ BroadcastableOpCategory::kSecondInputBroadcastsFast;
68
+ break;
69
+ } else {
70
+ // This case is erroneous: there is a dimension that does not match and
71
+ // is not a broadcast from one shape to the other.
72
+ params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast;
73
+ return true;
74
+ }
75
+ }
76
+
77
+ if (params->broadcast_category !=
78
+ BroadcastableOpCategory::kFirstInputBroadcastsFast &&
79
+ params->broadcast_category !=
80
+ BroadcastableOpCategory::kSecondInputBroadcastsFast) {
81
+ // This is unreachable because at least one else clause in the above loop
82
+ // must be reached.
83
+ TFLITE_DCHECK(false);
84
+ params->broadcast_category = BroadcastableOpCategory::kNonBroadcast;
85
+ return false;
86
+ }
87
+
88
+ // From this point it is assumed contractually that corresponding dimensions
89
+ // in shape0 and shape1 are either (a) equal or (b) one or other equals 1.
90
+ const bool swap_inputs = params->broadcast_category ==
91
+ BroadcastableOpCategory::kSecondInputBroadcastsFast;
92
+ const RuntimeShape* shape_a =
93
+ swap_inputs ? &extended_shape1 : &extended_shape0;
94
+ const RuntimeShape* shape_b =
95
+ swap_inputs ? &extended_shape0 : &extended_shape1;
96
+
97
+ int i = dims_count - 1;
98
+ params->broadcast_shape[0] = 1;
99
+ params->broadcast_shape[1] = 1;
100
+ params->broadcast_shape[2] = 1;
101
+ params->broadcast_shape[3] = 1;
102
+ params->broadcast_shape[4] = 1;
103
+ // y_0 is greedy: include dims if both or neither equal 1: in other words,
104
+ // test for equality rather than (shape_a->Dims(i) != 1).
105
+ while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) {
106
+ params->broadcast_shape[4] *= shape_b->Dims(i);
107
+ --i;
108
+ }
109
+ // Here either input_a or input_b has dim of 1 (if i >= 0). If it is input_b
110
+ // that has the unit dimension, the next two loops are not entered.
111
+ while (i >= 0 && shape_a->Dims(i) == 1) {
112
+ params->broadcast_shape[3] *= shape_b->Dims(i);
113
+ --i;
114
+ }
115
+ while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) {
116
+ params->broadcast_shape[2] *= shape_a->Dims(i);
117
+ --i;
118
+ }
119
+ // Here either input_a or input_b has dim of 1 (if i >= 0).
120
+ while (i >= 0 && shape_b->Dims(i) == 1) {
121
+ params->broadcast_shape[1] *= shape_a->Dims(i);
122
+ --i;
123
+ }
124
+ while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) {
125
+ params->broadcast_shape[0] *= shape_b->Dims(i);
126
+ --i;
127
+ }
128
+
129
+ // Rarer case is when the broadcast dimensions cannot be handled by a fivefold
130
+ // loop.
131
+ if (i >= 0) {
132
+ params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast;
133
+ }
134
+ return true;
135
+ }
136
+
137
+ } // namespace reference_ops
138
+ } // namespace tflite_micro
139
+
140
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
@@ -0,0 +1,89 @@
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
16
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
17
+
18
+ #include <algorithm>
19
+ #include <limits>
20
+ #include <vector>
21
+
22
+ #include "tensorflow/lite/kernels/internal/common.h"
23
+ #include "tensorflow/lite/kernels/internal/compatibility.h"
24
+ #include "tensorflow/lite/kernels/internal/cppmath.h"
25
+ #include "tensorflow/lite/kernels/internal/types.h"
26
+
27
+ namespace tflite_micro {
28
+
29
+ namespace reference_ops {
30
+
31
+ template <typename InputT, typename OutputT>
32
+ inline void AffineQuantize(const tflite_micro::QuantizationParams& op_params,
33
+ const RuntimeShape& input_shape,
34
+ const InputT* input_data,
35
+ const RuntimeShape& output_shape,
36
+ OutputT* output_data) {
37
+ const int32_t zero_point = op_params.zero_point;
38
+ const double scale = op_params.scale;
39
+ const int flat_size = MatchingFlatSize(input_shape, output_shape);
40
+ static constexpr int32_t min_val = std::numeric_limits<OutputT>::min();
41
+ static constexpr int32_t max_val = std::numeric_limits<OutputT>::max();
42
+
43
+ for (int i = 0; i < flat_size; i++) {
44
+ const InputT val = input_data[i];
45
+ int32_t unclamped =
46
+ static_cast<int32_t>(TfLiteRound(val / static_cast<float>(scale))) +
47
+ zero_point;
48
+ int32_t clamped = std::min(std::max(unclamped, min_val), max_val);
49
+ output_data[i] = clamped;
50
+ }
51
+ }
52
+
53
// Quantizes per-channel.
//
// Each element is quantized with the (scale, zero_point) pair selected by its
// index along `quantized_dimension`, then clamped to OutputT's range.
template <typename InputT, typename OutputT>
inline void PerChannelQuantize(
    const tflite_micro::PerChannelQuantizationParams& op_params,
    const RuntimeShape& input_shape, const InputT* input_data,
    const RuntimeShape& output_shape, OutputT* output_data) {
  // Ensure flat size is same.
  MatchingFlatSize(input_shape, output_shape);

  const int32_t* zero_point = op_params.zero_point;
  const float* scale = op_params.scale;
  const int32_t quantized_dimension = op_params.quantized_dimension;
  const int32_t num_dims = input_shape.DimensionsCount();
  const int32_t* dims_data = input_shape.DimsData();
  // Multi-dimensional index into the tensor, advanced one element at a time
  // by NextIndex() below.
  std::vector<int> current_dim(num_dims, 0);
  static constexpr int32_t min_val = std::numeric_limits<OutputT>::min();
  static constexpr int32_t max_val = std::numeric_limits<OutputT>::max();

  do {
    // Linear offset of the current multi-dimensional index.
    size_t offset =
        ReducedOutputOffset(num_dims, reinterpret_cast<const int*>(dims_data),
                            current_dim.data(), 0, nullptr);
    const InputT val = input_data[offset];
    // Quantization parameters come from the channel this element lies in.
    const int channel = current_dim[quantized_dimension];
    int32_t unclamped = static_cast<int32_t>(TfLiteRound(
                            val / static_cast<float>(scale[channel]))) +
                        zero_point[channel];
    int32_t clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[offset] = static_cast<OutputT>(clamped);
  } while (NextIndex(num_dims, reinterpret_cast<const int*>(dims_data),
                     current_dim.data()));
}
85
+
86
+ } // namespace reference_ops
87
+
88
+ } // namespace tflite_micro
89
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_