xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (395)
  1. xmos_ai_tools/__init__.py +7 -0
  2. xmos_ai_tools/io_server/__init__.py +151 -0
  3. xmos_ai_tools/runtime/__init__.py +0 -0
  4. xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
  5. xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
  6. xmos_ai_tools/runtime/include/flash_server.h +74 -0
  7. xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
  8. xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
  9. xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
  10. xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
  11. xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
  12. xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
  13. xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
  14. xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
  15. xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
  16. xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
  17. xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
  18. xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
  19. xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
  20. xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
  21. xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
  22. xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
  23. xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
  24. xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
  25. xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
  26. xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
  27. xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
  28. xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
  29. xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
  30. xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
  31. xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
  32. xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
  33. xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
  34. xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
  35. xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
  36. xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
  37. xmos_ai_tools/runtime/include/ioserver.h +44 -0
  38. xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
  39. xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
  40. xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
  41. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
  42. xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
  43. xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
  44. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
  45. xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
  46. xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
  47. xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
  48. xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
  49. xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
  50. xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
  51. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
  52. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
  53. xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
  54. xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
  55. xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
  56. xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
  57. xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
  58. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
  59. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
  60. xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
  61. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
  62. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  63. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
  64. xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
  65. xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
  66. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
  67. xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
  68. xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
  69. xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
  70. xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
  71. xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
  72. xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
  73. xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
  74. xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
  75. xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
  76. xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
  77. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
  78. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
  79. xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
  80. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
  81. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
  82. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
  83. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
  84. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
  85. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
  86. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
  87. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
  88. xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
  89. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
  90. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
  91. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
  92. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
  93. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
  94. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
  95. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
  96. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
  97. xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
  98. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
  99. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
  100. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
  101. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
  102. xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
  103. xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
  104. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  105. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  106. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  107. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  108. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  109. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  110. xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
  111. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  112. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  113. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  114. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  115. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  116. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  117. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  118. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  119. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  120. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  121. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  122. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  123. xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
  124. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  125. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  126. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  127. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  128. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  129. xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
  130. xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
  131. xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
  132. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  133. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  134. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  135. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  136. xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
  137. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  138. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  139. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
  140. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
  141. xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
  142. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
  143. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
  144. xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
  145. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
  146. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
  147. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
  148. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
  149. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
  150. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
  151. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  152. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
  153. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
  154. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
  155. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
  156. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
  157. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
  158. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
  159. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
  160. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
  161. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
  162. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
  163. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
  164. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
  165. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
  166. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
  167. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
  168. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
  169. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
  170. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
  171. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
  172. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
  173. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
  174. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
  175. xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
  176. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
  177. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
  178. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
  179. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
  180. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
  181. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
  182. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
  183. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
  184. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
  185. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
  186. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
  187. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
  188. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
  189. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
  190. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
  191. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
  192. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
  193. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
  194. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
  195. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
  196. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
  197. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
  198. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
  199. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
  200. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
  201. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
  202. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
  203. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
  204. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
  205. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
  206. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
  207. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
  208. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
  209. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
  210. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
  211. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
  212. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
  213. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
  214. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
  215. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
  216. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
  217. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
  218. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
  219. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
  220. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
  221. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
  222. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
  223. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
  224. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
  225. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
  226. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
  227. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
  228. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
  229. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
  230. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
  231. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
  232. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
  233. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
  234. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
  235. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
  236. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
  237. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
  238. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
  239. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
  240. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
  241. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
  242. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
  243. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
  244. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
  245. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
  246. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
  247. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
  248. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
  249. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
  250. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
  251. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
  252. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
  253. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
  254. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
  255. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
  256. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
  257. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
  258. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
  259. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
  260. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
  261. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
  262. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
  263. xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
  264. xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
  265. xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
  266. xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
  267. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
  268. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  269. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
  270. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
  271. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
  272. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
  273. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
  274. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
  275. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
  276. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
  277. xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
  278. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
  279. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
  280. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
  281. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
  282. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
  283. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
  284. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
  285. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
  286. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
  287. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
  288. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
  289. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
  290. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
  291. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
  292. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
  293. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
  294. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
  295. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
  296. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
  297. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
  298. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
  299. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
  300. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
  301. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
  302. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
  303. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
  304. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
  305. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
  306. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
  307. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
  308. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
  309. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
  310. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
  311. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
  312. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
  313. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
  314. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
  315. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
  316. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
  317. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  318. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
  319. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  320. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
  321. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
  322. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
  323. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
  324. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
  325. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
  326. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
  327. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
  328. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
  329. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
  330. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
  331. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
  332. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
  333. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
  334. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
  335. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
  336. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
  337. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
  338. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
  339. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
  340. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
  341. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
  342. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
  343. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
  344. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
  345. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
  346. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
  347. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
  348. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  349. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
  350. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
  351. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
  352. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  353. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  354. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
  355. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
  356. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
  357. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
  358. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
  359. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
  360. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
  361. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
  362. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
  363. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  364. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
  365. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
  366. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
  367. xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
  368. xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
  369. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
  370. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
  371. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
  372. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
  373. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
  374. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
  375. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  376. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  377. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  378. xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
  379. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
  380. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
  381. xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
  382. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  383. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  384. xmos_ai_tools/xformer/__init__.py +60 -0
  385. xmos_ai_tools/xformer/flash.py +190 -0
  386. xmos_ai_tools/xinterpreters/__init__.py +1 -0
  387. xmos_ai_tools/xinterpreters/exceptions.py +38 -0
  388. xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
  389. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  390. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  391. xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
  392. xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
  393. xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
  394. xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
  395. xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h
@@ -0,0 +1,1358 @@
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+ #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
+ #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
+
+ #include <algorithm>
+ #include <cstddef>
+ #include <cstdint>
+
+ #include "tensorflow/lite/kernels/internal/runtime_shape.h"
+ #ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
+ #ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
+ #define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
+ #endif
+ #endif
+
+ #include <cmath>
+ #include <functional>
+
+ #include "fixedpoint/fixedpoint.h"
+ #include "tensorflow/lite/core/macros.h"
+ #include "tensorflow/lite/kernels/internal/cppmath.h"
+ #include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
+ #include "tensorflow/lite/kernels/internal/types.h"
+
+ namespace tflite_micro {
+
+ constexpr int kReverseShift = -1;
+
+ // Reduces and compresses dimensions so that broadcast handling becomes more
+ // efficient. Returns true if the output shape is broadcastable; it doesn't
+ // contain any degenerate dimension, i.e. shape dimension = 0. False otherwise.
+ template <int MAX_DIM = 6>
+ bool ReduceDimensionsForBroadcast(const RuntimeShape& input1_shape,
+                                   const RuntimeShape& input2_shape,
+                                   size_t* compressed_input1_stride,
+                                   size_t* compressed_input2_stride,
+                                   size_t* compressed_output_shape) {
+   size_t num_compressed_dims = 0;
+   size_t compressed_input1_shape[MAX_DIM];
+   size_t compressed_input2_shape[MAX_DIM];
+   std::fill(compressed_input1_shape, compressed_input1_shape + MAX_DIM, 1);
+   std::fill(compressed_input2_shape, compressed_input2_shape + MAX_DIM, 1);
+   std::fill(compressed_output_shape, compressed_output_shape + MAX_DIM, 1);
+   bool broadcast_input1 = false;
+   bool broadcast_input2 = false;
+   bool first_nonunit = true;
+   const size_t num_input1_dims = input1_shape.DimensionsCount();
+   const size_t num_input2_dims = input2_shape.DimensionsCount();
+   const int32_t* input1_dims = input1_shape.DimsData();
+   const int32_t* input2_dims = input2_shape.DimsData();
+   const size_t num_common_dims = std::min(num_input1_dims, num_input2_dims);
+   for (size_t i = 1; i <= num_common_dims; i++) {
+     const size_t input1_dim = input1_dims[num_input1_dims - i];
+     const size_t input2_dim = input2_dims[num_input2_dims - i];
+     if (input1_dim == 0 || input2_dim == 0) {
+       return false;
+     }
+     if (input1_dim == 1 && input2_dim == 1) {
+       continue;
+     }
+     assert(!broadcast_input1 || !broadcast_input2);
+
+     if (input1_dim == 1) {
+       if (!broadcast_input1) {
+         broadcast_input1 = true;
+         broadcast_input2 = false;
+         num_compressed_dims++;
+       }
+       compressed_input2_shape[num_compressed_dims - 1] *= input2_dim;
+       compressed_output_shape[num_compressed_dims - 1] *= input2_dim;
+     } else if (input2_dim == 1) {
+       if (!broadcast_input2) {
+         broadcast_input1 = false;
+         broadcast_input2 = true;
+         num_compressed_dims++;
+       }
+       compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+       compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+     } else {
+       TFLITE_DCHECK(input1_dim == input2_dim);
+       if (broadcast_input1 || broadcast_input2 || first_nonunit) {
+         broadcast_input1 = false;
+         broadcast_input2 = false;
+         num_compressed_dims++;
+       }
+       compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+       compressed_input2_shape[num_compressed_dims - 1] *= input1_dim;
+       compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+     }
+     first_nonunit = false;
+   }
+   if (num_input1_dims > num_input2_dims) {
+     if (!broadcast_input2) {
+       num_compressed_dims++;
+     }
+     for (size_t i = 0; i < num_input1_dims - num_input2_dims; i++) {
+       const size_t input1_dim = input1_dims[i];
+       if (input1_dim == 0) {
+         return false;
+       }
+       compressed_input1_shape[num_compressed_dims - 1] *= input1_dim;
+       compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
+     }
+   } else if (num_input2_dims > num_input1_dims) {
+     if (!broadcast_input1) {
+       num_compressed_dims++;
+     }
+     for (size_t i = 0; i < num_input2_dims - num_input1_dims; i++) {
+       const size_t input2_dim = input2_dims[i];
+       if (input2_dim == 0) {
+         return false;
+       }
+       compressed_input2_shape[num_compressed_dims - 1] *= input2_dim;
+       compressed_output_shape[num_compressed_dims - 1] *= input2_dim;
+     }
+   }
+   num_compressed_dims = (num_compressed_dims > 1) ? num_compressed_dims : 1;
+
+   int input1_stride = 1;
+   int input2_stride = 1;
+   for (int i = 0; i < MAX_DIM; ++i) {
+     compressed_input1_stride[i] = input1_stride;
+     input1_stride *= compressed_input1_shape[i];
+     compressed_input2_stride[i] = input2_stride;
+     input2_stride *= compressed_input2_shape[i];
+   }
+   for (int i = 0; i < MAX_DIM; ++i) {
+     if (compressed_input1_shape[i] != compressed_input2_shape[i]) {
+       if (compressed_input1_shape[i] == 1) {
+         compressed_input1_stride[i] = 0;
+       } else {
+         TFLITE_DCHECK_EQ(compressed_input2_shape[i], 1);
+         compressed_input2_stride[i] = 0;
+       }
+     }
+   }
+   return true;
+ }
+
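To make the compression concrete, here is a minimal standalone sketch (not part of the package) of how the compressed output shape and zeroed strides produced by ReduceDimensionsForBroadcast drive a flat broadcast loop. The shapes, values, and loop below are hypothetical illustrations, with the stride/shape results worked out by hand for this shape pair.

// Standalone sketch: broadcasting input1 (shape [2,1,3]) against
// input2 (shape [2,4,3]). The routine above compresses this to a
// 3-dimensional problem: compressed output shape {3, 4, 2} with index 0
// fastest-varying, and input1's stride zeroed on the broadcast axis so
// one element is reused across it.
#include <cstdio>

int main() {
  const int out_shape[3] = {3, 4, 2};  // innermost dimension first
  const int stride1[3] = {1, 0, 3};    // stride 0 marks the broadcast axis
  const int stride2[3] = {1, 3, 12};
  float in1[6], in2[24], out[24];
  for (int i = 0; i < 6; ++i) in1[i] = static_cast<float>(i);
  for (int i = 0; i < 24; ++i) in2[i] = 100.0f + i;
  int o = 0;
  for (int d2 = 0; d2 < out_shape[2]; ++d2)
    for (int d1 = 0; d1 < out_shape[1]; ++d1)
      for (int d0 = 0; d0 < out_shape[0]; ++d0)
        out[o++] = in1[d0 * stride1[0] + d1 * stride1[1] + d2 * stride1[2]] +
                   in2[d0 * stride2[0] + d1 * stride2[1] + d2 * stride2[2]];
  printf("out[0]=%g out[23]=%g\n", out[0], out[23]);  // 100 and 128
  return 0;
}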
+ inline void GetActivationMinMax(FusedActivationFunctionType ac,
+                                 float* output_activation_min,
+                                 float* output_activation_max) {
+   switch (ac) {
+     case FusedActivationFunctionType::kNone:
+       *output_activation_min = std::numeric_limits<float>::lowest();
+       *output_activation_max = std::numeric_limits<float>::max();
+       break;
+     case FusedActivationFunctionType::kRelu:
+       *output_activation_min = 0.f;
+       *output_activation_max = std::numeric_limits<float>::max();
+       break;
+     case FusedActivationFunctionType::kRelu1:
+       *output_activation_min = -1.f;
+       *output_activation_max = 1.f;
+       break;
+     case FusedActivationFunctionType::kRelu6:
+       *output_activation_min = 0.f;
+       *output_activation_max = 6.f;
+       break;
+   }
+ }
+
+ template <typename T>
+ inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
+                                       T output_activation_max) {
+   using std::max;
+   using std::min;
+   return min(max(x, output_activation_min), output_activation_max);
+ }
+
+ // Legacy function, left for compatibility only.
+ template <FusedActivationFunctionType Ac>
+ float ActivationFunction(float x) {
+   float output_activation_min, output_activation_max;
+   GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+   return ActivationFunctionWithMinMax(x, output_activation_min,
+                                       output_activation_max);
+ }
+
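As a quick illustration of the two helpers above, this standalone snippet (mirroring, not calling, the package code) clamps values through the [0, 6] range that GetActivationMinMax reports for kRelu6:

#include <algorithm>
#include <cstdio>

// Mirrors ActivationFunctionWithMinMax for illustration only.
template <typename T>
T Clamp(T x, T activation_min, T activation_max) {
  return std::min(std::max(x, activation_min), activation_max);
}

int main() {
  const float lo = 0.0f, hi = 6.0f;  // kRelu6 bounds, per the switch above
  printf("%g %g %g\n", Clamp(-2.0f, lo, hi), Clamp(3.5f, lo, hi),
         Clamp(9.0f, lo, hi));  // prints: 0 3.5 6
  return 0;
}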
+ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
+                          const float* bias_data, int array_size,
+                          float* array_data) {
+   if (bias_size == 0) return;
+   // Note: see b/132215220: in May 2019 we thought it would be OK to replace
+   // this with the Eigen one-liner:
+   //   return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMax(clamp_min).
+   // This turned out to severely regress performance: +4ms (i.e. 8%) on
+   // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
+   TFLITE_DCHECK_EQ((array_size % bias_size), 0);
+ #ifdef USE_NEON
+   float* array_ptr = array_data;
+   float* array_end_ptr = array_ptr + array_size;
+   const auto clamp_min_vec = vdupq_n_f32(clamp_min);
+   const auto clamp_max_vec = vdupq_n_f32(clamp_max);
+   for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
+     int i = 0;
+     for (; i <= bias_size - 16; i += 16) {
+       auto b0 = vld1q_f32(bias_data + i);
+       auto b1 = vld1q_f32(bias_data + i + 4);
+       auto b2 = vld1q_f32(bias_data + i + 8);
+       auto b3 = vld1q_f32(bias_data + i + 12);
+       auto a0 = vld1q_f32(array_ptr + i);
+       auto a1 = vld1q_f32(array_ptr + i + 4);
+       auto a2 = vld1q_f32(array_ptr + i + 8);
+       auto a3 = vld1q_f32(array_ptr + i + 12);
+       auto x0 = vaddq_f32(a0, b0);
+       auto x1 = vaddq_f32(a1, b1);
+       auto x2 = vaddq_f32(a2, b2);
+       auto x3 = vaddq_f32(a3, b3);
+       x0 = vmaxq_f32(clamp_min_vec, x0);
+       x1 = vmaxq_f32(clamp_min_vec, x1);
+       x2 = vmaxq_f32(clamp_min_vec, x2);
+       x3 = vmaxq_f32(clamp_min_vec, x3);
+       x0 = vminq_f32(clamp_max_vec, x0);
+       x1 = vminq_f32(clamp_max_vec, x1);
+       x2 = vminq_f32(clamp_max_vec, x2);
+       x3 = vminq_f32(clamp_max_vec, x3);
+       vst1q_f32(array_ptr + i, x0);
+       vst1q_f32(array_ptr + i + 4, x1);
+       vst1q_f32(array_ptr + i + 8, x2);
+       vst1q_f32(array_ptr + i + 12, x3);
+     }
+     for (; i <= bias_size - 4; i += 4) {
+       auto b = vld1q_f32(bias_data + i);
+       auto a = vld1q_f32(array_ptr + i);
+       auto x = vaddq_f32(a, b);
+       x = vmaxq_f32(clamp_min_vec, x);
+       x = vminq_f32(clamp_max_vec, x);
+       vst1q_f32(array_ptr + i, x);
+     }
+     for (; i < bias_size; i++) {
+       array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
+                                                   clamp_min, clamp_max);
+     }
+   }
+ #else  // not NEON
+   for (int array_offset = 0; array_offset < array_size;
+        array_offset += bias_size) {
+     for (int i = 0; i < bias_size; i++) {
+       array_data[array_offset + i] = ActivationFunctionWithMinMax(
+           array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
+     }
+   }
+ #endif
+ }
+
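The scalar (#else) path above is straightforward to exercise on its own. A minimal standalone sketch, with hypothetical bias and array values, applying the bias vector cyclically and clamping the result to [0, 6]:

#include <algorithm>
#include <cstdio>

int main() {
  const float bias[2] = {0.5f, -0.5f};
  // array_size (6) must be a multiple of bias_size (2), as the DCHECK requires.
  float data[6] = {-1.0f, 0.0f, 2.0f, 5.9f, 6.5f, 7.0f};
  for (int off = 0; off < 6; off += 2)   // one "row" per bias period
    for (int i = 0; i < 2; ++i)          // add bias, then clamp
      data[off + i] = std::min(std::max(data[off + i] + bias[i], 0.0f), 6.0f);
  for (float v : data) printf("%g ", v);  // prints: 0 0 2.5 5.4 6 6
  printf("\n");
  return 0;
}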
+ // Single-rounding MultiplyByQuantizedMultiplier
+ #if TFLITE_SINGLE_ROUNDING
+ inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
+                                              int32_t quantized_multiplier,
+                                              int shift) {
+   TFLITE_DCHECK(quantized_multiplier >= 0);
+   TFLITE_DCHECK(shift >= -31 && shift <= 30);
+
+   const int64_t total_shift = 31 - shift;
+   const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+   int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
+   result = result >> total_shift;
+
+   TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                 result <= std::numeric_limits<int32_t>::max());
+   return static_cast<int32_t>(result);
+ }
+
+ inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+     int32_t x, int32_t quantized_multiplier, int shift) {
+   TFLITE_DCHECK_LE(shift, 0);
+   return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
+ }
+
+ inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+     int32_t x, int32_t quantized_multiplier, int shift) {
+   TFLITE_DCHECK_GE(shift, 0);
+   return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
+ }
+
+ inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
+                                              int32_t quantized_multiplier,
+                                              int shift) {
+   // Inputs:
+   // - quantized_multiplier has fixed point at bit 31
+   // - shift is -31 to +7 (negative for right shift)
+   //
+   // Assumptions: The following input ranges are assumed
+   // - quantize_scale >= 0  (the usual range is (1 << 30) to (1 << 31) - 1)
+   // - scaling is chosen so final scaled result fits in int32_t
+   // - input x is in the range -(1 << 47) <= x < (1 << 47)
+   TFLITE_DCHECK(quantized_multiplier >= 0);
+   TFLITE_DCHECK(shift >= -31 && shift < 8);
+   TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
+                 x < (static_cast<int64_t>(1) << 47));
+
+   const int32_t reduced_multiplier =
+       (quantized_multiplier < 0x7FFF0000)
+           ? ((quantized_multiplier + (1 << 15)) >> 16)
+           : 0x7FFF;
+   const int64_t total_shift = 15 - shift;
+   const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+   int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
+   result = result >> total_shift;
+
+   TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                 result <= std::numeric_limits<int32_t>::max());
+   return static_cast<int32_t>(result);
+ }
+
+ #ifdef USE_NEON
+ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
+     int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
+   TFLITE_DCHECK(quantized_multiplier >= 0);
+
+   const int right_shift = std::min(-1, shift);
+   const int left_shift = shift - right_shift;
+
+   const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
+   const int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
+   const int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
+
+   int32x4x4_t result;
+   result.val[0] = vrshlq_s32(
+       vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup),
+       right_shift_dup);
+
+   result.val[1] = vrshlq_s32(
+       vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup),
+       right_shift_dup);
+
+   result.val[2] = vrshlq_s32(
+       vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup),
+       right_shift_dup);
+
+   result.val[3] = vrshlq_s32(
+       vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup),
+       right_shift_dup);
+
+   return result;
+ }
+ #endif  // USE_NEON
+ // Double-rounding MultiplyByQuantizedMultiplier
+ #else
+ inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+     int32_t x, int32_t quantized_multiplier, int left_shift) {
+   using gemmlowp::RoundingDivideByPOT;
+   using gemmlowp::SaturatingRoundingDoublingHighMul;
+   return RoundingDivideByPOT(
+       SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
+ }
+
+ inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+     int32_t x, int32_t quantized_multiplier, int left_shift) {
+   using gemmlowp::SaturatingRoundingDoublingHighMul;
+   return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
+                                            quantized_multiplier);
+ }
+
+ TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
+     int32_t x, int32_t quantized_multiplier, int shift);
+
+ TFLITE_NOINLINE int32_t MultiplyByQuantizedMultiplier(
+     int64_t x, int32_t quantized_multiplier, int shift);
+
+ #ifdef USE_NEON
+ // Round uses ARM's rounding shift right.
+ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
+     int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
+   const int left_shift = std::max(shift, 0);
+   const int right_shift = std::min(shift, 0);
+   int32x4x4_t result;
+
+   int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
+   int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
+   int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
+
+   result.val[0] =
+       vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup),
+                                multiplier_dup),
+                  right_shift_dup);
+
+   result.val[1] =
+       vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup),
+                                multiplier_dup),
+                  right_shift_dup);
+
+   result.val[2] =
+       vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup),
+                                multiplier_dup),
+                  right_shift_dup);
+
+   result.val[3] =
+       vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup),
+                                multiplier_dup),
+                  right_shift_dup);
+
+   return result;
+ }
+ #endif  // USE_NEON
+ #endif  // TFLITE_SINGLE_ROUNDING
+
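A worked example of the single-rounding arithmetic above, with hypothetical values: requantizing x = 1000 by multiplier 2^30 (0.5 in Q31 fixed point) and shift = -1, for an effective scale of 0.25. The helper below mirrors the int32_t overload rather than calling the package code:

#include <cstdint>
#include <cstdio>

// Mirrors the single-rounding int32_t path above, minus the DCHECKs.
int32_t MulByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
                                 int shift) {
  const int64_t total_shift = 31 - shift;  // 32 when shift == -1
  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
  const int64_t result =
      (static_cast<int64_t>(x) * quantized_multiplier + round) >> total_shift;
  return static_cast<int32_t>(result);
}

int main() {
  // 1000 * 0.5 * 2^-1 = 250; the +round term implements round-half-up.
  printf("%d\n", MulByQuantizedMultiplier(1000, 1 << 30, -1));  // 250
  return 0;
}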
+ template <typename T>
+ int CountLeadingZeros(T integer_input) {
+   static_assert(std::is_unsigned<T>::value,
+                 "Only unsigned integer types handled.");
+   if (integer_input == 0) {
+     return std::numeric_limits<T>::digits;
+   }
+ #if defined(__GNUC__)
+   if (std::is_same<T, uint32_t>::value) {
+     return __builtin_clz(integer_input);
+   } else if (std::is_same<T, uint64_t>::value) {
+     return __builtin_clzll(integer_input);
+   }
+ #endif
+   const T one_in_leading_positive = static_cast<T>(1)
+                                     << (std::numeric_limits<T>::digits - 1);
+   int leading_zeros = 0;
+   while (integer_input < one_in_leading_positive) {
+     integer_input <<= 1;
+     ++leading_zeros;
+   }
+   return leading_zeros;
+ }
+
+ template <typename T>
+ inline int CountLeadingSignBits(T integer_input) {
+   static_assert(std::is_signed<T>::value, "Only signed integer types handled.");
+ #if defined(__GNUC__) && !defined(__clang__)
+   return integer_input ? __builtin_clrsb(integer_input)
+                        : std::numeric_limits<T>::digits;
+ #else
+   using U = typename std::make_unsigned<T>::type;
+   return integer_input >= 0
+              ? CountLeadingZeros(static_cast<U>(integer_input)) - 1
+          : integer_input != std::numeric_limits<T>::min()
+              ? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
+              : 0;
+ #endif
+ }
+
+ // Use "count leading zeros" helper functions to do a fast Floor(log_2(x)).
+ template <typename Integer>
+ inline Integer FloorLog2(Integer n) {
+   static_assert(std::is_integral<Integer>::value, "");
+   static_assert(std::is_signed<Integer>::value, "");
+   static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, "");
+   TFLITE_CHECK_GT(n, 0);
+   if (sizeof(Integer) == 4) {
+     return 30 - CountLeadingSignBits(n);
+   } else {
+     return 62 - CountLeadingSignBits(n);
+   }
+ }
+
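To sanity-check the FloorLog2 identity above: for a positive 32-bit n, 30 - CountLeadingSignBits(n) equals floor(log2(n)), because a positive value has clz(n) - 1 redundant sign bits. A standalone sketch using the same GNU builtin the header uses on its GCC path:

#include <cstdint>
#include <cstdio>

// Mirrors the 4-byte branch of FloorLog2 above (GCC/Clang builtin).
int FloorLog2_32(int32_t n) { return 30 - __builtin_clrsb(n); }

int main() {
  printf("%d %d %d\n", FloorLog2_32(1), FloorLog2_32(16), FloorLog2_32(1000));
  // prints: 0 4 9
  return 0;
}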
466
+ namespace detail {
467
+
468
+ // LUTPopulate takes an optional type-erased transform_params to allow passing
469
+ // extra parameters to the transform function pointer. const void* is used
470
+ // instead of std::function to be compatible with TFLite Micro
471
+ template <typename FloatT, typename Func>
472
+ inline typename std::enable_if<std::is_same<Func, FloatT (*)(FloatT)>::value,
473
+ FloatT>::type
474
+ LUTTransform(Func transform, const void* /*transform_params*/, FloatT value) {
475
+ static_assert(std::is_floating_point<FloatT>::value,
476
+ "FloatT must be a floating-point type.");
477
+ return transform(value);
478
+ }
479
+
480
+ template <typename FloatT, typename Func>
481
+ inline typename std::enable_if<
482
+ std::is_same<Func, FloatT (*)(FloatT, const void*)>::value, FloatT>::type
483
+ LUTTransform(Func transform, const void* transform_params, FloatT value) {
484
+ static_assert(std::is_floating_point<FloatT>::value,
485
+ "FloatT must be a floating-point type.");
486
+ return transform(value, transform_params);
487
+ }
+
+ // Use the same LUT generation code for both uint8_t and int8_t. int8_t indices
+ // are cast directly to uint8_t, so the int8 LUT is ordered as [0, 1, ..., 127,
+ // -128, ..., -2, -1] rather than [-128, -127, ..., -1, 0, 1, ..., 126, 127].
+ template <typename T, typename Func>
+ inline void LUTPopulateInt8(float input_scale, int32_t input_zero_point,
+ float output_scale, int32_t output_zero_point,
+ Func transform, const void* transform_params,
+ T* lut) {
+ static_assert(
+ std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value,
+ "T must be a uint8 or int8 type.");
+ uint8_t* lut_uint8 = reinterpret_cast<uint8_t*>(lut);
+ const float inverse_scale = 1 / output_scale;
+ int32_t maxval = std::numeric_limits<T>::max();
+ int32_t minval = std::numeric_limits<T>::min();
+ for (int32_t val = minval; val <= maxval; ++val) {
+ const float dequantized = input_scale * (val - input_zero_point);
+ const float transformed =
+ LUTTransform(transform, transform_params, dequantized);
+ const float rescaled = TfLiteRound(transformed * inverse_scale);
+ const int32_t quantized =
+ static_cast<int32_t>(rescaled + output_zero_point);
+ lut_uint8[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>(
+ static_cast<T>(std::max(std::min(maxval, quantized), minval)));
+ }
+ }
+
+ // Keep floating-point type configurable for backward compatibility. float
+ // should be used for FloatT by default.
+ template <typename FloatT, typename Func>
+ inline void LUTPopulateInt16(FloatT input_scale, int32_t input_zero_point,
+ FloatT output_scale, int32_t output_zero_point,
+ Func transform, const void* transform_params,
+ int16_t* lut) {
+ static_assert(std::is_floating_point<FloatT>::value,
+ "FloatT must be a floating-point type.");
+ const FloatT input_min =
+ input_scale * (std::numeric_limits<int16_t>::min() - input_zero_point);
+ const FloatT input_max =
+ input_scale * (std::numeric_limits<int16_t>::max() - input_zero_point);
+ const FloatT output_min =
+ output_scale * (std::numeric_limits<int16_t>::min() - output_zero_point);
+ const FloatT output_max =
+ output_scale * (std::numeric_limits<int16_t>::max() - output_zero_point);
+
+ const int nb_steps = 512;
+ const FloatT step = (input_max - input_min) / nb_steps;
+ const FloatT half_step = step / 2;
+ const FloatT output_scaling_inv =
+ static_cast<FloatT>(std::numeric_limits<int16_t>::max() -
+ std::numeric_limits<int16_t>::min() + 1) /
+ (output_max - output_min);
+ const FloatT table_min =
+ static_cast<FloatT>(std::numeric_limits<int16_t>::min());
+ const FloatT table_max =
+ static_cast<FloatT>(std::numeric_limits<int16_t>::max());
+
+ for (int i = 0; i < nb_steps; i++) {
+ const FloatT val =
+ LUTTransform<FloatT>(transform, transform_params, input_min + i * step);
+ const FloatT val_midpoint = LUTTransform<FloatT>(
+ transform, transform_params, input_min + i * step + half_step);
+ const FloatT val_next = LUTTransform<FloatT>(transform, transform_params,
+ input_min + (i + 1) * step);
+
+ const FloatT sample_val = TfLiteRound(val * output_scaling_inv);
+ const FloatT midpoint_interp_val =
+ TfLiteRound((val_next * output_scaling_inv +
+ TfLiteRound(val * output_scaling_inv)) /
+ 2);
+ const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv);
+ const FloatT midpoint_err = midpoint_interp_val - midpoint_val;
+ const FloatT bias = TfLiteRound(midpoint_err / 2);
+
+ lut[i] = static_cast<int16_t>(std::min<FloatT>(
+ std::max<FloatT>(sample_val - bias, table_min), table_max));
+ }
+
+ lut[nb_steps] = static_cast<int16_t>(std::min<FloatT>(
+ std::max<FloatT>(TfLiteRound(LUTTransform<FloatT>(
+ transform, transform_params, input_max) *
+ output_scaling_inv),
+ table_min),
+ table_max));
+ }
+
+ } // namespace detail
+
+ template <typename T>
+ inline typename std::enable_if<std::is_same<T, uint8_t>::value ||
+ std::is_same<T, int8_t>::value,
+ void>::type
+ LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale,
+ int32_t output_zero_point, float (*transform)(float), T* lut) {
+ detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform, nullptr, lut);
+ }
+
+ template <typename T>
+ inline typename std::enable_if<std::is_same<T, uint8_t>::value ||
+ std::is_same<T, int8_t>::value,
+ void>::type
+ LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale,
+ int32_t output_zero_point, float (*transform)(float, const void*),
+ const void* transform_params, T* lut) {
+ detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform, transform_params, lut);
+ }
+
+ template <typename T>
+ inline typename std::enable_if<std::is_same<T, int16_t>::value, void>::type
+ LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale,
+ int32_t output_zero_point, float (*transform)(float), T* lut) {
+ detail::LUTPopulateInt16<float>(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform, nullptr, lut);
+ }
+
+ template <typename T>
+ inline typename std::enable_if<std::is_same<T, int16_t>::value, void>::type
+ LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale,
+ int32_t output_zero_point, float (*transform)(float, const void*),
+ const void* transform_params, T* lut) {
+ detail::LUTPopulateInt16<float>(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform,
+ transform_params, lut);
+ }
+
+ // Deprecated: prefer the float version above. Kept for backward compatibility.
+ template <typename T>
+ inline typename std::enable_if<std::is_same<T, int16_t>::value, void>::type
+ LUTPopulate(double input_scale, int32_t input_zero_point, double output_scale,
+ int32_t output_zero_point, double (*transform)(double), T* lut) {
+ detail::LUTPopulateInt16<double>(input_scale, input_zero_point, output_scale,
+ output_zero_point, transform, nullptr, lut);
+ }
+
+ // The size of the LUT depends on the type of input. For uint8 and int8 inputs
+ // a simple 256-entry LUT is used. For int16 inputs the high 9 bits are used
+ // for indexing and the 7 remaining bits are used for interpolation. We thus
+ // use a 513-entry LUT for the int16 case: 512 entries for the 9-bit indexing
+ // plus 1 extra entry to interpolate the last value.
+ template <typename T>
+ constexpr int LUTSize() {
+ static_assert(std::is_same<T, uint8_t>::value ||
+ std::is_same<T, int8_t>::value ||
+ std::is_same<T, int16_t>::value,
+ "Only LUTs with uint8, int8 or int16 inputs are supported.");
+ // As per C++11, constexpr functions cannot have more than one return
+ // statement.
+ return (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value)
+ ? 256
+ : 513;
+ }
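A minimal usage sketch (editorial; the quantization parameters are invented): size the table with LUTSize and fill it with LUTPopulate, here for an int8 tanh.

    #include <cmath>
    #include <cstdint>

    void BuildTanhLut() {
      int8_t lut[tflite_micro::LUTSize<int8_t>()];  // 256 entries for int8
      tflite_micro::LUTPopulate<int8_t>(
          /*input_scale=*/0.0625f, /*input_zero_point=*/0,
          /*output_scale=*/1.0f / 128.0f, /*output_zero_point=*/0,
          [](float x) { return std::tanh(x); }, lut);  // captureless lambda
    }                                                  // decays to float(*)(float)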
+
+ // int16_t -> int16_t table lookup with interpolation
+ // LUT must have 513 values
+ inline int16_t LUTLookup(int16_t value, const int16_t* lut) {
+ // 512 base values; lut[512] is only used to calculate the slope of the last
+ // segment
+ const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
+ assert(index < 512 && "LUT index out of range.");
+ const int16_t offset = value & 0x7f;
+
+ // Base and slope are Q0.x
+ const int16_t base = lut[index];
+ const int16_t slope = lut[index + 1] - lut[index];
+
+ // Q0.x * Q0.7 = Q0.(x + 7)
+ // Round and convert from Q0.(x + 7) to Q0.x
+ const int delta = (slope * offset + 64) >> 7;
+
+ // Q0.15 + Q0.15
+ return static_cast<int16_t>(base + delta);
+ }
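To see the 9-bit index / 7-bit interpolation at work, a small editorial self-check: an identity table maps values away from the clamped top entry back to themselves.

    #include <cstdint>

    inline bool Int16LutIdentityCheck() {
      int16_t lut[513];
      for (int i = 0; i < 513; ++i) {
        const int32_t v = (i - 256) * 128;  // segment i starts at (i - 256) * 128
        lut[i] = static_cast<int16_t>(v > 32767 ? 32767 : v);  // lut[512] clamps
      }
      // value 1000: index = 256 + (1000 >> 7) = 263, offset = 1000 & 0x7f = 104,
      // base = 896, slope = 128, delta = (128 * 104 + 64) >> 7 = 104 -> 1000.
      return tflite_micro::LUTLookup(static_cast<int16_t>(1000), lut) == 1000;
    }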
+
+ // int8_t -> int8_t table lookup without interpolation
+ // LUT must have 256 values
+ // LUTPopulate<int8_t> has ordered the LUT so that indexing it with an
+ // int8_t is just done by casting it to a uint8_t.
+ inline int8_t LUTLookup(int8_t value, const int8_t* lut) {
+ return lut[static_cast<uint8_t>(value)];
+ }
+
+ // uint8_t -> uint8_t table lookup without interpolation
+ // LUT must have 256 values
+ inline uint8_t LUTLookup(uint8_t value, const uint8_t* lut) {
+ return lut[value];
+ }
+
+ // Table of sigmoid(i/24) in 0.16 format - 256 elements.
+
+ // We use a combined sigmoid and tanh look-up table, since
+ // tanh(x) = 2*sigmoid(2*x) - 1.
+ // Both functions are symmetric, so the LUT is only needed
+ // for the absolute value of the input.
+ static const uint16_t sigmoid_table_uint16[256] = {
+ 32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498,
+ 40149, 40794, 41432, 42064, 42688, 43304, 43912, 44511, 45102, 45683, 46255,
+ 46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, 51865,
+ 52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174,
+ 56503, 56823, 57133, 57433, 57724, 58007, 58280, 58544, 58800, 59048, 59288,
+ 59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, 61279, 61441,
+ 61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886,
+ 62990, 63090, 63186, 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835,
+ 63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, 64357, 64405, 64450,
+ 64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845,
+ 64873, 64900, 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097,
+ 65115, 65132, 65149, 65164, 65179, 65194, 65208, 65221, 65234, 65246, 65258,
+ 65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360,
+ 65367, 65374, 65381, 65387, 65393, 65399, 65404, 65410, 65415, 65420, 65425,
+ 65429, 65433, 65438, 65442, 65445, 65449, 65453, 65456, 65459, 65462, 65465,
+ 65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491,
+ 65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508,
+ 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65517, 65518,
+ 65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, 65525,
+ 65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529,
+ 65529, 65529, 65530, 65530, 65530, 65530, 65531, 65531, 65531, 65531, 65531,
+ 65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, 65533, 65533,
+ 65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534,
+ 65534, 65534, 65535};
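A float reference for the identity the table exploits (editorial sketch, not the fixed-point lookup itself):

    #include <cmath>

    inline float SigmoidRef(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    // tanh shares the sigmoid table because tanh(x) = 2 * sigmoid(2 * x) - 1;
    // the symmetries sigmoid(-x) = 1 - sigmoid(x) and tanh(-x) = -tanh(x) mean
    // only |x| needs to be looked up.
    inline float TanhViaSigmoid(float x) {
      return 2.0f * SigmoidRef(2.0f * x) - 1.0f;
    }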
+
+ // TODO(b/77858996): Add these to gemmlowp.
+ template <typename IntegerType>
+ IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) {
+ static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
+ return a;
+ }
+
+ template <>
+ inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) {
+ std::int64_t a64 = a;
+ std::int64_t b64 = b;
+ std::int64_t sum = a64 + b64;
+ return static_cast<std::int32_t>(std::min(
+ static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
+ std::max(
+ static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
+ sum)));
+ }
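For example (editorial): plain int32_t addition would overflow here; the 64-bit detour clamps instead.

    #include <cstdint>
    #include <limits>

    inline bool SaturatingAddClampsAtMax() {
      const std::int32_t big = std::numeric_limits<std::int32_t>::max();
      return tflite_micro::SaturatingAddNonGemmlowp(big, 1) == big;  // no wrap
    }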
+
+ template <typename tRawType, int tIntegerBits>
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingAddNonGemmlowp(
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
+ return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+ SaturatingAddNonGemmlowp(a.raw(), b.raw()));
+ }
+
+ template <typename IntegerType>
+ IntegerType SaturatingSub(IntegerType a, IntegerType b) {
+ static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
+ return a;
+ }
+
+ template <>
+ inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) {
+ std::int32_t a32 = a;
+ std::int32_t b32 = b;
+ std::int32_t diff = a32 - b32;
+ return static_cast<std::int16_t>(
+ std::min(static_cast<int32_t>(32767),
+ std::max(static_cast<int32_t>(-32768), diff)));
+ }
+
+ template <>
+ inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) {
+ std::int64_t a64 = a;
+ std::int64_t b64 = b;
+ std::int64_t diff = a64 - b64;
+ return static_cast<std::int32_t>(std::min(
+ static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
+ std::max(
+ static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
+ diff)));
+ }
+
+ template <typename tRawType, int tIntegerBits>
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingSub(
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
+ return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+ SaturatingSub(a.raw(), b.raw()));
+ }
+ // End section to be moved to gemmlowp.
+
+ template <typename IntegerType>
+ IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) {
+ if (exponent == 0) {
+ return x;
+ }
+ using ScalarIntegerType =
+ typename gemmlowp::FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
+ const IntegerType min =
+ gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
+ const IntegerType max =
+ gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
+ const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);
+
+ const std::int32_t threshold =
+ ((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1);
+ const IntegerType positive_mask =
+ gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup<IntegerType>(threshold));
+ const IntegerType negative_mask =
+ gemmlowp::MaskIfLessThan(x, gemmlowp::Dup<IntegerType>(-threshold));
+
+ IntegerType result = gemmlowp::ShiftLeft(x, exponent);
+ result = gemmlowp::SelectUsingMask(positive_mask, max, result);
+ result = gemmlowp::SelectUsingMask(negative_mask, min, result);
+ return result;
+ }
+
+ // If we want to leave IntegerBits fixed, then multiplication
+ // by a power of two has to be saturating/rounding, not exact anymore.
+ template <typename tRawType, int tIntegerBits>
+ gemmlowp::FixedPoint<tRawType, tIntegerBits>
+ SaturatingRoundingMultiplyByPOTParam(
+ gemmlowp::FixedPoint<tRawType, tIntegerBits> a, int exponent) {
+ return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
+ SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
+ }
+
+ // Convert int32_t multiplier to int16_t with rounding.
+ inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
+ int16_t* multiplier_int16_t) {
+ TFLITE_DCHECK_GE(multiplier_int32_t, 0);
+ static constexpr int32_t kRoundingOffset = 1 << 15;
+ if (multiplier_int32_t >=
+ std::numeric_limits<int32_t>::max() - kRoundingOffset) {
+ *multiplier_int16_t = std::numeric_limits<int16_t>::max();
+ return;
+ }
+ const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
+ TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
+ TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
+ *multiplier_int16_t = result;
+ TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
+ }
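A worked example (my arithmetic, not from the source): for the Q0.31 value 1518500250 ≈ 0.7071, the function computes (1518500250 + 32768) >> 16 = 23170, i.e. ≈ 0.7071 in Q0.15.

    #include <cstdint>

    inline void DownScaleExample() {
      int16_t m16 = 0;
      tflite_micro::DownScaleInt32ToInt16Multiplier(1518500250, &m16);
      // m16 == 23170; 23170 / 32768 ≈ 0.70709 matches 1518500250 / 2^31 ≈ 0.70711.
    }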
+
+ // Minimum output bits to accommodate log of maximum input range. It actually
+ // does not matter if one considers, say, [-64,64] or [-64,64).
+ //
+ // For example, run this through Octave:
+ // [0:127; ...
+ // ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
+ // ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
+ constexpr int min_log_x_output_bits(int input_bits) {
+ return input_bits > 90 ? 7
+ : input_bits > 44 ? 6
+ : input_bits > 21 ? 5
+ : input_bits > 10 ? 4
+ : input_bits > 4 ? 3
+ : input_bits > 1 ? 2
+ : 1;
+ }
+
+ // Although currently the name of this function says that it cannot handle
+ // values less than 1, in practice it can handle inputs as low as 1/x_max,
+ // where x_max is the largest representable input. In other words, the output
+ // range is symmetric.
+ template <int OutputIntegerBits, int InputIntegerBits>
+ inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
+ log_x_for_x_greater_than_or_equal_to_1_impl(
+ gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
+ // assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
+ // assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
+ using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
+ // The reason for accumulating the result with an extra bit of headroom is
+ // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
+ // recip_denom will otherwise introduce an error.
+ static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
+ using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
+
+ const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1488522236, std::log(2.0));
+ const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5)));
+ const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1518500250, std::sqrt(0.5));
+ const FixedPoint0 one_quarter =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0);
+
+ const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 1057819769,
+ 2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0)));
+ const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
+ FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0)));
+
+ const FixedPointAccum shifted_quarter =
+ gemmlowp::Rescale<kAccumIntegerBits>(one_quarter);
+
+ // Reinterpret the input value as Q0.31, because we will figure out the
+ // required shift "ourselves" instead of using, say, Rescale.
+ FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
+ // z_a_pow_2 = input_integer_bits - z_a_headroom;
+ int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
+ FixedPoint0 r_a_tmp =
+ SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
+ const int32_t r_a_raw =
+ SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
+ // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
+ // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
+ // InputIntegerBits - z_b_headroom - 0.25);
+ const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
+ FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
+ static_cast<int32_t>(InputIntegerBits - z_a_headroom_plus_1),
+ 31 - kAccumIntegerBits)),
+ shifted_quarter);
+
+ // z_b is treated like z_a, but premultiplying by sqrt(0.5).
+ FixedPoint0 z_b = z_a * sqrt_half;
+ int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
+ const int32_t r_b_raw =
+ SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
+ const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
+ FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
+ static_cast<int32_t>(InputIntegerBits - z_b_headroom),
+ 31 - kAccumIntegerBits)),
+ shifted_quarter);
+
+ const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));
+ const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw(
+ std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw()));
+
+ const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half);
+ FixedPoint0 q = r - sqrt_sqrt_half;
+ q = q + q;
+
+ const FixedPoint0 common_sq = q * q;
+ const FixedPoint0 num = q * r + q * common_sq * alpha_n;
+ const FixedPoint0 denom_minus_one_0 =
+ p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q;
+ const FixedPoint0 recip_denom =
+ one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0);
+
+ const FixedPointAccum num_scaled = gemmlowp::Rescale<kAccumIntegerBits>(num);
+ return gemmlowp::Rescale<OutputIntegerBits>(z_pow_2_adj * log_2 +
+ num_scaled * recip_denom);
+ }
+
+ template <int OutputIntegerBits, int InputIntegerBits>
+ inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
+ log_x_for_x_greater_than_or_equal_to_1(
+ gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
+ static_assert(
+ OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
+ "Output integer bits must be sufficient to accommodate logs of inputs.");
+ return log_x_for_x_greater_than_or_equal_to_1_impl<OutputIntegerBits,
+ InputIntegerBits>(
+ input_val);
+ }
+
+ inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
+ int* num_bits_over_unit) {
+ int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
+ // This is the number of bits to the left of the binary point above 1.0.
+ // Consider x=1.25. In that case shifted_scale=0.8 and
+ // no later adjustment will be needed.
+ *num_bits_over_unit = x_integer_digits - headroom_plus_one;
+ const int32_t shifted_sum_minus_one =
+ static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
+ (static_cast<uint32_t>(1) << 31));
+
+ gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
+ gemmlowp::one_over_one_plus_x_for_x_in_0_1(
+ gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
+ return shifted_scale.raw();
+ }
+
+ inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
+ int32_t* output_inv_sqrt,
+ int* output_shift) {
+ TFLITE_DCHECK_GE(input, 0);
+ if (input <= 1) {
+ // Handle the input value 1 separately to avoid overflow in that case
+ // in the general computation below (b/143972021). Also handle 0 as if it
+ // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid
+ // but rare/unrealistic input value. We can expect both to occur in some
+ // incompletely trained models, but probably not in fully trained models.
+ *output_inv_sqrt = std::numeric_limits<std::int32_t>::max();
+ *output_shift = 0;
+ return;
+ }
+ TFLITE_DCHECK_GT(input, 1);
+ *output_shift = 11;
+ while (input >= (1 << 29)) {
+ input /= 4;
+ ++*output_shift;
+ }
+ const unsigned max_left_shift_bits =
+ CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
+ const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
+ const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
+ *output_shift -= left_shift_bit_pairs;
+ input <<= 2 * left_shift_bit_pairs;
+ TFLITE_DCHECK_GE(input, (1 << 27));
+ TFLITE_DCHECK_LT(input, (1 << 29));
+ using gemmlowp::FixedPoint;
+ using gemmlowp::Rescale;
+ using gemmlowp::SaturatingRoundingMultiplyByPOT;
+ // Using 3 integer bits gives us enough room for the internal arithmetic in
+ // this Newton-Raphson iteration.
+ using F3 = FixedPoint<int32_t, 3>;
+ using F0 = FixedPoint<int32_t, 0>;
+ const F3 fixedpoint_input = F3::FromRaw(input >> 1);
+ const F3 fixedpoint_half_input =
+ SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
+ const F3 fixedpoint_half_three =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5);
+ // Newton-Raphson iteration
+ // Naive unoptimized starting guess: x = 1
+ F3 x = F3::One();
+ // Naive unoptimized number of iterations: 5
+ for (int i = 0; i < 5; i++) {
+ const F3 x3 = Rescale<3>(x * x * x);
+ x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3);
+ }
+ const F0 fixedpoint_half_sqrt_2 =
+ GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.);
+ x = x * fixedpoint_half_sqrt_2;
+ *output_inv_sqrt = x.raw();
+ if (*output_shift < 0) {
+ *output_inv_sqrt <<= -*output_shift;
+ *output_shift = 0;
+ }
+ // Convert right shift (right is positive) to left shift.
+ *output_shift *= reverse_shift;
+ }
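The loop above is the classic Newton-Raphson recurrence for 1/sqrt(a), x_{n+1} = x_n * (1.5 - 0.5 * a * x_n^2), carried out in Q3.28 on an input normalized into [0.25, 1). A float reference of the same recurrence (editorial sketch):

    inline float InvSqrtNewtonRef(float a) {
      float x = 1.0f;  // same naive starting guess as the fixed-point code
      for (int i = 0; i < 5; ++i) {
        x = x * (1.5f - 0.5f * a * x * x);
      }
      return x;  // approximately 1/sqrt(a) for a in roughly [0.25, 1)
    }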
+
+ // DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
+ // BROADCASTING.
+ //
+ // NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
+ // rectangular array of numbers.
+ //
+ // NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
+ // However, as Dims<N> is to be deprecated, this class exists as an adaptor
+ // to enable simple unoptimized implementations of element-wise broadcasting
+ // operations.
+ template <int N>
+ struct NdArrayDesc {
+ // The "extent" of each dimension. Indices along dimension d must be in the
+ // half-open interval [0, extents[d]).
+ int extents[N];
+
+ // The number of *elements* (not bytes) between consecutive indices of each
+ // dimension.
+ int strides[N];
+ };
+
+ // DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
+ // BROADCASTING.
+ //
+ // Same as Offset(), except it takes an NdArrayDesc<N> instead of a Dims<N>.
+ inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
+ int i3) {
+ TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
+ TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
+ TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
+ TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
+ return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
+ i3 * desc.strides[3];
+ }
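Concretely (editorial example): for a contiguous row-major 2x3x4x5 array, extents = {2, 3, 4, 5} and strides = {60, 20, 5, 1}, so SubscriptToIndex(desc, 1, 2, 3, 4) = 60 + 40 + 15 + 4 = 119, the last element.

    inline int NdArrayDescExample() {
      tflite_micro::NdArrayDesc<4> desc;
      const int extents[4] = {2, 3, 4, 5};
      int stride = 1;
      for (int d = 3; d >= 0; --d) {  // innermost dimension is contiguous
        desc.extents[d] = extents[d];
        desc.strides[d] = stride;
        stride *= extents[d];
      }
      return tflite_micro::SubscriptToIndex(desc, 1, 2, 3, 4);  // == 119
    }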
+
+ inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) {
+ return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
+ indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
+ indexes[4] * desc.strides[4];
+ }
+
+ inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) {
+ return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
+ indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
+ indexes[4] * desc.strides[4] + indexes[5] * desc.strides[5] +
+ indexes[6] * desc.strides[6] + indexes[7] * desc.strides[7];
+ }
+
+ // Given the dimensions of the operands for an element-wise binary broadcast,
+ // adjusts them so that they can be directly iterated over with simple loops.
+ // Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
+ // 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
+ //
+ // This function assumes that the two input shapes are compatible up to
+ // broadcasting and the shorter one has already been prepended with 1s to be the
+ // same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
+ // shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
+ // Dims<N> refers to shapes in reverse order. In this case, input0_dims will be
+ // (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
+ //
+ // When two shapes are compatible up to broadcasting, for each dimension d,
+ // the input extents are either equal, or one of them is 1.
+ //
+ // This function performs the following for each dimension d:
+ // - If the extents are equal, then do nothing since the loop that walks over
+ // both of the input arrays is correct.
+ // - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
+ // and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
+ // array0 to be referenced *at any index* in dimension d and still access the
+ // same slice.
+ template <int N>
+ inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
+ const Dims<N>& input1_dims,
+ NdArrayDesc<N>* desc0_out,
+ NdArrayDesc<N>* desc1_out) {
+ TFLITE_DCHECK(desc0_out != nullptr);
+ TFLITE_DCHECK(desc1_out != nullptr);
+
+ // Copy dims to desc.
+ for (int i = 0; i < N; ++i) {
+ desc0_out->extents[i] = input0_dims.sizes[i];
+ desc0_out->strides[i] = input0_dims.strides[i];
+ desc1_out->extents[i] = input1_dims.sizes[i];
+ desc1_out->strides[i] = input1_dims.strides[i];
+ }
+
+ // Walk over each dimension. If the extents are equal do nothing.
+ // Otherwise, set the desc with extent 1 to have extent equal to the other and
+ // stride 0.
+ for (int i = 0; i < N; ++i) {
+ const int extent0 = ArraySize(input0_dims, i);
+ const int extent1 = ArraySize(input1_dims, i);
+ if (extent0 != extent1) {
+ if (extent0 == 1) {
+ desc0_out->strides[i] = 0;
+ desc0_out->extents[i] = extent1;
+ } else {
+ TFLITE_DCHECK_EQ(extent1, 1);
+ desc1_out->strides[i] = 0;
+ desc1_out->extents[i] = extent0;
+ }
+ }
+ }
+ }
+
+ // Copies dims to desc, calculating strides.
+ template <int N>
+ TFLITE_NOINLINE void CopyDimsToDesc(const RuntimeShape& input_shape,
+ NdArrayDesc<N>* desc_out) {
+ int desc_stride = 1;
+ for (int i = N - 1; i >= 0; --i) {
+ desc_out->extents[i] = input_shape.Dims(i);
+ desc_out->strides[i] = desc_stride;
+ desc_stride *= input_shape.Dims(i);
+ }
+ }
+
+ template <int N>
+ inline void NdArrayDescsForElementwiseBroadcast(
+ const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
+ NdArrayDesc<N>* desc0_out, NdArrayDesc<N>* desc1_out) {
+ TFLITE_DCHECK(desc0_out != nullptr);
+ TFLITE_DCHECK(desc1_out != nullptr);
+
+ auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
+ auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
+
+ // Copy dims to desc, calculating strides.
+ CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
+ CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
+
+ // Walk over each dimension. If the extents are equal do nothing.
+ // Otherwise, set the desc with extent 1 to have extent equal to the other and
+ // stride 0.
+ for (int i = 0; i < N; ++i) {
+ const int extent0 = extended_input0_shape.Dims(i);
+ const int extent1 = extended_input1_shape.Dims(i);
+ if (extent0 != extent1) {
+ if (extent0 == 1) {
+ desc0_out->strides[i] = 0;
+ desc0_out->extents[i] = extent1;
+ } else {
+ TFLITE_DCHECK_EQ(extent1, 1);
+ desc1_out->strides[i] = 0;
+ desc1_out->extents[i] = extent0;
+ }
+ }
+ }
+ }
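For instance (an editorial sketch, assuming RuntimeShape's initializer-list constructor): broadcasting (1, 16, 16, 64) against (1, 1, 1, 64) zeroes the y and x strides of the second operand, so every (y, x) subscript lands on its single 64-element row.

    inline void BroadcastDescExample() {
      const tflite_micro::RuntimeShape shape0({1, 16, 16, 64});
      const tflite_micro::RuntimeShape shape1({1, 1, 1, 64});
      tflite_micro::NdArrayDesc<4> desc0, desc1;
      tflite_micro::NdArrayDescsForElementwiseBroadcast(shape0, shape1, &desc0,
                                                        &desc1);
      // desc1 now has extents {1, 16, 16, 64}; its strides for dims 1 and 2 are
      // 0, so SubscriptToIndex(desc1, 0, y, x, c) == c for every y and x.
    }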
+
+ template <int N>
+ inline void NdArrayDescsForElementwiseBroadcast(
+ const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
+ const RuntimeShape& input2_shape, NdArrayDesc<N>* desc0_out,
+ NdArrayDesc<N>* desc1_out, NdArrayDesc<N>* desc2_out) {
+ TFLITE_DCHECK(desc0_out != nullptr);
+ TFLITE_DCHECK(desc1_out != nullptr);
+ TFLITE_DCHECK(desc2_out != nullptr);
+
+ auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
+ auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
+ auto extended_input2_shape = RuntimeShape::ExtendedShape(N, input2_shape);
+
+ // Copy dims to desc, calculating strides.
+ CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
+ CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
+ CopyDimsToDesc<N>(extended_input2_shape, desc2_out);
+
+ // Walk over each dimension. If the extents are equal do nothing.
+ // Otherwise, set the desc with extent 1 to have extent equal to the other and
+ // stride 0.
+ for (int i = 0; i < N; ++i) {
+ const int extent0 = extended_input0_shape.Dims(i);
+ const int extent1 = extended_input1_shape.Dims(i);
+ const int extent2 = extended_input2_shape.Dims(i);
+
+ int extent = extent0;
+ if (extent1 != 1) extent = extent1;
+ if (extent2 != 1) extent = extent2;
+
+ TFLITE_DCHECK(extent0 == 1 || extent0 == extent);
+ TFLITE_DCHECK(extent1 == 1 || extent1 == extent);
+ TFLITE_DCHECK(extent2 == 1 || extent2 == extent);
+
+ if (!(extent0 == extent1 && extent1 == extent2)) {
+ if (extent0 == 1) {
+ desc0_out->strides[i] = 0;
+ desc0_out->extents[i] = extent;
+ }
+ if (extent1 == 1) {
+ desc1_out->strides[i] = 0;
+ desc1_out->extents[i] = extent;
+ }
+ if (extent2 == 1) {
+ desc2_out->strides[i] = 0;
+ desc2_out->extents[i] = extent;
+ }
+ }
+ }
+ }
+
+ // Detailed implementation of NDOpsHelper; the indexes argument must be a
+ // zero-initialized array. This implementation is equivalent to N nested loops.
+ // For example, if N=4, it can be re-written as:
+ // for (int b = 0; b < output.extents[0]; ++b) {
+ // for (int y = 0; y < output.extents[1]; ++y) {
+ // for (int x = 0; x < output.extents[2]; ++x) {
+ // for (int c = 0; c < output.extents[3]; ++c) {
+ // calc({b,y,x,c});
+ // }
+ // }
+ // }
+ // }
+ template <int N, int DIM, typename Calc>
+ typename std::enable_if<DIM != N - 1, void>::type NDOpsHelperImpl(
+ const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
+ for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
+ NDOpsHelperImpl<N, DIM + 1, Calc>(output, calc, indexes);
+ }
+ }
+
+ template <int N, int DIM, typename Calc>
+ typename std::enable_if<DIM == N - 1, void>::type NDOpsHelperImpl(
+ const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
+ for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
+ calc(indexes);
+ }
+ }
+
+ // Execute the calc function in the innermost iteration based on the shape of
+ // the output. The calc function should take a single argument of type int[N].
+ template <int N, typename Calc>
+ inline void NDOpsHelper(const NdArrayDesc<N>& output, const Calc& calc) {
+ int indexes[N] = {0};
+ NDOpsHelperImpl<N, 0, Calc>(output, calc, indexes);
+ }
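A usage sketch (editorial): a broadcast binary op can walk the output descriptor once and rely on the stride-0 trick through SubscriptToIndex.

    inline void BroadcastAddSketch(const tflite_micro::NdArrayDesc<4>& desc0,
                                   const tflite_micro::NdArrayDesc<4>& desc1,
                                   const tflite_micro::NdArrayDesc<4>& out_desc,
                                   const float* in0, const float* in1,
                                   float* out) {
      int out_pos = 0;  // the output is dense and visited in iteration order
      auto add = [&](int indexes[4]) {
        out[out_pos++] =
            in0[tflite_micro::SubscriptToIndex(desc0, indexes[0], indexes[1],
                                               indexes[2], indexes[3])] +
            in1[tflite_micro::SubscriptToIndex(desc1, indexes[0], indexes[1],
                                               indexes[2], indexes[3])];
      };
      tflite_micro::NDOpsHelper<4>(out_desc, add);
    }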
+
+ // Copied from gemmlowp::RoundDown when we dropped direct dependency on
+ // gemmlowp.
+ //
+ // Returns the runtime argument rounded down to the nearest multiple of
+ // the fixed Modulus.
+ template <unsigned Modulus, typename Integer>
+ Integer RoundDown(Integer i) {
+ return i - (i % Modulus);
+ }
+
+ // Copied from gemmlowp::RoundUp when we dropped direct dependency on
+ // gemmlowp.
+ //
+ // Returns the runtime argument rounded up to the nearest multiple of
+ // the fixed Modulus.
+ template <unsigned Modulus, typename Integer>
+ Integer RoundUp(Integer i) {
+ return RoundDown<Modulus>(i + Modulus - 1);
+ }
+
+ // Copied from gemmlowp::CeilQuotient when we dropped direct dependency on
+ // gemmlowp.
+ //
+ // Returns the quotient a / b rounded up ('ceil') to the nearest integer.
+ template <typename Integer>
+ Integer CeilQuotient(Integer a, Integer b) {
+ return (a + b - 1) / b;
+ }
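For example (editorial): with Modulus = 8, RoundDown<8>(13) == 8, RoundUp<8>(13) == 16, and CeilQuotient(13, 8) == 2.

    inline bool RoundingHelpersExample() {
      return tflite_micro::RoundDown<8>(13) == 8 &&
             tflite_micro::RoundUp<8>(13) == 16 &&
             tflite_micro::CeilQuotient(13, 8) == 2;
    }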
+
+ // This function is a copy of gemmlowp::HowManyThreads, copied when we dropped
+ // the direct dependency of internal/optimized/ on gemmlowp.
+ //
+ // It computes a reasonable number of threads to use for a GEMM of shape
+ // (rows, cols, depth).
+ //
+ // TODO(b/131910176): get rid of this function by switching each call site
+ // to its own more sensible logic for its own workload.
+ template <int KernelRows>
+ inline int LegacyHowManyThreads(int max_num_threads, int rows, int cols,
+ int depth) {
+ // Early-exit in the default case where multi-threading is disabled.
+ if (max_num_threads == 1) {
+ return 1;
+ }
+
+ // Ensure that each thread has KernelRows rows to process, if at all possible.
+ int thread_count = std::min(max_num_threads, rows / KernelRows);
+
+ // Limit the number of threads according to the overall size of the problem.
+ if (thread_count > 1) {
+ // Empirically determined value.
+ static constexpr std::uint64_t min_cubic_size_per_thread = 64 * 1024;
+
+ // We can only multiply two out of three sizes without risking overflow
+ const std::uint64_t cubic_size =
+ std::uint64_t(rows) * std::uint64_t(cols) * std::uint64_t(depth);
+
+ thread_count = std::min(
+ thread_count, static_cast<int>(cubic_size / min_cubic_size_per_thread));
+ }
+
+ if (thread_count < 1) {
+ thread_count = 1;
+ }
+
+ assert(thread_count > 0 && thread_count <= max_num_threads);
+ return thread_count;
+ }
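A worked example (my numbers): with KernelRows = 4, max_num_threads = 8 and a 64x64x64 GEMM, the row cap gives min(8, 64/4) = 8, then cubic_size = 262144 and 262144 / 65536 = 4, so 4 threads are chosen.

    inline int HowManyThreadsExample() {
      return tflite_micro::LegacyHowManyThreads<4>(
          /*max_num_threads=*/8, /*rows=*/64, /*cols=*/64, /*depth=*/64);  // == 4
    }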
+
+ template <typename T>
+ void optimized_ops_preload_l1_stream(const T* ptr) {
+ #ifdef __GNUC__
+ // builtin offered by GCC-compatible compilers including clang
+ __builtin_prefetch(ptr, /* 0 means read */ 0, /* 0 means no locality */ 0);
+ #else
+ (void)ptr;
+ #endif
+ }
+
+ template <typename T>
+ void optimized_ops_preload_l1_keep(const T* ptr) {
+ #ifdef __GNUC__
+ // builtin offered by GCC-compatible compilers including clang
+ __builtin_prefetch(ptr, /* 0 means read */ 0, /* 3 means high locality */ 3);
+ #else
+ (void)ptr;
+ #endif
+ }
+
+ template <typename T>
+ void optimized_ops_prefetch_write_l1_keep(const T* ptr) {
+ #ifdef __GNUC__
+ // builtin offered by GCC-compatible compilers including clang
+ __builtin_prefetch(ptr, /* 1 means write */ 1, /* 3 means high locality */ 3);
+ #else
+ (void)ptr;
+ #endif
+ }
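A typical use (editorial sketch; the 16-element lookahead is arbitrary): prefetch a little ahead of a streaming copy. On non-GCC-compatible compilers the helpers compile to nothing.

    inline void CopyWithPrefetch(const float* src, float* dst, int n) {
      constexpr int kLookahead = 16;  // hypothetical distance; tune per platform
      for (int i = 0; i < n; ++i) {
        if (i + kLookahead < n) {
          tflite_micro::optimized_ops_preload_l1_stream(src + i + kLookahead);
        }
        dst[i] = src[i];
      }
    }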
+
+ } // namespace tflite_micro
+
+ #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_