xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,194 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_

#include <algorithm>

#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite_micro {
namespace reference_integer_ops {

// Maximum dimension supported by the broadcast mul operation.
constexpr int kMaxMulBroadcastDim = 6;

template <typename InputType, typename OutputType>
void MulElementwise(int size, const ArithmeticParams& params,
                    const InputType* input1_data, const InputType* input2_data,
                    OutputType* output_data) {
  for (int i = 0; i < size; ++i) {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    const int32_t unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<OutputType>(clamped_output);
  }
}

template <typename T>
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  ruy::profiler::ScopeLabel label("Mul/8bit");
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);

  MulElementwise(flat_size, params, input1_data, input2_data, output_data);
}

// Mul with 16 bit inputs and int8_t outputs.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16_t* input1_data,
                const RuntimeShape& input2_shape, const int16_t* input2_data,
                const RuntimeShape& output_shape, int8_t* output_data) {
  ruy::profiler::ScopeLabel label("Mul/Int16Int8");
  int32_t output_offset = params.output_offset;
  int32_t output_activation_min = params.quantized_activation_min;
  int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);

  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;

    F0 unclamped_result =
        F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]);
    int16_t rescaled_result =
        gemmlowp::RoundingDivideByPOT(unclamped_result.raw(), 8);
    int16_t clamped_result = std::min<int16_t>(
        output_activation_max - output_offset, rescaled_result);
    clamped_result = std::max<int16_t>(output_activation_min - output_offset,
                                       clamped_result);
    output_data[i] = output_offset + clamped_result;
  }
}

template <typename T>
inline void BroadcastMul6DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("BroadcastMul6DSlow");

  NdArrayDesc<kMaxMulBroadcastDim> desc1;
  NdArrayDesc<kMaxMulBroadcastDim> desc2;
  // The input shapes are extended as part of NdArrayDesc initialization.
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape);
  // Cache output shape dimensions.
  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
              sizeof(extended_output_shape_dims));

  size_t input1_offset_a = 0;
  size_t input2_offset_a = 0;
  size_t output_offset_a = 0;
  for (int a = 0; a < extended_output_shape_dims[0]; ++a) {
    size_t input1_offset_d = input1_offset_a;
    size_t input2_offset_d = input2_offset_a;
    size_t output_offset_d = output_offset_a;
    for (int d = 0; d < extended_output_shape_dims[1]; ++d) {
      size_t input1_offset_b = input1_offset_d;
      size_t input2_offset_b = input2_offset_d;
      size_t output_offset_b = output_offset_d;
      for (int b = 0; b < extended_output_shape_dims[2]; ++b) {
        size_t input1_offset_y = input1_offset_b;
        size_t input2_offset_y = input2_offset_b;
        size_t output_offset_y = output_offset_b;
        for (int y = 0; y < extended_output_shape_dims[3]; ++y) {
          size_t input1_offset_x = input1_offset_y;
          size_t input2_offset_x = input2_offset_y;
          size_t output_offset_x = output_offset_y;
          for (int x = 0; x < extended_output_shape_dims[4]; ++x) {
            size_t input1_offset_c = input1_offset_x;
            size_t input2_offset_c = input2_offset_x;
            size_t output_offset_c = output_offset_x;
            for (int c = 0; c < extended_output_shape_dims[5]; ++c) {
              const int32_t input1_val =
                  params.input1_offset + input1_data[input1_offset_c];
              const int32_t input2_val =
                  params.input2_offset + input2_data[input2_offset_c];
              const int32_t unclamped_result =
                  params.output_offset +
                  MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                                params.output_multiplier,
                                                params.output_shift);
              const int32_t clamped_output = std::min(
                  params.quantized_activation_max,
                  std::max(params.quantized_activation_min, unclamped_result));
              output_data[output_offset_c] = static_cast<T>(clamped_output);
              input1_offset_c += desc1.strides[5];
              input2_offset_c += desc2.strides[5];
              ++output_offset_c;
            }
            input1_offset_x += desc1.strides[4];
            input2_offset_x += desc2.strides[4];
            output_offset_x += extended_output_shape_dims[5];
          }
          input1_offset_y += desc1.strides[3];
          input2_offset_y += desc2.strides[3];
          output_offset_y +=
              extended_output_shape_dims[4] * extended_output_shape_dims[5];
        }
        input1_offset_b += desc1.strides[2];
        input2_offset_b += desc2.strides[2];
        output_offset_b += extended_output_shape_dims[3] *
                           extended_output_shape_dims[4] *
                           extended_output_shape_dims[5];
      }
      input1_offset_d += desc1.strides[1];
      input2_offset_d += desc2.strides[1];
      output_offset_d +=
          extended_output_shape_dims[2] * extended_output_shape_dims[3] *
          extended_output_shape_dims[4] * extended_output_shape_dims[5];
    }
    input1_offset_a += desc1.strides[0];
    input2_offset_a += desc2.strides[0];
    output_offset_a +=
        extended_output_shape_dims[1] * extended_output_shape_dims[2] *
        extended_output_shape_dims[3] * extended_output_shape_dims[4] *
        extended_output_shape_dims[5];
  }
}

template <typename T>
inline void BroadcastMul4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape,
                     input2_data, output_shape, output_data);
}

}  // namespace reference_integer_ops
}  // namespace tflite_micro
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
ADDED
@@ -0,0 +1,264 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_

#include <algorithm>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite_micro {
namespace reference_integer_ops {

inline bool AveragePool(const PoolParams& params,
                        const RuntimeShape& input_shape,
                        const int8_t* input_data,
                        const RuntimeShape& output_shape, int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int channel = 0; channel < depth; ++channel) {
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Compute the boundaries of the filter region clamped so as to
          // ensure that the filter window fits in the input array.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          int32_t acc = 0;
          int filter_count = 0;
          for (int filter_y = filter_y_start; filter_y < filter_y_end;
               ++filter_y) {
            for (int filter_x = filter_x_start; filter_x < filter_x_end;
                 ++filter_x) {
              const int in_x = in_x_origin + filter_x;
              const int in_y = in_y_origin + filter_y;
              acc +=
                  input_data[Offset(input_shape, batch, in_y, in_x, channel)];
              filter_count++;
            }
          }
          if (filter_count == 0) return false;
          // Round to the closest integer value.
          acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                        : (acc - filter_count / 2) / filter_count;
          acc = std::max(acc, params.quantized_activation_min);
          acc = std::min(acc, params.quantized_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
              static_cast<int8_t>(acc);
        }
      }
    }
  }
  return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int8_t* input_data, const RuntimeShape& output_shape,
                    int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_GE(params.quantized_activation_min,
                   std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(params.quantized_activation_max,
                   std::numeric_limits<int8_t>::max());
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int channel = 0; channel < depth; ++channel) {
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Compute the boundaries of the filter region clamped so as to
          // ensure that the filter window fits in the input array.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          int8_t max = std::numeric_limits<int8_t>::lowest();
          for (int filter_y = filter_y_start; filter_y < filter_y_end;
               ++filter_y) {
            for (int filter_x = filter_x_start; filter_x < filter_x_end;
                 ++filter_x) {
              const int in_x = in_x_origin + filter_x;
              const int in_y = in_y_origin + filter_y;
              max = std::max(
                  max,
                  input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
            }
          }
          max = std::max<int8_t>(max, params.quantized_activation_min);
          max = std::min<int8_t>(max, params.quantized_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
              static_cast<int8_t>(max);
        }
      }
    }
  }
}

inline bool AveragePool(const PoolParams& params,
                        const RuntimeShape& input_shape,
                        const int16_t* input_data,
                        const RuntimeShape& output_shape,
                        int16_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int channel = 0; channel < depth; ++channel) {
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Compute the boundaries of the filter region clamped so as to
          // ensure that the filter window fits in the input array.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          int32_t acc = 0;
          int filter_count = 0;
          for (int filter_y = filter_y_start; filter_y < filter_y_end;
               ++filter_y) {
            for (int filter_x = filter_x_start; filter_x < filter_x_end;
                 ++filter_x) {
              const int in_x = in_x_origin + filter_x;
              const int in_y = in_y_origin + filter_y;
              acc +=
                  input_data[Offset(input_shape, batch, in_y, in_x, channel)];
              filter_count++;
            }
          }
          if (filter_count == 0) return false;
          // Round to the closest integer value.
          acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                        : (acc - filter_count / 2) / filter_count;
          acc = std::max(acc, params.quantized_activation_min);
          acc = std::min(acc, params.quantized_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
              static_cast<int16_t>(acc);
        }
      }
    }
  }
  return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int16_t* input_data, const RuntimeShape& output_shape,
                    int16_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_GE(params.quantized_activation_min,
                   std::numeric_limits<int16_t>::min());
  TFLITE_DCHECK_LE(params.quantized_activation_max,
                   std::numeric_limits<int16_t>::max());
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int channel = 0; channel < depth; ++channel) {
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Compute the boundaries of the filter region clamped so as to
          // ensure that the filter window fits in the input array.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          int16_t max = std::numeric_limits<int16_t>::lowest();
          for (int filter_y = filter_y_start; filter_y < filter_y_end;
               ++filter_y) {
            for (int filter_x = filter_x_start; filter_x < filter_x_end;
                 ++filter_x) {
              const int in_x = in_x_origin + filter_x;
              const int in_y = in_y_origin + filter_y;
              max = std::max(
                  max,
                  input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
            }
          }
          max = std::max<int16_t>(max, params.quantized_activation_min);
          max = std::min<int16_t>(max, params.quantized_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
              static_cast<int16_t>(max);
        }
      }
    }
  }
}

}  // namespace reference_integer_ops
}  // namespace tflite_micro

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
ADDED
@@ -0,0 +1,117 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_

#include <algorithm>
#include <limits>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite_micro {
namespace reference_integer_ops {

inline void Tanh(int32_t input_zero_point, int32_t input_range_radius,
                 int32_t input_multiplier, int32_t input_shift,
                 const RuntimeShape& input_shape, const int8_t* input_data,
                 const RuntimeShape& output_shape, int8_t* output_data) {
  // Integer bits must be in sync with Prepare() function.
  static constexpr int32_t kInputIntegerBits = 4;
  static constexpr int32_t kOutputScale = 7;
  static constexpr int32_t kMinInt8 = std::numeric_limits<int8_t>::min();
  static constexpr int32_t kMaxInt8 = std::numeric_limits<int8_t>::max();
  using F4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;

  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; ++i) {
    const int32_t input =
        static_cast<int32_t>(input_data[i]) - input_zero_point;
    if (input <= -input_range_radius) {
      output_data[i] = kMinInt8;
    } else if (input >= input_range_radius) {
      output_data[i] = kMaxInt8;
    } else {
      const int32_t input_in_q4 =
          MultiplyByQuantizedMultiplier(input, input_multiplier, input_shift);
      const int32_t output_in_q0 =
          gemmlowp::tanh(F4::FromRaw(input_in_q4)).raw();

      // Rescale and downcast.
      using gemmlowp::RoundingDivideByPOT;
      int32_t output_in_q24 =
          RoundingDivideByPOT(output_in_q0, 31 - kOutputScale);
      output_in_q24 = std::min(std::max(output_in_q24, kMinInt8), kMaxInt8);
      output_data[i] = static_cast<int8_t>(output_in_q24);
    }
  }
}

inline void Tanh(int32_t input_multiplier, int32_t input_left_shift,
                 const RuntimeShape& input_shape, const int16_t* ptr_input_data,
                 const RuntimeShape& output_shape, int16_t* ptr_output_data) {
  // We use the LUT for sigmoid and take into account, that
  // tanh(x) = 2*sigmoid(2*x) - 1

  // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7].
  // In case of general parameter scale, multiplier 3 is taken into account
  // in TanhPrepare function and it is included in
  // input_multiplier already.

  if (input_multiplier == 0) {  // power of two case
    input_multiplier = 3 << input_left_shift;
    input_left_shift = 0;
  }

  int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0;

  int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) {
    int32_t input_data =
        ((*ptr_input_data) * input_multiplier + round) >> input_left_shift;

    uint32_t abs_input_data = abs(input_data);
    uint32_t uh = abs_input_data >> 8;
    int32_t result;

    if (uh >= 255) {
      // Saturate to maximum.
      result = 0xFFFF << 8;
    } else {
      uint32_t ua = sigmoid_table_uint16[uh];
      uint32_t ub = sigmoid_table_uint16[uh + 1];

      uint8_t ut = abs_input_data & 0xFF;

      result = (ua << 8) + ut * (ub - ua);
    }

    result = (input_data >= 0)
                 ? (result - (1 << (14 + 9)) + (1 << (9 - 2)))
                 : (-result + (1 << (14 + 9)) + (1 << (9 - 2)) - 1);

    // Convert back to 16-bit.
    result >>= (9 - 1);

    *ptr_output_data = result;
  }
}

}  // namespace reference_integer_ops
}  // namespace tflite_micro

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
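
The int16 Tanh above evaluates tanh through the shared sigmoid lookup table, relying on the identity tanh(x) = 2*sigmoid(2x) - 1 noted in its comments. A standalone float check of that identity (no TFLM dependencies):

    #include <cassert>
    #include <cmath>

    int main() {
      for (float x = -8.0f; x <= 8.0f; x += 0.25f) {
        const float sigmoid_2x = 1.0f / (1.0f + std::exp(-2.0f * x));
        const float via_sigmoid = 2.0f * sigmoid_2x - 1.0f;
        assert(std::fabs(via_sigmoid - std::tanh(x)) < 1e-6f);
      }
      return 0;
    }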