xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
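
The `runtime/include` tree listed above is a vendored copy of TensorFlow Lite for Microcontrollers (plus the XMOS `lib_nn` and `lib_tflite_micro` layers), compiled into `libxtflitemicro.a`. As a rough orientation only, the sketch below shows how firmware typically wires these headers together; it is not code from the package. It assumes the stock TFLM API renamed into the `tflite_micro` namespace (as the hunks further down show), a hypothetical `RunOnce` wrapper and arena size, and it omits registration of the XMOS custom operators declared in `xcore_ops.h`.

```cpp
#include <cstdint>

#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace {
// Scratch arena for tensor allocations; the right size depends on the model.
constexpr size_t kArenaSize = 64 * 1024;
alignas(16) uint8_t tensor_arena[kArenaSize];
}  // namespace

// model_data points at a .tflite flatbuffer (e.g. one produced by xcore-opt).
TfLiteStatus RunOnce(const uint8_t* model_data) {
  const tflite_micro::Model* model = tflite_micro::GetModel(model_data);

  // Register only the builtin kernels the model actually uses.
  tflite_micro::MicroMutableOpResolver<4> resolver;
  resolver.AddConv2D();
  resolver.AddDepthwiseConv2D();
  resolver.AddFullyConnected();
  resolver.AddSoftmax();

  tflite_micro::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                             kArenaSize);
  if (interpreter.AllocateTensors() != kTfLiteOk) {
    return kTfLiteError;
  }

  // Fill interpreter.input(0)->data here, then run one inference.
  return interpreter.Invoke();
}
```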
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h
@@ -0,0 +1,491 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
+
+#include <algorithm>
+
+#include "ruy/profiler/instrumentation.h"  // from @ruy
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/max.h"
+#include "tensorflow/lite/kernels/internal/min.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+// Check if the reduction at index is the first one along the dimensions given
+// in axis.
+inline bool IsFirstReduction(const int* index, const int num_axis,
+                             const int* axis) {
+  if (num_axis == 0) {
+    return true;
+  }
+
+  TFLITE_DCHECK(index != nullptr);
+  TFLITE_DCHECK(axis != nullptr);
+  for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) {
+    if (index[axis[axis_idx]] != 0) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+namespace tflite_micro {
+
+namespace reference_ops {
+
+// A generic reduce method that can be used for reduce_sum, reduce_mean, etc.
+// This method iterates through input data and reduce elements along the
+// dimensions given in axis.
+template <typename In, typename Out>
+inline bool Reduce(const In* input_data, const int* input_dims,
+                   const int* output_dims, const int input_num_dims,
+                   const int output_num_dims, const int* axis,
+                   const int num_axis, int* input_iter,
+                   Out reducer(Out current, const In in), Out* output_data) {
+  // Reset input iterator.
+  for (int idx = 0; idx < input_num_dims; ++idx) {
+    input_iter[idx] = 0;
+  }
+  // Iterate through input_data.
+  do {
+    size_t input_offset =
+        ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr);
+    size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
+                                               input_iter, num_axis, axis);
+    output_data[output_offset] =
+        reducer(output_data[output_offset], input_data[input_offset]);
+  } while (NextIndex(input_num_dims, input_dims, input_iter));
+  return true;
+}
+
+// Similar to above Reduce function but takes two reducer functions.
+// The 'reducer_first' is called with the first value of the reduction,
+// 'reducer_next' is then called for all the others.
+template <typename In, typename Out>
+inline bool Reduce(const In* input_data, const int* input_dims,
+                   const int* output_dims, const int input_num_dims,
+                   const int output_num_dims, const int* axis,
+                   const int num_axis, int* input_iter,
+                   const std::function<Out(In in)>& reducer_first,
+                   const std::function<Out(Out current, In in)>& reducer_next,
+                   Out* output_data) {
+  // Reset input iterator.
+  for (int idx = 0; idx < input_num_dims; ++idx) {
+    input_iter[idx] = 0;
+  }
+  // Iterate through input_data.
+  do {
+    size_t input_offset =
+        ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr);
+    size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
+                                               input_iter, num_axis, axis);
+    if (IsFirstReduction(input_iter, num_axis, axis)) {
+      output_data[output_offset] = reducer_first(input_data[input_offset]);
+    } else {
+      output_data[output_offset] =
+          reducer_next(output_data[output_offset], input_data[input_offset]);
+    }
+  } while (NextIndex(input_num_dims, input_dims, input_iter));
+  return true;
+}
+
+// This method parses the input 'axis' to remove duplicates and handle negative
+// values, and returns a valid 'out_axis'
+inline bool ResolveAxis(const int num_dims, const int* axis,
+                        const int64_t num_axis, int* out_axis,
+                        int* out_num_axis) {
+  *out_num_axis = 0;  // Just in case.
+  // Short-circuit axis resolution for scalars; the axis will go unused.
+  if (num_dims == 0) {
+    return true;
+  }
+  // o(n^2) is fine since out_num_axis should be really small, mostly <= 4
+  for (int64_t idx = 0; idx < num_axis; ++idx) {
+    // Handle negative index. A positive index 'p_idx' can be represented as a
+    // negative index 'n_idx' as: n_idx = p_idx-num_dims
+    // eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */
+    int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
+    TFLITE_DCHECK(current >= 0 && current < num_dims);
+    if (current < 0 || current >= num_dims) {
+      return false;
+    }
+    bool is_dup = false;
+    for (int j = 0; j < *out_num_axis; ++j) {
+      if (out_axis[j] == current) {
+        is_dup = true;
+        break;
+      }
+    }
+    if (!is_dup) {
+      out_axis[*out_num_axis] = current;
+      *out_num_axis += 1;
+    }
+  }
+  return true;
+}
+
+// This method expects that output_data has been initialized.
+template <typename In, typename Out>
+inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
+                          const int* output_dims, const int input_num_dims,
+                          const int output_num_dims, const int* axis,
+                          const int num_axis, int* input_iter,
+                          Out* output_data) {
+  auto reducer = [](const Out current, const In in) -> Out {
+    const Out actual_in = static_cast<Out>(in);
+    return current + actual_in;
+  };
+  return Reduce<In, Out>(input_data, input_dims, output_dims, input_num_dims,
+                         output_num_dims, axis, num_axis, input_iter, reducer,
+                         output_data);
+}
+
+template <typename T>
+inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
+                                    const T init_value, T* data) {
+  size_t num_elements = 1;
+  for (int idx = 0; idx < num_dims; ++idx) {
+    size_t current = static_cast<size_t>(dims[idx]);
+    // Overflow prevention.
+    if (current > 0 &&
+        num_elements > std::numeric_limits<size_t>::max() / current) {
+      return false;
+    }
+    num_elements *= current;
+  }
+  for (size_t idx = 0; idx < num_elements; ++idx) {
+    data[idx] = init_value;
+  }
+  return true;
+}
+
+// Computes the generic value (i.e., sum/max/min/prod) of elements across
+// dimensions given in axis. It needs to pass in init_value and reducer.
+template <typename T>
+inline bool ReduceGeneric(const T* input_data, const int* input_dims,
+                          const int input_num_dims, T* output_data,
+                          const int* output_dims, const int output_num_dims,
+                          const int* axis, const int64_t num_axis_dimensions,
+                          bool keep_dims, int* temp_index, int* resolved_axis,
+                          T init_value,
+                          T reducer(const T current, const T in)) {
+  // Reset output data.
+  if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
+                               output_data)) {
+    return false;
+  }
+
+  // Return early when input shape has zero dim. This is done after initializing
+  // data for output tensor because there are cases that the input tensor is
+  // empty but output tensor is not. In that case, output tensor should be
+  // filled with init_value.
+  for (int i = 0; i < input_num_dims; ++i) {
+    if (input_dims[i] == 0) return true;
+  }
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+                   &num_resolved_axis)) {
+    return false;
+  }
+
+  return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+                      output_num_dims, resolved_axis, num_resolved_axis,
+                      temp_index, reducer, output_data);
+}
+
+// Computes the mean of elements across dimensions given in axis.
+// It does so in two stages, first calculates the sum of elements along the axis
+// then divides it by the number of element in axis.
+template <typename T, typename U>
+inline bool Mean(const T* input_data, const int* input_dims,
+                 const int input_num_dims, T* output_data,
+                 const int* output_dims, const int output_num_dims,
+                 const int* axis, const int num_axis_dimensions, bool keep_dims,
+                 int* temp_index, int* resolved_axis, U* temp_sum) {
+  ruy::profiler::ScopeLabel label("Mean");
+  // Reset output data.
+  size_t num_outputs = 1;
+  for (int idx = 0; idx < output_num_dims; ++idx) {
+    size_t current = static_cast<size_t>(output_dims[idx]);
+    // Overflow prevention.
+    if (num_outputs > std::numeric_limits<size_t>::max() / current) {
+      return false;
+    }
+    num_outputs *= current;
+  }
+  for (size_t idx = 0; idx < num_outputs; ++idx) {
+    output_data[idx] = T();
+    temp_sum[idx] = U();
+  }
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+                   &num_resolved_axis)) {
+    return false;
+  }
+
+  if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
+                           output_num_dims, resolved_axis, num_resolved_axis,
+                           temp_index, temp_sum)) {
+    return false;
+  }
+
+  // Calculate mean by dividing output_data by num of aggregated element.
+  size_t num_elements_in_axis = 1;
+  for (int idx = 0; idx < num_resolved_axis; ++idx) {
+    size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
+    // Overflow prevention.
+    if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
+      return false;
+    }
+    num_elements_in_axis *= current;
+  }
+
+  if (num_elements_in_axis > 0) {
+    for (size_t idx = 0; idx < num_outputs; ++idx) {
+      output_data[idx] =
+          static_cast<T>(temp_sum[idx] / static_cast<U>(num_elements_in_axis));
+    }
+  }
+  return true;
+}
+
+inline void Mean(const tflite_micro::MeanParams& op_params,
+                 const RuntimeShape& unextended_input_shape,
+                 const float* input_data,
+                 const RuntimeShape& unextended_output_shape,
+                 float* output_data) {
+  ruy::profiler::ScopeLabel label("Mean4D");
+
+  // Current implementation only supports dimension equals 4 and simultaneous
+  // reduction over width and height.
+  TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
+  TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
+  const RuntimeShape input_shape =
+      RuntimeShape::ExtendedShape(4, unextended_input_shape);
+  const RuntimeShape output_shape =
+      RuntimeShape::ExtendedShape(4, unextended_output_shape);
+
+  const int output_batch = output_shape.Dims(0);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const int output_depth = output_shape.Dims(3);
+
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+
+  TFLITE_CHECK_EQ(op_params.axis_count, 2);
+  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
+               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
+  TFLITE_CHECK_EQ(output_height, 1);
+  TFLITE_CHECK_EQ(output_width, 1);
+
+  for (int out_b = 0; out_b < output_batch; ++out_b) {
+    for (int out_d = 0; out_d < output_depth; ++out_d) {
+      float value = 0;
+      for (int in_h = 0; in_h < input_height; ++in_h) {
+        for (int in_w = 0; in_w < input_width; ++in_w) {
+          value += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
+        }
+      }
+      output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
+          value / (input_width * input_height);
+    }
+  }
+}
+
+// Computes the mean of elements across dimensions given in axis.
+// It does so in two stages, first calculates the sum of elements along the axis
+// then divides it by the number of element in axis for quantized values.
+template <typename T, typename U>
+inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
+                               const int* input_dims, const int input_num_dims,
+                               T* output_data, int32_t output_multiplier,
+                               int output_shift, int32_t output_zero_point,
+                               const int* output_dims,
+                               const int output_num_dims, const int* axis,
+                               const int num_axis_dimensions, bool keep_dims,
+                               int* temp_index, int* resolved_axis, U* temp_sum,
+                               bool compute_sum) {
+  const int32_t kMinValue = std::numeric_limits<T>::min();
+  const int32_t kMaxValue = std::numeric_limits<T>::max();
+  const bool uint8_case = std::is_same<T, uint8_t>::value;
+  const bool int16_case = std::is_same<T, int16_t>::value;
+  if (uint8_case) {
+    ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8");
+  } else if (int16_case) {
+    ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16");
+  } else {
+    ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8");
+  }
+  // Reset output data.
+  size_t num_outputs = 1;
+  for (int idx = 0; idx < output_num_dims; ++idx) {
+    size_t current = static_cast<size_t>(output_dims[idx]);
+    // Overflow prevention.
+    if (num_outputs > std::numeric_limits<size_t>::max() / current) {
+      return false;
+    }
+    num_outputs *= current;
+  }
+  for (size_t idx = 0; idx < num_outputs; ++idx) {
+    output_data[idx] = T();
+    temp_sum[idx] = U();
+  }
+
+  // Return early when input shape has zero dim. This is done after initializing
+  // data for output tensor because there are cases that the input tensor is
+  // empty but output tensor is not. In that case, output tensor should be
+  // filled with init_value.
+  for (int i = 0; i < input_num_dims; ++i) {
+    if (input_dims[i] == 0) return true;
+  }
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+                   &num_resolved_axis)) {
+    return false;
+  }
+
+  if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
+                           output_num_dims, resolved_axis, num_resolved_axis,
+                           temp_index, temp_sum)) {
+    return false;
+  }
+
+  // Calculate mean by dividing output_data by num of aggregated element.
+  int64_t num_elements_in_axis = 1;
+  for (int idx = 0; idx < num_resolved_axis; ++idx) {
+    size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
+    // Overflow prevention.
+    if (current > static_cast<size_t>(std::numeric_limits<int64_t>::max() /
+                                      num_elements_in_axis)) {
+      return false;
+    }
+    num_elements_in_axis *= current;
+  }
+
+  if (num_elements_in_axis == 0) {
+    return true;
+  }
+
+  // Readapt output rescaling when calculating the mean to integrate a
+  // 1/num_elements_in_axis multiplier.
+  if (!compute_sum) {
+    TFLITE_DCHECK_GE(num_elements_in_axis, 0);
+    int shift =
+        63 - CountLeadingZeros(static_cast<uint64_t>(num_elements_in_axis));
+    // To avoid any overflow risk 'shift' should be <= 32 and to satisfy
+    // 'MultiplyByQuantizedMultiplier' pre-conditions 'output_shift - shift'
+    // should be >= -31. Clamp the value at the price of some precision loss.
+    shift = std::min(shift, 32);
+    shift = std::min(shift, 31 + output_shift);
+    output_multiplier = static_cast<int32_t>(
+        (static_cast<int64_t>(output_multiplier) << shift) /
+        num_elements_in_axis);
+    output_shift = output_shift - shift;
+  }
+
+  for (size_t idx = 0; idx < num_outputs; ++idx) {
+    const U shifted_sum =
+        static_cast<U>(temp_sum[idx] - input_zero_point * num_elements_in_axis);
+    int32_t output = MultiplyByQuantizedMultiplier(
+                         shifted_sum, output_multiplier, output_shift) +
+                     output_zero_point;
+    output = std::min(std::max(output, kMinValue), kMaxValue);
+    output_data[idx] = static_cast<T>(output);
+  }
+  return true;
+}
+
+template <typename T, typename U>
+inline bool QuantizedMeanOrSumExtraArgs(
+    const T* input_data, int32_t input_zero_point, float input_scale,
+    const int* input_dims, const int input_num_dims, T* output_data,
+    float output_scale, int32_t output_multiplier, int output_shift,
+    int32_t output_zero_point, const int* output_dims,
+    const int output_num_dims, const int* axis, const int num_axis_dimensions,
+    bool keep_dims, int* temp_index, int* resolved_axis, U* temp_sum,
+    bool compute_sum) {
+  return QuantizedMeanOrSum<T, U>(
+      input_data, input_zero_point, input_dims, input_num_dims, output_data,
+      output_multiplier, output_shift, output_zero_point, output_dims,
+      output_num_dims, axis, num_axis_dimensions, keep_dims, temp_index,
+      resolved_axis, temp_sum, compute_sum);
+}
+
+template <typename T>
+inline bool QuantizedReduceProd(const T* input_data, int32_t input_zero_point,
+                                const RuntimeShape& input_shape, T* output_data,
+                                int32_t output_zero_point,
+                                const RuntimeShape& output_shape,
+                                const int* axis,
+                                const int64_t num_axis_dimensions,
+                                bool keep_dims, int* temp_index,
+                                int* resolved_axis, int32_t* temp_prod,
+                                int32_t scaling_multiplier, int scaling_shift) {
+  const int32_t kMinValue = std::numeric_limits<T>::min();
+  const int32_t kMaxValue = std::numeric_limits<T>::max();
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_shape.DimensionsCount(), axis, num_axis_dimensions,
+                   resolved_axis, &num_resolved_axis)) {
+    return false;
+  }
+
+  // Calculate the reduced product by rescaling each multiplication step to
+  // avoid an overflow.
+  auto reducer_first = [&](T in) -> int32_t { return in - input_zero_point; };
+
+  auto reducer_next = [&](int32_t current, T in) -> int32_t {
+    const int64_t result =
+        static_cast<int64_t>(current) * (in - input_zero_point);
+    return MultiplyByQuantizedMultiplier(result, scaling_multiplier,
+                                         scaling_shift);
+  };
+
+  if (!Reduce<T, int32_t>(
+          input_data, input_shape.DimsData(), output_shape.DimsData(),
+          input_shape.DimensionsCount(), output_shape.DimensionsCount(),
+          resolved_axis, num_resolved_axis, temp_index, reducer_first,
+          reducer_next, temp_prod)) {
+    return false;
+  }
+
+  for (int i = 0; i < output_shape.FlatSize(); i++) {
+    int32_t result =
+        MultiplyByQuantizedMultiplier(static_cast<int64_t>(temp_prod[i]),
+                                      scaling_multiplier, scaling_shift) +
+        output_zero_point;
+    result = std::min(std::max(result, kMinValue), kMaxValue);
+    output_data[i] = static_cast<T>(result);
+  }
+
+  return true;
+}
+
+}  // namespace reference_ops
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
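
The hunk above is the stock TFLM reference reduction kernel, re-namespaced to `tflite_micro`. As a quick illustration of its calling convention (not code from the package), the sketch below uses `ReduceGeneric` to take the per-row maximum of a 2x3 tensor; the `RowMaxExample` wrapper and the sample data are invented for the example, while the scratch buffers `temp_index` and `resolved_axis` must be sized by the caller exactly as shown in the signature above.

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/internal/reference/reduce.h"

// Per-row max of a 2x3 int32 tensor, i.e. reduction over axis 1.
bool RowMaxExample(int32_t out[2]) {
  const int32_t input[] = {1, 5, 3,
                           7, 2, 9};
  const int input_dims[] = {2, 3};  // 2 rows, 3 columns
  const int output_dims[] = {2};    // one value per row (keep_dims = false)
  const int axis[] = {1};           // reduce over the column dimension
  int temp_index[2];                // scratch: one slot per input dimension
  int resolved_axis[1];             // scratch: one slot per reduction axis

  const bool ok = tflite_micro::reference_ops::ReduceGeneric<int32_t>(
      input, input_dims, /*input_num_dims=*/2, out, output_dims,
      /*output_num_dims=*/1, axis, /*num_axis_dimensions=*/1,
      /*keep_dims=*/false, temp_index, resolved_axis,
      /*init_value=*/std::numeric_limits<int32_t>::lowest(),
      [](const int32_t current, const int32_t in) -> int32_t {
        return std::max(current, in);
      });
  // On success, out holds {5, 9}.
  return ok;
}
```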
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h
@@ -0,0 +1,70 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
+
+#include <algorithm>
+
+#include "ruy/profiler/instrumentation.h"  // from @ruy
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite_micro {
+namespace reference_ops {
+
+template <typename input_type, typename output_type>
+inline void Requantize(const input_type* input_data, int32_t size,
+                       int32_t effective_scale_multiplier,
+                       int32_t effective_scale_shift, int32_t input_zeropoint,
+                       int32_t output_zeropoint, output_type* output_data) {
+  ruy::profiler::ScopeLabel label("Requantize");
+  const bool same_scale =
+      (effective_scale_multiplier == 1 << 30 && effective_scale_shift == 1);
+  if (same_scale) {
+    const bool mixed_type_int8_uint8 =
+        std::is_same<input_type, int8_t>::value &&
+        std::is_same<output_type, uint8_t>::value;
+    const bool mixed_type_uint8_int8 =
+        std::is_same<input_type, uint8_t>::value &&
+        std::is_same<output_type, int8_t>::value;
+    const int32_t zero_point_diff = input_zeropoint - output_zeropoint;
+    // Fast path to do requantization for the case when just a shift of 128 is
+    // needed.
+    if ((mixed_type_int8_uint8 && zero_point_diff == -128) ||
+        (mixed_type_uint8_int8 && zero_point_diff == 128)) {
+      for (int i = 0; i < size; ++i) {
+        output_data[i] = input_data[i] ^ 0x80;
+      }
+      return;
+    }
+  }
+  static constexpr int32_t kMinOutput = std::numeric_limits<output_type>::min();
+  static constexpr int32_t kMaxOutput = std::numeric_limits<output_type>::max();
+  for (int i = 0; i < size; ++i) {
+    const int32_t input = input_data[i] - input_zeropoint;
+    const int32_t output =
+        MultiplyByQuantizedMultiplier(input, effective_scale_multiplier,
+                                      effective_scale_shift) +
+        output_zeropoint;
+    const int32_t clamped_output =
+        std::max(std::min(output, kMaxOutput), kMinOutput);
+    output_data[i] = static_cast<output_type>(clamped_output);
+  }
+}
+
+}  // namespace reference_ops
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
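
`Requantize` above expects the input-to-output scale ratio already encoded as a fixed-point multiplier/shift pair. Below is a minimal sketch (not code from the package) of how a caller might derive that pair and invoke it, assuming the `QuantizeMultiplier` helper declared in `quantization_util.h` from the same include tree; the `RequantizeExample` name and the concrete scales and zero points are illustrative only.

```cpp
#include <cstdint>

#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/requantize.h"

// Re-scale int8 data (scale 0.05, zero point -3) into uint8
// (scale 0.1, zero point 128).
void RequantizeExample(const int8_t* in, uint8_t* out, int32_t size) {
  const double effective_scale = 0.05 / 0.1;  // input_scale / output_scale

  int32_t multiplier = 0;
  int shift = 0;
  tflite_micro::QuantizeMultiplier(effective_scale, &multiplier, &shift);

  tflite_micro::reference_ops::Requantize(in, size, multiplier, shift,
                                          /*input_zeropoint=*/-3,
                                          /*output_zeropoint=*/128, out);
}
```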