xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h
@@ -0,0 +1,267 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_

#include <algorithm>
#include <complex>

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite_micro {

namespace reference_ops {

// Maximum dimension supported by the broadcast mul operation.
constexpr int kMaxMulBroadcastDim = 6;

// Element-wise mul that can often be used for inner loop of broadcast Mul as
// well as the non-broadcast Mul.
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const uint8_t* input1_data,
                           const uint8_t* input2_data, uint8_t* output_data) {
  for (int i = 0; i < size; ++i) {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    const int32_t unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<uint8_t>(clamped_output);
  }
}

template <typename T>
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  T output_activation_min;
  T output_activation_max;
  GetActivationParams(params, &output_activation_min, &output_activation_max);

  const int flat_size =
      MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax<T>(
        input1_data[i] * input2_data[i], output_activation_min,
        output_activation_max);
  }
}

inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape,
                const std::complex<float>* input1_data,
                const RuntimeShape& input2_shape,
                const std::complex<float>* input2_data,
                const RuntimeShape& output_shape,
                std::complex<float>* output_data) {
  const int flat_size =
      MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = input1_data[i] * input2_data[i];
  }
}

inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8_t* input1_data,
                const RuntimeShape& input2_shape, const uint8_t* input2_data,
                const RuntimeShape& output_shape, uint8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int flat_size =
      MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);

  MulElementwise(flat_size, params, input1_data, input2_data, output_data);
}

template <typename T, typename F>
void BroadcastMulRecursiveDimensions(
    const ArithmeticParams& params, int dimension, const T* input1_data,
    const T* input2_data, T* output_data, size_t* input1_offset_p,
    size_t* input2_offset_p, size_t* output_offset,
    const NdArrayDesc<kMaxMulBroadcastDim>& desc1,
    const NdArrayDesc<kMaxMulBroadcastDim>& desc2,
    const int32_t extended_output_shape_dims[kMaxMulBroadcastDim],
    F binary_func) {
  if (dimension == kMaxMulBroadcastDim - 1) {
    for (int c = 0; c < extended_output_shape_dims[dimension]; ++c) {
      const T input1_val = input1_data[*input1_offset_p];
      const T input2_val = input2_data[*input2_offset_p];
      output_data[*output_offset] = binary_func(params, input1_val, input2_val);
      *input1_offset_p += desc1.strides[dimension];
      *input2_offset_p += desc2.strides[dimension];
      ++(*output_offset);
    }
  } else {
    for (int a = 0; a < extended_output_shape_dims[dimension]; ++a) {
      size_t input1_offset_c = *input1_offset_p;
      size_t input2_offset_c = *input2_offset_p;
      BroadcastMulRecursiveDimensions(
          params, dimension + 1, input1_data, input2_data, output_data,
          &input1_offset_c, &input2_offset_c, output_offset, desc1, desc2,
          extended_output_shape_dims, binary_func);
      *input1_offset_p += desc1.strides[dimension];
      *input2_offset_p += desc2.strides[dimension];
    }
  }
}

inline void BroadcastMul6DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const uint8_t* input1_data,
                               const RuntimeShape& input2_shape,
                               const uint8_t* input2_data,
                               const RuntimeShape& output_shape,
                               uint8_t* output_data) {
  NdArrayDesc<kMaxMulBroadcastDim> desc1;
  NdArrayDesc<kMaxMulBroadcastDim> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, output_shape);
  // Cache output shape dimensions.
  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
              sizeof(extended_output_shape_dims));

  size_t input1_offset = 0;
  size_t input2_offset = 0;
  size_t output_offset = 0;
  BroadcastMulRecursiveDimensions(
      params, 0, input1_data, input2_data, output_data, &input1_offset,
      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
      [](const ArithmeticParams& params, const uint8_t input1_val,
         const uint8_t input2_val) {
        const int32_t offsetted_input1_val = params.input1_offset + input1_val;
        const int32_t offsetted_input2_val = params.input2_offset + input2_val;
        const int32_t unclamped_result =
            params.output_offset +
            MultiplyByQuantizedMultiplier(
                offsetted_input1_val * offsetted_input2_val,
                params.output_multiplier, params.output_shift);
        const int32_t clamped_output = std::min(
            params.quantized_activation_max,
            std::max(params.quantized_activation_min, unclamped_result));
        return static_cast<uint8_t>(clamped_output);
      });
}

template <typename T,
          // For unquantized mul on small integers, explicitly set to true.
          bool enable_for_short_integers = false>
inline typename std::enable_if<
    !is_small_integer<T>::value || enable_for_short_integers, void>::type
BroadcastMul6DSlow(const ArithmeticParams& params,
                   const RuntimeShape& unextended_input1_shape,
                   const T* input1_data,
                   const RuntimeShape& unextended_input2_shape,
                   const T* input2_data,
                   const RuntimeShape& unextended_output_shape,
                   T* output_data) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6);
  NdArrayDesc<kMaxMulBroadcastDim> desc1;
  NdArrayDesc<kMaxMulBroadcastDim> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape);
  // Cache output shape dimensions.
  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
              sizeof(extended_output_shape_dims));

  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for
  // the best cache behavior.
  size_t input1_offset = 0;
  size_t input2_offset = 0;
  size_t output_offset = 0;
  BroadcastMulRecursiveDimensions(
      params, 0, input1_data, input2_data, output_data, &input1_offset,
      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
      [](const ArithmeticParams& params, const T input1_val,
         const T input2_val) {
        T output_activation_min;
        T output_activation_max;
        GetActivationParams(params, &output_activation_min,
                            &output_activation_max);
        return ActivationFunctionWithMinMax<T>(input1_val * input2_val,
                                               output_activation_min,
                                               output_activation_max);
      });
}

inline void BroadcastMul6DSlow(const ArithmeticParams& params,
                               const RuntimeShape& unextended_input1_shape,
                               const std::complex<float>* input1_data,
                               const RuntimeShape& unextended_input2_shape,
                               const std::complex<float>* input2_data,
                               const RuntimeShape& unextended_output_shape,
                               std::complex<float>* output_data) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 6);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 6);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 6);

  NdArrayDesc<kMaxMulBroadcastDim> desc1;
  NdArrayDesc<kMaxMulBroadcastDim> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(kMaxMulBroadcastDim, unextended_output_shape);
  // Cache output shape dimensions.
  int32_t extended_output_shape_dims[kMaxMulBroadcastDim];
  std::memcpy(extended_output_shape_dims, extended_output_shape.DimsData(),
              sizeof(extended_output_shape_dims));

  size_t input1_offset = 0;
  size_t input2_offset = 0;
  size_t output_offset = 0;
  BroadcastMulRecursiveDimensions(
      params, 0, input1_data, input2_data, output_data, &input1_offset,
      &input2_offset, &output_offset, desc1, desc2, extended_output_shape_dims,
      [](const ArithmeticParams& params, const std::complex<float> input1_val,
         const std::complex<float> input2_val) {
        return input1_val * input2_val;
      });
}

template <typename T>
inline void BroadcastMul4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  return BroadcastMul6DSlow(params, input1_shape, input1_data, input2_shape,
                            input2_data, output_shape, output_data);
}

}  // namespace reference_ops
}  // namespace tflite_micro

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
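For orientation, the templated float path above validates the shapes and then runs a flat element-wise loop, while the uint8_t overload routes through MulElementwise, which applies the input offsets, the fixed-point output multiplier/shift, and the activation clamp per element. Below is a minimal, hypothetical sketch of calling the templated Mul on two same-shape float tensors; the function name ExampleFloatMul and its data are illustrative and not part of the package, and it assumes only the RuntimeShape and ArithmeticParams types bundled with this runtime.

// Hypothetical usage sketch (not part of the package): element-wise float
// multiply of two 2x3 tensors with no fused activation.
#include <limits>

#include "tensorflow/lite/kernels/internal/reference/mul.h"

void ExampleFloatMul() {
  const int32_t dims[2] = {2, 3};
  const tflite_micro::RuntimeShape shape(2, dims);  // same shape for inputs and output
  const float a[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  const float b[6] = {0.5f, 0.5f, 0.5f, 2.f, 2.f, 2.f};
  float out[6];

  tflite_micro::ArithmeticParams params{};
  // "No activation": clamp to the full float range.
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();

  tflite_micro::reference_ops::Mul(params, shape, a, shape, b, shape, out);
  // out is now {0.5f, 1.f, 1.5f, 8.f, 10.f, 12.f}.
}

When the input shapes differ, BroadcastMul4DSlow/BroadcastMul6DSlow extend both shapes to six dimensions and recurse over them with BroadcastMulRecursiveDimensions, so the same per-element lambda is reused for the broadcast case.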
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h
@@ -0,0 +1,37 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite_micro {

namespace reference_ops {

template <typename T>
inline void Negate(const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& output_shape, T* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = -input_data[i];
  }
}

}  // namespace reference_ops
}  // namespace tflite_micro

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
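Negate is the simplest of these reference kernels: it checks that the input and output flat sizes match and writes the element-wise negation. A small illustrative call follows; the function name ExampleNegate and its data are hypothetical, and the sketch assumes the RuntimeShape(dimensions_count, dims_data) constructor from the bundled runtime headers.

// Hypothetical usage sketch (not part of the package): negate four int8 values.
#include "tensorflow/lite/kernels/internal/reference/neg.h"

void ExampleNegate() {
  const int32_t dims[1] = {4};
  const tflite_micro::RuntimeShape shape(1, dims);
  const int8_t input[4] = {1, -2, 3, -4};
  int8_t output[4] = {};
  tflite_micro::reference_ops::Negate(shape, input, shape, output);
  // output is now {-1, 2, -3, 4}.
}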
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h
@@ -0,0 +1,169 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_

#include <vector>

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite_micro {

namespace reference_ops {

// TFLite Pad supports activation tensors with up to 5 dimensions.
constexpr int PadKernelMaxDimensionCount() { return 5; }

// There are two versions of pad: Pad and PadV2. In PadV2 there is a second
// scalar input that provides the padding value. Therefore pad_value_ptr can be
// equivalent to a simple input1_data. For Pad, it should point to a zero
// value.
//
// Note that two typenames are required, so that T=P=int32_t is considered a
// specialization distinct from P=int32_t.
template <typename T, typename P>
inline void PadImpl(const tflite_micro::PadParams& op_params,
                    const RuntimeShape& input_shape, const T* input_data,
                    const P* pad_value_ptr, const RuntimeShape& output_shape,
                    T* output_data) {
  const RuntimeShape ext_input_shape =
      RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), input_shape);
  const RuntimeShape ext_output_shape =
      RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), output_shape);
  TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount());
  TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount());

  // Runtime calls are currently fixed at 5 dimensions. Copy inputs so we can
  // pad them to 5 dims (yes, we are "padding the padding").
  int left_padding_copy[PadKernelMaxDimensionCount()];
  for (int i = 0; i < PadKernelMaxDimensionCount(); i++) {
    left_padding_copy[i] = 0;
  }
  for (int i = 0; i < op_params.left_padding_count; ++i) {
    left_padding_copy[i + PadKernelMaxDimensionCount() -
                      op_params.left_padding_count] = op_params.left_padding[i];
  }
  int right_padding_copy[PadKernelMaxDimensionCount()];
  for (int i = 0; i < PadKernelMaxDimensionCount(); i++) {
    right_padding_copy[i] = 0;
  }
  for (int i = 0; i < op_params.right_padding_count; ++i) {
    right_padding_copy[i + PadKernelMaxDimensionCount() -
                       op_params.right_padding_count] =
        op_params.right_padding[i];
  }

  const int output_batch = ext_output_shape.Dims(0);
  const int output_plane = ext_output_shape.Dims(1);
  const int output_height = ext_output_shape.Dims(2);
  const int output_width = ext_output_shape.Dims(3);
  const int output_depth = ext_output_shape.Dims(4);

  const int left_b_padding = left_padding_copy[0];
  const int left_p_padding = left_padding_copy[1];
  const int left_h_padding = left_padding_copy[2];
  const int left_w_padding = left_padding_copy[3];
  const int left_d_padding = left_padding_copy[4];

  const int right_b_padding = right_padding_copy[0];
  const int right_p_padding = right_padding_copy[1];
  const int right_h_padding = right_padding_copy[2];
  const int right_w_padding = right_padding_copy[3];
  const int right_d_padding = right_padding_copy[4];

  const T pad_value = *pad_value_ptr;

  const T* in_ptr = input_data;
  T* out_ptr = output_data;
  for (int out_b = 0; out_b < output_batch; ++out_b) {
    for (int out_p = 0; out_p < output_plane; ++out_p) {
      for (int out_h = 0; out_h < output_height; ++out_h) {
        for (int out_w = 0; out_w < output_width; ++out_w) {
          for (int out_d = 0; out_d < output_depth; ++out_d) {
            if (out_b < left_b_padding ||
                out_b >= output_batch - right_b_padding ||
                out_p < left_p_padding ||
                out_p >= output_plane - right_p_padding ||
                out_h < left_h_padding ||
                out_h >= output_height - right_h_padding ||
                out_w < left_w_padding ||
                out_w >= output_width - right_w_padding ||
                out_d < left_d_padding ||
                out_d >= output_depth - right_d_padding) {
              *out_ptr++ = pad_value;
            } else {
              *out_ptr++ = *in_ptr++;
            }
          }
        }
      }
    }
  }
}

template <typename T, typename P>
inline void Pad(const tflite_micro::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const P* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}

// The second (pad-value) input can be int32_t when, say, the first is uint8_t.
template <typename T>
inline void Pad(const tflite_micro::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  const T converted_pad_value = static_cast<T>(*pad_value_ptr);
  PadImpl(op_params, input_shape, input_data, &converted_pad_value,
          output_shape, output_data);
}

// This version avoids conflicting template matching.
template <>
inline void Pad(const tflite_micro::PadParams& op_params,
                const RuntimeShape& input_shape, const int32_t* input_data,
                const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
                int32_t* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}

template <typename T, typename P>
inline void PadImageStyle(const tflite_micro::PadParams& op_params,
                          const RuntimeShape& input_shape, const T* input_data,
                          const P* pad_value_ptr,
                          const RuntimeShape& output_shape, T* output_data) {
  Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
      output_data);
}

template <typename P>
inline void PadImageStyle(const tflite_micro::PadParams& op_params,
                          const RuntimeShape& input_shape,
                          const float* input_data, const P* pad_value_ptr,
                          const RuntimeShape& output_shape,
                          float* output_data) {
  Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
      output_data);
}

}  // namespace reference_ops
}  // namespace tflite_micro

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
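PadImpl extends both shapes to five dimensions (batch, plane, height, width, depth), right-aligns the supplied padding counts into that 5-D space, and then walks the output tensor, copying input values in the interior and writing *pad_value_ptr on the border. The sketch below zero-pads a 1x2x2x1 float tensor by one row and one column on each side, giving a 1x4x4x1 output; the function name ExamplePad, the shapes, and the data are hypothetical illustrations and assume only the PadParams fields used by the kernel above.

// Hypothetical usage sketch (not part of the package): zero-pad a 1x2x2x1
// float tensor to 1x4x4x1 with one element of padding on height and width.
#include "tensorflow/lite/kernels/internal/reference/pad.h"

void ExamplePad() {
  tflite_micro::PadParams op_params{};
  op_params.left_padding_count = 4;
  op_params.right_padding_count = 4;
  // Padding is given per input dimension: {batch, height, width, channel}.
  const int32_t left[4] = {0, 1, 1, 0};
  const int32_t right[4] = {0, 1, 1, 0};
  for (int i = 0; i < 4; ++i) {
    op_params.left_padding[i] = left[i];
    op_params.right_padding[i] = right[i];
  }

  const int32_t in_dims[4] = {1, 2, 2, 1};
  const int32_t out_dims[4] = {1, 4, 4, 1};
  const tflite_micro::RuntimeShape input_shape(4, in_dims);
  const tflite_micro::RuntimeShape output_shape(4, out_dims);
  const float input[4] = {1.f, 2.f, 3.f, 4.f};
  const float pad_value = 0.f;
  float output[16];

  tflite_micro::reference_ops::Pad(op_params, input_shape, input, &pad_value,
                                   output_shape, output);
  // The four input values land in the centre of the 4x4 output; the border
  // is filled with pad_value.
}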