xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h
@@ -0,0 +1,45 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite_micro {
+
+// The CircularBuffer op has one input and one output tensor.
+extern const int kCircularBufferInputTensor;
+extern const int kCircularBufferOutputTensor;
+
+// Indices into the init flexbuffer's vector.
+// The parameter's name is in the comment that follows.
+// Elements in the vectors are ordered alphabetically by parameter name.
+extern const int kCircularBufferCyclesMaxIndex;  // 'cycles_max'
+
+// These fields control the stride period of a strided streaming model. This op
+// returns kTfLiteAbort until cycles_until_run-- is zero. At this time,
+// cycles_until_run is reset to cycles_max.
+struct OpDataCircularBuffer {
+  int cycles_until_run;
+  int cycles_max;
+};
+
+TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node);
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
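The stride-period comment in this header is the key behavioural detail of the kernel. As an illustration only (not the packaged implementation), the described countdown corresponds roughly to the following Eval-time logic, using the `OpDataCircularBuffer` struct declared above; the helper name is hypothetical.

```cpp
// Sketch of the behaviour described in circular_buffer.h: the op aborts the
// invocation until the counter expires, then resets it to cycles_max.
TfLiteStatus CircularBufferEvalSketch(tflite_micro::OpDataCircularBuffer* data) {
  if (--data->cycles_until_run != 0) {
    // Downstream ops are skipped for this invocation.
    return kTfLiteAbort;
  }
  data->cycles_until_run = data->cycles_max;  // start the next stride period
  return kTfLiteOk;
}
```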
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h
@@ -0,0 +1,22 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
+#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
+
+extern const int g_gen_data_size_circular_buffer_config;
+extern const unsigned char g_gen_data_circular_buffer_config[];
+
+#endif
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h
@@ -0,0 +1,117 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/micro/micro_common.h"
+
+namespace tflite_micro {
+
+struct OpDataConv {
+  TfLitePaddingValues padding;
+
+  // Cached tensor zero point values for quantized operations.
+  int32_t input_zero_point;
+  int32_t filter_zero_point;
+  int32_t output_zero_point;
+
+  // The scaling factor from input to output (aka the 'real multiplier') can
+  // be represented as a fixed point multiplier plus a left shift.
+  int32_t output_multiplier;
+  int output_shift;
+
+  // Per channel output multiplier and shift.
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;
+
+  // The range of the fused activation layer. For example for kNone and
+  // uint8_t these would be 0 and 255.
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+
+  // A buffer used to store unpacked filter values. This is used if the source
+  // tensor is of n-bit precision that cannot be easily processed by kernels.
+  int filter_buffer_index;
+};
+
+extern const int kConvInputTensor;
+extern const int kConvWeightsTensor;
+extern const int kConvBiasTensor;
+extern const int kConvOutputTensor;
+extern const int kConvQuantizedDimension;
+
+// Returns a ConvParams struct with all the parameters needed for a
+// float computation.
+ConvParams ConvParamsFloat(const TfLiteConvParams& params,
+                           const OpDataConv& data);
+
+// Returns a ConvParams struct with all the parameters needed for a
+// quantized computation.
+ConvParams ConvParamsQuantized(const TfLiteConvParams& params,
+                               const OpDataConv& data);
+
+TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
+                                 const TfLiteConvParams& params, int width,
+                                 int height, int filter_width,
+                                 int filter_height, int out_width,
+                                 int out_height, const TfLiteType data_type,
+                                 OpDataConv* data);
+
+void* ConvInit(TfLiteContext* context, const char* buffer, size_t length);
+
+TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node);
+
+// This is the most generic TFLMRegistration. The actual supported types
+// may still be target dependent. The only requirement is that every
+// implementation (reference or optimized) must define this function.
+TFLMRegistration Register_CONV_2D();
+
+#if defined(XTENSA)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 activations and int8 weights and always calls the reference
+// implementation.
+TFLMRegistration Register_CONV_2D_INT8REF();
+
+#else
+inline TFLMRegistration Register_CONV_2D_INT8REF() {
+  return Register_CONV_2D();
+}
+#endif  // defined(XTENSA)
+
+#if defined(CMSIS_NN) || defined(XTENSA)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 activations and int8 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_CONV_2D_INT8();
+
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int16 activations and int8 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_CONV_2D_INT16();
+
+#else
+inline TFLMRegistration Register_CONV_2D_INT8() { return Register_CONV_2D(); }
+
+inline TFLMRegistration Register_CONV_2D_INT16() { return Register_CONV_2D(); }
+#endif  // defined(CMSIS_NN) || defined(XTENSA)
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_
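The `#if defined(...)` fallbacks at the end of this header let application code ask for a specialised registration unconditionally: on targets built without CMSIS_NN or XTENSA the inline definitions simply alias the generic kernel. A minimal sketch of that pattern, assuming the upstream TFLM `MicroMutableOpResolver::AddConv2D(registration)` overload (also shipped in this wheel under `micro_mutable_op_resolver.h`):

```cpp
#include "tensorflow/lite/micro/kernels/conv.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Illustration only: register a conv kernel, preferring the int8-optimised
// variant where the build provides one. On builds without CMSIS_NN/XTENSA,
// Register_CONV_2D_INT8() falls back to Register_CONV_2D(), so this code
// compiles and behaves sensibly on every target.
void RegisterConvSketch(tflite_micro::MicroMutableOpResolver<1>& resolver) {
  resolver.AddConv2D(tflite_micro::Register_CONV_2D_INT8());
}
```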
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h
@@ -0,0 +1,94 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/kernel_runner.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/test_helpers.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
+
+namespace tflite_micro {
+namespace testing {
+
+TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
+                        int output_length, TfLiteConvParams* conv_params,
+                        TFLMRegistration registration, float* output_data);
+
+TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
+                        int output_length, TfLiteConvParams* conv_params,
+                        TFLMRegistration registration, int8_t* output_data);
+
+TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
+                                 const float* expected_output_data,
+                                 int output_length,
+                                 TfLiteConvParams* conv_params,
+                                 TFLMRegistration registration,
+                                 float* output_data, float tolerance = 1e-5);
+
+TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
+                                 const int8_t* expected_output_data,
+                                 int output_length,
+                                 TfLiteConvParams* conv_params,
+                                 TFLMRegistration registration,
+                                 int8_t* output_data, float tolerance = 1e-5);
+
+TfLiteStatus TestConvFloat(int* input_dims_data, const float* input_data,
+                           int* filter_dims_data, const float* filter_data,
+                           int* bias_dims_data, const float* bias_data,
+                           int* output_dims_data,
+                           const float* expected_output_data,
+                           TfLiteConvParams* conv_params,
+                           TFLMRegistration registration, float* output_data);
+
+TfLiteStatus TestConvQuantizedPerChannel(
+    int* input_dims_data, const float* input_data, int8_t* input_quantized,
+    float input_scale, int input_zero_point, int* filter_dims_data,
+    const float* filter_data, int8_t* filter_data_quantized,
+    int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized,
+    float* bias_scales, int* bias_zero_points, int* output_dims_data,
+    const float* expected_output_data, int8_t* expected_output_data_quantized,
+    float output_scale, int output_zero_point, TfLiteConvParams* conv_params,
+    TFLMRegistration registration, int8_t* output_data,
+    TfLiteType tensor_weight_type = kTfLiteNoType);
+
+TfLiteStatus TestConvQuantizedPerChannel(
+    int* input_dims_data, const float* input_data, int16_t* input_quantized,
+    float input_scale, int input_zero_point, int* filter_dims_data,
+    const float* filter_data, int8_t* filter_data_quantized,
+    int* bias_dims_data, const float* bias_data,
+    std::int64_t* bias_data_quantized, float* bias_scales,
+    int* bias_zero_points, int* output_dims_data,
+    const float* expected_output_data, int16_t* expected_output_data_quantized,
+    float output_scale, int output_zero_point, TfLiteConvParams* conv_params,
+    TFLMRegistration registration, int16_t* output_data);
+
+TfLiteStatus TestConvQuantizedPerChannel(
+    int* input_dims_data, const float* input_data, int16_t* input_quantized,
+    float input_scale, int input_zero_point, int* filter_dims_data,
+    const float* filter_data, int8_t* filter_data_quantized,
+    int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized,
+    float* bias_scales, int* bias_zero_points, int* output_dims_data,
+    const float* expected_output_data, int16_t* expected_output_data_quantized,
+    float output_scale, int output_zero_point, TfLiteConvParams* conv_params,
+    TFLMRegistration registration, int16_t* output_data);
+
+}  // namespace testing
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_
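These helpers implement the usual TFLM golden-test pattern: supply raw tensor data plus the expected output and let the helper build the tensors, run the registered kernel, and compare. A hypothetical sketch of calling `TestConvFloat` follows; the shapes and values are made up, and the dims arrays are assumed to follow the standard TFLM test-helper convention of `{rank, d0, d1, ...}`.

```cpp
#include "tensorflow/lite/micro/kernels/conv_test.h"

// Illustration only: a 2x2 VALID conv over a 2x2 input with a diagonal filter,
// so the single output value is 1*1 + 4*1 = 5.
TfLiteStatus RunConvGoldenSketch() {
  int input_dims[] = {4, 1, 2, 2, 1};
  const float input_data[] = {1, 2, 3, 4};
  int filter_dims[] = {4, 1, 2, 2, 1};
  const float filter_data[] = {1, 0, 0, 1};
  int bias_dims[] = {1, 1};
  const float bias_data[] = {0};
  int output_dims[] = {4, 1, 1, 1, 1};
  const float expected_output[] = {5};
  float output_data[1];

  TfLiteConvParams params = {};
  params.padding = kTfLitePaddingValid;
  params.stride_width = 1;
  params.stride_height = 1;
  params.dilation_width_factor = 1;
  params.dilation_height_factor = 1;
  params.activation = kTfLiteActNone;

  return tflite_micro::testing::TestConvFloat(
      input_dims, input_data, filter_dims, filter_data, bias_dims, bias_data,
      output_dims, expected_output, &params,
      tflite_micro::Register_CONV_2D(), output_data);
}
```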
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h
@@ -0,0 +1,80 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/micro/kernels/conv.h"
+
+namespace tflite_micro {
+
+extern const int kDepthwiseConvInputTensor;
+extern const int kDepthwiseConvWeightsTensor;
+extern const int kDepthwiseConvBiasTensor;
+extern const int kDepthwiseConvOutputTensor;
+extern const int kDepthwiseConvQuantizedDimension;
+
+// Returns a DepthwiseParams struct with all the parameters needed for a
+// float computation.
+DepthwiseParams DepthwiseConvParamsFloat(
+    const TfLiteDepthwiseConvParams& params, const OpDataConv& data);
+
+// Returns a DepthwiseParams struct with all the parameters needed for a
+// quantized computation.
+DepthwiseParams DepthwiseConvParamsQuantized(
+    const TfLiteDepthwiseConvParams& params, const OpDataConv& data);
+
+TfLiteStatus CalculateOpDataDepthwiseConv(
+    TfLiteContext* context, TfLiteNode* node,
+    const TfLiteDepthwiseConvParams& params, int width, int height,
+    int filter_width, int filter_height, int out_width, int out_height,
+    const TfLiteType data_type, OpDataConv* data);
+
+TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node);
+
+// This is the most generic TFLMRegistration. The actual supported types
+// may still be target dependent. The only requirement is that every
+// implementation (reference or optimized) must define this function.
+TFLMRegistration Register_DEPTHWISE_CONV_2D();
+
+#if defined(CMSIS_NN)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8 activations and int8 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8();
+
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int16 activations and int8 weights and uses the latency optimized
+// implementations.
+TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16();
+
+#else
+inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT8() {
+  return Register_DEPTHWISE_CONV_2D();
+}
+
+inline TFLMRegistration Register_DEPTHWISE_CONV_2D_INT16() {
+  return Register_DEPTHWISE_CONV_2D();
+}
+#endif
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h
@@ -0,0 +1,38 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite_micro {
+
+struct DequantizeOpData {
+  tflite_micro::DequantizationParams quantization_params;
+  // The scaling factor from input to output (aka the 'real multiplier') can
+  // be represented as a fixed point multiplier plus a left shift.
+  int32_t output_multiplier;
+  int output_shift;
+  int32_t output_zero_point;
+};
+
+TfLiteStatus DequantizePrepare(TfLiteContext* context, TfLiteNode* node);
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_
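The `DequantizationParams` member carries the affine quantization parameters (scale and zero point), and the dequantize op is just the standard affine mapping real = scale * (q - zero_point). A minimal float sketch of that mapping, for illustration only (not the packaged kernel):

```cpp
#include <cstdint>
#include "tensorflow/lite/kernels/internal/types.h"

// real_value = scale * (quantized_value - zero_point)
float DequantizeSketch(int8_t q, const tflite_micro::DequantizationParams& p) {
  return static_cast<float>(p.scale) *
         static_cast<float>(static_cast<int32_t>(q) - p.zero_point);
}
```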
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h
@@ -0,0 +1,25 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
+#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H
+
+extern const int g_gen_data_size_none_regular_nms;
+extern const unsigned char g_gen_data_none_regular_nms[];
+
+extern const int g_gen_data_size_regular_nms;
+extern const unsigned char g_gen_data_regular_nms[];
+
+#endif
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h
@@ -0,0 +1,28 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
+
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite_micro {
+
+TFLMRegistration* Register_ETHOSU();
+
+const char* GetString_ETHOSU();
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h
@@ -0,0 +1,112 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/micro/micro_common.h"
+
+namespace tflite_micro {
+
+struct OpDataFullyConnected {
+  // The scaling factor from input to output (aka the 'real multiplier') can
+  // be represented as a fixed point multiplier plus a left shift.
+  int32_t output_multiplier;
+  int output_shift;
+  // The range of the fused activation layer. For example for kNone and
+  // uint8_t these would be 0 and 255.
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+  // The index of the temporary tensor where the quantized inputs are cached.
+  int input_quantized_index;
+  // Cached zero point values of tensors.
+  int32_t input_zero_point;
+  int32_t filter_zero_point;
+  int32_t output_zero_point;
+
+// TODO(b/258710417): enable by default once optimized fully-connected works for
+// all targets.
+#if !defined(HEXAGON)
+  // A buffer used to store unpacked filter values. This is used if the source
+  // tensor is of n-bit precision that cannot be easily processed by kernels.
+  int filter_buffer_index;
+#endif
+};
+
+extern const int kFullyConnectedInputTensor;
+extern const int kFullyConnectedWeightsTensor;
+extern const int kFullyConnectedBiasTensor;
+extern const int kFullyConnectedOutputTensor;
+
+// Returns a FullyConnectedParams struct with all the parameters needed for a
+// float computation.
+FullyConnectedParams FullyConnectedParamsFloat(
+    TfLiteFusedActivation activation);
+
+// Returns a FullyConnectedParams struct with all the parameters needed for a
+// quantized computation.
+FullyConnectedParams FullyConnectedParamsQuantized(
+    const OpDataFullyConnected& op_data);
+
+TfLiteStatus CalculateOpDataFullyConnected(
+    TfLiteContext* context, TfLiteFusedActivation activation,
+    TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter,
+    const TfLiteTensor* bias, TfLiteTensor* output, OpDataFullyConnected* data);
+
+// This is the most generic TFLMRegistration. The actual supported types
+// may still be target dependent. The only requirement is that every
+// implementation (reference or optimized) must define this function.
+TFLMRegistration Register_FULLY_CONNECTED();
+
+#if defined(CMSIS_NN) || defined(HEXAGON) || defined(XTENSA)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int8.
+TFLMRegistration Register_FULLY_CONNECTED_INT8();
+
+#else
+// Note that while this block gets used for both reference and optimized kernels
+// that do not have any specialized implementations, the only goal here is to
+// define fallback implementation that allow reference kernels to still be used
+// from applications that call a more specific kernel variant.
+
+inline TFLMRegistration Register_FULLY_CONNECTED_INT8() {
+  return Register_FULLY_CONNECTED();
+}
+
+#endif
+
+#if defined(CMSIS_NN)
+// Returns a TFLMRegistration struct for kernel variant that only supports
+// int16.
+TFLMRegistration Register_FULLY_CONNECTED_INT16();
+
+#else
+// Note that while this block gets used for both reference and optimized kernels
+// that do not have any specialized implementations, the only goal here is to
+// define fallback implementation that allow reference kernels to still be used
+// from applications that call a more specific kernel variant.
+
+inline TFLMRegistration Register_FULLY_CONNECTED_INT16() {
+  return Register_FULLY_CONNECTED();
+}
+
+#endif
+
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
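The comment on `output_multiplier`/`output_shift` describes the usual TFLite integer-kernel trick: the real-valued rescale factor (input_scale * filter_scale / output_scale) is stored as a 32-bit fixed-point multiplier plus a shift. A sketch of how that pair would typically be filled in, assuming the `QuantizeMultiplier` helper from `tensorflow/lite/kernels/internal/quantization_util.h` (also included in this wheel); the helper name `SetOutputRescaleSketch` is hypothetical.

```cpp
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/micro/kernels/fully_connected.h"

// Illustration only: split the real multiplier into the fixed-point
// (output_multiplier, output_shift) pair stored in OpDataFullyConnected.
void SetOutputRescaleSketch(double input_scale, double filter_scale,
                            double output_scale,
                            tflite_micro::OpDataFullyConnected* data) {
  const double real_multiplier = input_scale * filter_scale / output_scale;
  tflite_micro::QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                                   &data->output_shift);
}
```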
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h
@@ -0,0 +1,30 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite_micro {
+
+extern const int kHardSwishInputTensor;
+extern const int kHardSwishOutputTensor;
+
+TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node);
+}  // namespace tflite_micro
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_
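For reference, hard-swish is the standard piecewise activation hard_swish(x) = x * relu6(x + 3) / 6; the header only declares the prepare hook and tensor indices, with the quantized evaluation living in the kernel source. A float sketch of the function itself, for illustration only:

```cpp
#include <algorithm>

// hard_swish(x) = x * relu6(x + 3) / 6
float HardSwishSketch(float x) {
  const float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
  return x * relu6 / 6.0f;
}
```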