xmos-ai-tools 1.3.2.dev80__py3-none-macosx_10_15_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/__init__.py +7 -0
- xmos_ai_tools/io_server/__init__.py +151 -0
- xmos_ai_tools/runtime/__init__.py +0 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.cmake +13 -0
- xmos_ai_tools/runtime/buildfiles/aitoolslib.make +8 -0
- xmos_ai_tools/runtime/include/flash_server.h +74 -0
- xmos_ai_tools/runtime/include/flatbuffers/allocator.h +68 -0
- xmos_ai_tools/runtime/include/flatbuffers/array.h +243 -0
- xmos_ai_tools/runtime/include/flatbuffers/base.h +474 -0
- xmos_ai_tools/runtime/include/flatbuffers/bfbs_generator.h +43 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer.h +142 -0
- xmos_ai_tools/runtime/include/flatbuffers/buffer_ref.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/code_generators.h +235 -0
- xmos_ai_tools/runtime/include/flatbuffers/default_allocator.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/detached_buffer.h +114 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffer_builder.h +1197 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatbuffers.h +270 -0
- xmos_ai_tools/runtime/include/flatbuffers/flatc.h +111 -0
- xmos_ai_tools/runtime/include/flatbuffers/flexbuffers.h +1897 -0
- xmos_ai_tools/runtime/include/flatbuffers/grpc.h +300 -0
- xmos_ai_tools/runtime/include/flatbuffers/hash.h +127 -0
- xmos_ai_tools/runtime/include/flatbuffers/idl.h +1232 -0
- xmos_ai_tools/runtime/include/flatbuffers/minireflect.h +419 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/flatc_pch.h +39 -0
- xmos_ai_tools/runtime/include/flatbuffers/pch/pch.h +38 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection.h +502 -0
- xmos_ai_tools/runtime/include/flatbuffers/reflection_generated.h +1449 -0
- xmos_ai_tools/runtime/include/flatbuffers/registry.h +128 -0
- xmos_ai_tools/runtime/include/flatbuffers/stl_emulation.h +509 -0
- xmos_ai_tools/runtime/include/flatbuffers/string.h +64 -0
- xmos_ai_tools/runtime/include/flatbuffers/struct.h +53 -0
- xmos_ai_tools/runtime/include/flatbuffers/table.h +168 -0
- xmos_ai_tools/runtime/include/flatbuffers/util.h +690 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector.h +370 -0
- xmos_ai_tools/runtime/include/flatbuffers/vector_downward.h +271 -0
- xmos_ai_tools/runtime/include/flatbuffers/verifier.h +283 -0
- xmos_ai_tools/runtime/include/ioserver.h +44 -0
- xmos_ai_tools/runtime/include/lib_nn/api/TransposeConv.h +24 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16.h +27 -0
- xmos_ai_tools/runtime/include/lib_nn/api/add_int16_transform.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/dequantize_int16_transform.h +34 -0
- xmos_ai_tools/runtime/include/lib_nn/api/expand_8_to_16.h +8 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16.h +42 -0
- xmos_ai_tools/runtime/include/lib_nn/api/multiply_int16_transform.h +71 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_api.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_bin_types.h +14 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_config.h +287 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_conv2d_structs.h +72 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_image.h +26 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +303 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_helper.h +132 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_op_utils.h +150 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_operator.h +18 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_pooling.h +551 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_types.h +83 -0
- xmos_ai_tools/runtime/include/lib_nn/api/nn_window_params.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16.h +54 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_kernel_transform.h +37 -0
- xmos_ai_tools/runtime/include/lib_nn/api/output_transform_fn_int16_mappings.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +82 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16.h +22 -0
- xmos_ai_tools/runtime/include/lib_nn/api/quantize_int16_transform.h +33 -0
- xmos_ai_tools/runtime/include/lib_nn/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memmove_word_aligned.h +15 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_memset_256.h +55 -0
- xmos_ai_tools/runtime/include/lib_nn/api/vpu_sim.h +118 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3_vpu.h +216 -0
- xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h +2869 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h +41 -0
- xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h +25 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h +47 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +218 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h +52 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h +13 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h +17 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h +62 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h +31 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_float.h +155 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +19 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +28 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +32 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +71 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +49 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +160 -0
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h +119 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_defs.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_device.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_descriptors.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/legacy/usb_std_requests.h +4 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +518 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h +11 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h +87 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_descriptors.h +191 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_std_requests.h +120 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/XUD_USB_Defines.h +70 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/hid.h +23 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio10.h +30 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudio20.h +357 -0
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/src/user/class/usbaudiocommon.h +168 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/delay_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/irfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
- xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
- xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
- xmos_ai_tools/runtime/include/signal/src/filter_bank_square_root.h +34 -0
- xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/log.h +30 -0
- xmos_ai_tools/runtime/include/signal/src/max_abs.h +31 -0
- xmos_ai_tools/runtime/include/signal/src/msb.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
- xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
- xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
- xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
- xmos_ai_tools/runtime/include/signal/src/window.h +31 -0
- xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_op_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/builtin_op_data.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +54 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +72 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +440 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +626 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +178 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +1496 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_util.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h +31 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h +52 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_lut.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h +46 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h +50 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h +34 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +1358 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/compatibility.h +122 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/optimized/neon_check.h +20 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +623 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +292 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +561 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +88 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +275 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +101 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +91 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +97 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +271 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +289 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +175 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +319 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +247 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +323 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +250 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +241 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +291 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +126 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +121 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +18 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +194 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +264 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +224 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +90 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +256 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +132 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +422 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +169 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +303 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +333 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +244 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +111 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +491 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +102 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +51 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +151 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +233 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +109 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +147 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +465 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +129 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +203 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +225 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +168 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +278 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +1096 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +341 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +104 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +63 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +144 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/compatibility.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/input_data.h +108 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/network_tester/network_model.h +166 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/detection_responder.h +32 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/image_provider.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/main_functions.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/person_detection/model_settings.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +70 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +57 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +141 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +310 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +145 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_common.h +24 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h +613 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/mcps_macros.h +115 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +1286 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h +22 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +117 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +94 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +80 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h +25 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +112 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +86 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +43 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +35 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +541 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +817 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +150 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +158 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +56 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +142 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +65 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +67 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +100 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +37 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +579 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +139 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +216 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +48 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +74 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +78 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +76 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +47 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +44 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +58 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +39 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +64 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +170 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +53 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +73 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +95 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +133 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +138 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +351 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +28 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +176 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +79 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +189 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +42 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +708 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +62 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +140 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +38 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +89 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +162 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +60 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +30 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +33 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +125 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +69 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +27 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +49 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +334 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +267 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/test_conv_model.h +23 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +45 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +36 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +75 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +24644 -0
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +33 -0
- xmos_ai_tools/runtime/include/tile_ram_server.h +38 -0
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xformer/__init__.py +60 -0
- xmos_ai_tools/xformer/flash.py +190 -0
- xmos_ai_tools/xinterpreters/__init__.py +1 -0
- xmos_ai_tools/xinterpreters/exceptions.py +38 -0
- xmos_ai_tools/xinterpreters/host_interpreter.py +652 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- xmos_ai_tools-1.3.2.dev80.data/data/bin/xcore-opt +0 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/METADATA +33 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/RECORD +395 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/WHEEL +5 -0
- xmos_ai_tools-1.3.2.dev80.dist-info/top_level.txt +1 -0
@@ -0,0 +1,38 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
|
17
|
+
|
18
|
+
#include <cmath>
|
19
|
+
|
20
|
+
#include "ruy/profiler/instrumentation.h" // from @ruy
|
21
|
+
#include "tensorflow/lite/kernels/internal/types.h"
|
22
|
+
|
23
|
+
namespace tflite_micro {
|
24
|
+
namespace reference_ops {
|
25
|
+
|
26
|
+
template <typename T>
|
27
|
+
inline void Exp(const T* input_data, const size_t num_elements,
|
28
|
+
T* output_data) {
|
29
|
+
ruy::profiler::ScopeLabel label("Exp");
|
30
|
+
for (size_t idx = 0; idx < num_elements; ++idx) {
|
31
|
+
output_data[idx] = std::exp(input_data[idx]);
|
32
|
+
}
|
33
|
+
}
|
34
|
+
|
35
|
+
} // namespace reference_ops
|
36
|
+
} // namespace tflite_micro
|
37
|
+
|
38
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
|
@@ -0,0 +1,38 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
|
17
|
+
|
18
|
+
#include <cmath>
|
19
|
+
|
20
|
+
#include "tensorflow/lite/kernels/internal/types.h"
|
21
|
+
|
22
|
+
namespace tflite_micro {
|
23
|
+
namespace reference_ops {
|
24
|
+
|
25
|
+
template <typename T>
|
26
|
+
void Fill(const RuntimeShape& value_shape, const T* value_data,
|
27
|
+
const RuntimeShape& output_shape, T* output_data) {
|
28
|
+
TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0);
|
29
|
+
const int flat_size = output_shape.FlatSize();
|
30
|
+
for (int i = 0; i < flat_size; ++i) {
|
31
|
+
output_data[i] = *value_data;
|
32
|
+
}
|
33
|
+
}
|
34
|
+
|
35
|
+
} // namespace reference_ops
|
36
|
+
} // namespace tflite_micro
|
37
|
+
|
38
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
|
@@ -0,0 +1,39 @@
|
|
1
|
+
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
|
17
|
+
|
18
|
+
#include <cmath>
|
19
|
+
|
20
|
+
#include "tensorflow/lite/kernels/internal/types.h"
|
21
|
+
|
22
|
+
namespace tflite_micro {
|
23
|
+
|
24
|
+
namespace reference_ops {
|
25
|
+
|
26
|
+
inline void Floor(const RuntimeShape& input_shape, const float* input_data,
|
27
|
+
const RuntimeShape& output_shape, float* output_data) {
|
28
|
+
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
29
|
+
|
30
|
+
for (int i = 0; i < flat_size; i++) {
|
31
|
+
int offset = i;
|
32
|
+
output_data[offset] = std::floor(input_data[offset]);
|
33
|
+
}
|
34
|
+
}
|
35
|
+
|
36
|
+
} // namespace reference_ops
|
37
|
+
} // namespace tflite_micro
|
38
|
+
|
39
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
|
@@ -0,0 +1,35 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
|
17
|
+
|
18
|
+
#include <cmath>
|
19
|
+
#include <functional>
|
20
|
+
|
21
|
+
#include "tensorflow/lite/kernels/internal/types.h"
|
22
|
+
|
23
|
+
namespace tflite_micro {
|
24
|
+
namespace reference_ops {
|
25
|
+
|
26
|
+
// Returns floor(input1 / input2). The division is performed in double
// precision so that for integral T the result rounds toward negative
// infinity (floored division) rather than toward zero (C++ truncation).
template <typename T>
T FloorDiv(T input1, T input2) {
  const double quotient =
      static_cast<double>(input1) / static_cast<double>(input2);
  return std::floor(quotient);
}
|
31
|
+
|
32
|
+
} // namespace reference_ops
|
33
|
+
} // namespace tflite_micro
|
34
|
+
|
35
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
|
@@ -0,0 +1,44 @@
|
|
1
|
+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
|
17
|
+
|
18
|
+
#include <cmath>
|
19
|
+
#include <functional>
|
20
|
+
|
21
|
+
namespace tflite_micro {
|
22
|
+
|
23
|
+
namespace reference_ops {
|
24
|
+
|
25
|
+
// Returns the floored (Python-style) modulo of input1 by input2: the result
// has the same sign as the divisor input2. Integral T uses std::modulus;
// floating-point T uses std::fmod via a small wrapper functor.
template <typename T>
T FloorMod(T input1, T input2) {
  struct FloatMod {
    float operator()(const float lhs, const float rhs) const {
      return std::fmod(lhs, rhs);
    }
  };
  using ModFunc = typename std::conditional<std::is_integral<T>::value,
                                            std::modulus<T>, FloatMod>::type;
  ModFunc mod_func;
  const T trunc_mod = mod_func(input1, input2);
  // Truncated and floored modulo agree unless the remainder is nonzero and
  // its sign differs from the divisor's; in that case shift by the divisor.
  const bool needs_adjustment =
      (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0));
  return needs_adjustment ? static_cast<T>(trunc_mod + input2) : trunc_mod;
}
|
40
|
+
|
41
|
+
} // namespace reference_ops
|
42
|
+
} // namespace tflite_micro
|
43
|
+
|
44
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
|
@@ -0,0 +1,323 @@
|
|
1
|
+
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
2
|
+
|
3
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
you may not use this file except in compliance with the License.
|
5
|
+
You may obtain a copy of the License at
|
6
|
+
|
7
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
|
9
|
+
Unless required by applicable law or agreed to in writing, software
|
10
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
See the License for the specific language governing permissions and
|
13
|
+
limitations under the License.
|
14
|
+
==============================================================================*/
|
15
|
+
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
|
16
|
+
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
|
17
|
+
|
18
|
+
#include <algorithm>
|
19
|
+
|
20
|
+
#include "ruy/profiler/instrumentation.h" // from @ruy
|
21
|
+
#include "tensorflow/lite/kernels/internal/common.h"
|
22
|
+
#include "tensorflow/lite/kernels/internal/cppmath.h"
|
23
|
+
#include "tensorflow/lite/kernels/internal/quantization_util.h"
|
24
|
+
#include "tensorflow/lite/kernels/internal/types.h"
|
25
|
+
|
26
|
+
namespace tflite_micro {
|
27
|
+
namespace reference_ops {
|
28
|
+
|
29
|
+
inline void FullyConnected(
|
30
|
+
const FullyConnectedParams& params, const RuntimeShape& input_shape,
|
31
|
+
const float* input_data, const RuntimeShape& weights_shape,
|
32
|
+
const float* weights_data, const RuntimeShape& bias_shape,
|
33
|
+
const float* bias_data, const RuntimeShape& output_shape,
|
34
|
+
float* output_data) {
|
35
|
+
const float output_activation_min = params.float_activation_min;
|
36
|
+
const float output_activation_max = params.float_activation_max;
|
37
|
+
// TODO(b/62193649): This really should be:
|
38
|
+
// const int batches = ArraySize(output_dims, 1);
|
39
|
+
// but the current --variable_batch hack consists in overwriting the 3rd
|
40
|
+
// dimension with the runtime batch size, as we don't keep track for each
|
41
|
+
// array of which dimension is the batch dimension in it.
|
42
|
+
const int output_dims_count = output_shape.DimensionsCount();
|
43
|
+
const int weights_dims_count = weights_shape.DimensionsCount();
|
44
|
+
const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);
|
45
|
+
const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
|
46
|
+
output_shape, output_dims_count - 1);
|
47
|
+
const int accum_depth = weights_shape.Dims(weights_dims_count - 1);
|
48
|
+
for (int b = 0; b < batches; ++b) {
|
49
|
+
for (int out_c = 0; out_c < output_depth; ++out_c) {
|
50
|
+
float total = 0.f;
|
51
|
+
for (int d = 0; d < accum_depth; ++d) {
|
52
|
+
total += input_data[b * accum_depth + d] *
|
53
|
+
weights_data[out_c * accum_depth + d];
|
54
|
+
}
|
55
|
+
float bias_value = 0.0f;
|
56
|
+
if (bias_data) {
|
57
|
+
bias_value = bias_data[out_c];
|
58
|
+
}
|
59
|
+
output_data[out_c + output_depth * b] = ActivationFunctionWithMinMax(
|
60
|
+
total + bias_value, output_activation_min, output_activation_max);
|
61
|
+
}
|
62
|
+
}
|
63
|
+
}
|
64
|
+
|
65
|
+
inline void FullyConnected(
|
66
|
+
const FullyConnectedParams& params, const RuntimeShape& input_shape,
|
67
|
+
const uint8_t* input_data, const RuntimeShape& filter_shape,
|
68
|
+
const uint8_t* filter_data, const RuntimeShape& bias_shape,
|
69
|
+
const int32_t* bias_data, const RuntimeShape& output_shape,
|
70
|
+
uint8_t* output_data) {
|
71
|
+
const int32_t input_offset = params.input_offset;
|
72
|
+
const int32_t filter_offset = params.weights_offset;
|
73
|
+
const int32_t output_offset = params.output_offset;
|
74
|
+
const int32_t output_multiplier = params.output_multiplier;
|
75
|
+
const int output_shift = params.output_shift;
|
76
|
+
const int32_t output_activation_min = params.quantized_activation_min;
|
77
|
+
const int32_t output_activation_max = params.quantized_activation_max;
|
78
|
+
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
|
79
|
+
TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
|
80
|
+
|
81
|
+
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
|
82
|
+
// TODO(b/62193649): This really should be:
|
83
|
+
// const int batches = ArraySize(output_dims, 1);
|
84
|
+
// but the current --variable_batch hack consists in overwriting the 3rd
|
85
|
+
// dimension with the runtime batch size, as we don't keep track for each
|
86
|
+
// array of which dimension is the batch dimension in it.
|
87
|
+
const int output_dim_count = output_shape.DimensionsCount();
|
88
|
+
const int filter_dim_count = filter_shape.DimensionsCount();
|
89
|
+
const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
|
90
|
+
const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
|
91
|
+
output_shape, output_dim_count - 1);
|
92
|
+
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
|
93
|
+
for (int b = 0; b < batches; ++b) {
|
94
|
+
for (int out_c = 0; out_c < output_depth; ++out_c) {
|
95
|
+
int32_t acc = 0;
|
96
|
+
for (int d = 0; d < accum_depth; ++d) {
|
97
|
+
int32_t input_val = input_data[b * accum_depth + d];
|
98
|
+
int32_t filter_val = filter_data[out_c * accum_depth + d];
|
99
|
+
acc += (filter_val + filter_offset) * (input_val + input_offset);
|
100
|
+
}
|
101
|
+
if (bias_data) {
|
102
|
+
acc += bias_data[out_c];
|
103
|
+
}
|
104
|
+
acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
|
105
|
+
acc += output_offset;
|
106
|
+
acc = std::max(acc, output_activation_min);
|
107
|
+
acc = std::min(acc, output_activation_max);
|
108
|
+
output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
|
109
|
+
}
|
110
|
+
}
|
111
|
+
}
|
112
|
+
|
113
|
+
inline void FullyConnected(
|
114
|
+
const FullyConnectedParams& params, const RuntimeShape& input_shape,
|
115
|
+
const uint8_t* input_data, const RuntimeShape& filter_shape,
|
116
|
+
const uint8_t* filter_data, const RuntimeShape& bias_shape,
|
117
|
+
const int32_t* bias_data, const RuntimeShape& output_shape,
|
118
|
+
int16_t* output_data) {
|
119
|
+
const int32_t input_offset = params.input_offset;
|
120
|
+
const int32_t filter_offset = params.weights_offset;
|
121
|
+
const int32_t output_offset = params.output_offset;
|
122
|
+
const int32_t output_multiplier = params.output_multiplier;
|
123
|
+
const int output_shift = params.output_shift;
|
124
|
+
const int32_t output_activation_min = params.quantized_activation_min;
|
125
|
+
const int32_t output_activation_max = params.quantized_activation_max;
|
126
|
+
|
127
|
+
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
|
128
|
+
TFLITE_DCHECK_EQ(output_offset, 0);
|
129
|
+
// TODO(b/62193649): This really should be:
|
130
|
+
// const int batches = ArraySize(output_dims, 1);
|
131
|
+
// but the current --variable_batch hack consists in overwriting the 3rd
|
132
|
+
// dimension with the runtime batch size, as we don't keep track for each
|
133
|
+
// array of which dimension is the batch dimension in it.
|
134
|
+
const int output_dim_count = output_shape.DimensionsCount();
|
135
|
+
const int filter_dim_count = filter_shape.DimensionsCount();
|
136
|
+
const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
|
137
|
+
const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
|
138
|
+
output_shape, output_dim_count - 1);
|
139
|
+
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
|
140
|
+
for (int b = 0; b < batches; ++b) {
|
141
|
+
for (int out_c = 0; out_c < output_depth; ++out_c) {
|
142
|
+
// Internal accumulation.
|
143
|
+
// Initialize accumulator with the bias-value.
|
144
|
+
int32_t accum = bias_data[out_c];
|
145
|
+
// Accumulation loop.
|
146
|
+
for (int d = 0; d < accum_depth; ++d) {
|
147
|
+
int16_t input_val = input_data[b * accum_depth + d] + input_offset;
|
148
|
+
int16_t filter_val =
|
149
|
+
filter_data[out_c * accum_depth + d] + filter_offset;
|
150
|
+
accum += filter_val * input_val;
|
151
|
+
}
|
152
|
+
// Down-scale the final int32_t accumulator to the scale used by our
|
153
|
+
// (16-bit, typically 3 integer bits) fixed-point format. The quantized
|
154
|
+
// multiplier and shift here have been pre-computed offline
|
155
|
+
// (e.g. by toco).
|
156
|
+
accum =
|
157
|
+
MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift);
|
158
|
+
// Saturate, cast to int16_t, and store to output array.
|
159
|
+
accum = std::max(accum, output_activation_min - output_offset);
|
160
|
+
accum = std::min(accum, output_activation_max - output_offset);
|
161
|
+
accum += output_offset;
|
162
|
+
output_data[out_c + output_depth * b] = accum;
|
163
|
+
}
|
164
|
+
}
|
165
|
+
}
|
166
|
+
|
167
|
+
// Reference kernel for FullyConnected with pre-shuffled uint8 weights and
// int16 output. Only batch sizes 1 and 4 are supported (anything else trips
// TFLITE_DCHECK and returns). Weights are expected pre-interleaved in 4x16
// blocks with their sign bit pre-flipped (xor 0x80), so reinterpreting the
// bytes as int8 implicitly subtracts the zero point 128; the input
// activations are shuffled/xor'd the same way into the caller-provided
// workspace buffer before accumulation. accum_depth must be a multiple of
// 16 and output_depth a multiple of 4.
inline void ShuffledFullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8_t* input_data, const RuntimeShape& weights_shape,
    const uint8_t* shuffled_weights_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data, uint8_t* shuffled_input_workspace_data) {
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dim_count - 1);
  // The 4x16 blocked weight layout below requires these divisibility
  // properties of the depths.
  TFLITE_DCHECK((accum_depth % 16) == 0);
  TFLITE_DCHECK((output_depth % 4) == 0);

  // Shuffling and xoring of input activations into the workspace buffer
  uint8_t* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
  if (batches == 1) {
    // Batch 1: no interleaving needed, just flip the sign bit of each byte.
    for (int i = 0; i < accum_depth; i++) {
      shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
    }
  } else if (batches == 4) {
    // Batch 4: interleave 16-byte depth chunks across the 4 batches so the
    // inner loop below can read them contiguously.
    for (int c = 0; c < accum_depth; c += 16) {
      for (int b = 0; b < 4; b++) {
        const uint8_t* src_data_ptr = input_data + b * accum_depth + c;
        for (int j = 0; j < 16; j++) {
          uint8_t src_val = *src_data_ptr++;
          // Flip the sign bit, so that the kernel will only need to
          // reinterpret these uint8_t values as int8_t, getting for free the
          // subtraction of the zero_point value 128.
          uint8_t dst_val = src_val ^ 0x80;
          *shuffled_input_workspace_ptr++ = dst_val;
        }
      }
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }

  // Actual computation
  if (batches == 1) {
    int16_t* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8_t values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8_t* shuffled_weights_ptr =
        reinterpret_cast<const int8_t*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8_t* shuffled_input_data =
        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      // Internal accumulation.
      // Initialize accumulator with the bias-value.
      int32_t accum[4] = {0};
      // Accumulation loop.
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int j = 0; j < 16; j++) {
            int8_t input_val = shuffled_input_data[d + j];
            int8_t weights_val = *shuffled_weights_ptr++;
            accum[i] += weights_val * input_val;
          }
        }
      }
      for (int i = 0; i < 4; i++) {
        // Add bias value
        int32_t acc = accum[i] + bias_data[c + i];
        // Down-scale the final int32_t accumulator to the scale used by our
        // (16-bit, typically 3 integer bits) fixed-point format. The quantized
        // multiplier and shift here have been pre-computed offline
        // (e.g. by toco).
        acc =
            MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
        // Saturate, cast to int16_t, and store to output array.
        acc = std::max(acc, output_activation_min);
        acc = std::min(acc, output_activation_max);
        output_ptr[c + i] = acc;
      }
    }
  } else if (batches == 4) {
    int16_t* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8_t values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8_t* shuffled_weights_ptr =
        reinterpret_cast<const int8_t*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8_t* shuffled_input_data =
        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      const int8_t* shuffled_input_ptr = shuffled_input_data;
      // Accumulation loop.
      // Internal accumulation.
      // Initialize accumulator with the bias-value.
      int32_t accum[4][4];
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          accum[i][b] = 0;
        }
      }
      // Process one 4-output-channel x 4-batch x 16-depth block at a time;
      // both pointers advance by 64 bytes (4 groups of 16) per depth step.
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int b = 0; b < 4; b++) {
            for (int j = 0; j < 16; j++) {
              int8_t input_val = shuffled_input_ptr[16 * b + j];
              int8_t weights_val = shuffled_weights_ptr[16 * i + j];
              accum[i][b] += weights_val * input_val;
            }
          }
        }
        shuffled_input_ptr += 64;
        shuffled_weights_ptr += 64;
      }
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          // Add bias value
          int32_t acc = accum[i][b] + bias_data[c + i];
          // Down-scale the final int32_t accumulator to the scale used by our
          // (16-bit, typically 3 integer bits) fixed-point format. The
          // quantized multiplier and shift here have been pre-computed offline
          // (e.g. by toco).
          acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
                                              output_shift);
          // Saturate, cast to int16_t, and store to output array.
          acc = std::max(acc, output_activation_min);
          acc = std::min(acc, output_activation_max);
          output_ptr[b * output_depth + c + i] = acc;
        }
      }
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }
}
|
319
|
+
|
320
|
+
} // namespace reference_ops
|
321
|
+
} // namespace tflite_micro
|
322
|
+
|
323
|
+
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
|