xmos-ai-tools 1.3.2.dev19__py3-none-macosx_11_0_arm64.whl → 1.3.2.dev37__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +9 -9
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- {xmos_ai_tools-1.3.2.dev19.data → xmos_ai_tools-1.3.2.dev37.data}/data/bin/xcore-opt +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/METADATA +2 -2
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/RECORD +206 -206
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/WHEEL +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/top_level.txt +0 -0
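Apart from the regenerated schema and the rebuilt binary artifacts, every header hunk shown below is the same mechanical edit: the top-level `tflite` namespace is renamed to `tflite_micro`, and all qualified references (`tflite::micro::GetTensorData`, `tflite::lstm_internal::LstmStep`, and so on) are rewritten to match. As a minimal sketch of what that rename means for downstream sources (the alias and helper below are hypothetical, not shipped in the wheel):

// Hypothetical compatibility shim, assuming nothing else in the build
// defines a `tflite` namespace: lets sources written against upstream
// TFLM keep compiling against these renamed headers.
#include "tensorflow/lite/micro/kernels/kernel_util.h"

namespace tflite = tflite_micro;

float* HiddenStateData(TfLiteEvalTensor* tensor) {
  // Upstream-style spelling; after the alias this resolves to
  // tflite_micro::micro::GetTensorData<float>(tensor).
  return tflite::micro::GetTensorData<float>(tensor);
}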
--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h
@@ -24,7 +24,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/test_helpers.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace testing {
 
 /*Helper Functions (mainly about mimicking the kernel preparation)*/
@@ -35,7 +35,7 @@ namespace testing {
 // (put into stack memory) CalculateOpDataFullyConnected in
 // tensorflow/lite/micro/kernels/fully_connected_common.cc
 template <typename CellType>
-tflite::FullyConnectedParams CreateFCParams(
+tflite_micro::FullyConnectedParams CreateFCParams(
     const TensorQuantizationParameters& input_quant_params,
     const TensorQuantizationParameters& weight_quant_params,
     const float nonlinear_activation_input_scale) {
@@ -57,10 +57,10 @@ tflite::FullyConnectedParams CreateFCParams(
   data.output_activation_min = std::numeric_limits<CellType>::min();
   data.output_activation_max = std::numeric_limits<CellType>::max();
 
-  return tflite::FullyConnectedParamsQuantized(data);
+  return tflite_micro::FullyConnectedParamsQuantized(data);
 }
 
-inline tflite::FullyConnectedParams CreateFCParamsFloat() {
+inline tflite_micro::FullyConnectedParams CreateFCParamsFloat() {
   FullyConnectedParams op_params;
   CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min,
                            &op_params.float_activation_max);
@@ -69,12 +69,12 @@ inline tflite::FullyConnectedParams CreateFCParamsFloat() {
 
 // Wrapper function to create gate parameters for the four internal LSTM gates
 template <typename CellType>
-tflite::GateParameters CreateGateParams(
+tflite_micro::GateParameters CreateGateParams(
     const TensorQuantizationParameters& input_quant_params,
     const TensorQuantizationParameters& hidden_state_quant_params,
     const GateQuantizationParameters& gate_quantization_settings,
     const float nonlinear_activation_input_scale) {
-  tflite::GateParameters gate_params = {};
+  tflite_micro::GateParameters gate_params = {};
   gate_params.input_fc_params = CreateFCParams<CellType>(
       input_quant_params, gate_quantization_settings.activation_weight,
       nonlinear_activation_input_scale);
@@ -84,8 +84,8 @@ tflite::GateParameters CreateGateParams(
   return gate_params;
 }
 
-inline tflite::GateParameters CreateGateParamsFloat() {
-  tflite::GateParameters gate_params = {};
+inline tflite_micro::GateParameters CreateGateParamsFloat() {
+  tflite_micro::GateParameters gate_params = {};
   gate_params.input_fc_params = CreateFCParamsFloat();
   gate_params.recurrent_fc_params = CreateFCParamsFloat();
   return gate_params;
@@ -97,11 +97,11 @@ inline tflite::GateParameters CreateGateParamsFloat() {
 // output is the updated hidden state, which is asymmetrically quantized. Thus
 // output may require zero point
 template <typename OutputType>
-tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale,
+tflite_micro::ArithmeticParams CreateInterGateMulParams(const float input1_scale,
                                                   const float input2_scale,
                                                   const float output_scale,
                                                   const int output_zp = 0) {
-  tflite::ArithmeticParams op_params = {};
+  tflite_micro::ArithmeticParams op_params = {};
   op_params.quantized_activation_min = std::numeric_limits<OutputType>::min();
   op_params.quantized_activation_max = std::numeric_limits<OutputType>::max();
   op_params.input1_offset = 0;
@@ -118,8 +118,8 @@ tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale,
   return op_params;
 }
 
-inline tflite::ArithmeticParams CreateInterGateMulParamsFloat() {
-  tflite::ArithmeticParams op_params = {};
+inline tflite_micro::ArithmeticParams CreateInterGateMulParamsFloat() {
+  tflite_micro::ArithmeticParams op_params = {};
   CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min,
                            &op_params.float_activation_max);
   return op_params;
@@ -133,7 +133,7 @@ CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale,
   CellStateInfo cell_state_info;
   // cell_state_scale_power: 2^-cell_state_scale_power = cell state scale
   int buffer;
-  tflite::CheckedLog2(cell_state_scale, &buffer);
+  tflite_micro::CheckedLog2(cell_state_scale, &buffer);
   cell_state_info.cell_state_scale_power = buffer;
   // Cell state specifics
   cell_state_info.cell_clip = cell_clip;
@@ -344,16 +344,16 @@ void TestCalculateLstmGateFloat(const TfLiteEvalTensor* input,
   float gate_output[batch_size * state_dimension] = {};
   float fc_output_buffer[batch_size * state_dimension] = {};
 
-  tflite::GateParameters gate_params = CreateGateParamsFloat();
+  tflite_micro::GateParameters gate_params = CreateGateParamsFloat();
 
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
       /*time_major*/ false, input->dims, recurrent->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
-  tflite::lstm_internal::CalculateLstmGate<float, float, float, float>(
+  tflite_micro::lstm_internal::CalculateLstmGate<float, float, float, float>(
       step_info, gate_params,
       // Input FC
       input, input_weight, input_bias,
@@ -385,20 +385,20 @@ void TestCalculateLstmGateInteger(
   CellType gate_output[batch_size * state_dimension] = {};
   CellType fc_output_buffer[batch_size * state_dimension] = {};
 
-  tflite::GateParameters gate_params = CreateGateParams<CellType>(
+  tflite_micro::GateParameters gate_params = CreateGateParams<CellType>(
      node_quantization_settings.input, node_quantization_settings.hidden_state,
      gate_quantization_settings,
      node_quantization_settings.nonlinear_activation_input_scale);
 
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
      /*time_major*/ false, input->dims, recurrent->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
   // only int8 weight is supported now
-  tflite::lstm_internal::CalculateLstmGate<ActivationType, WeightType, CellType,
+  tflite_micro::lstm_internal::CalculateLstmGate<ActivationType, WeightType, CellType,
                                            BiasType>(
       step_info, gate_params,
       // Input FC
@@ -434,13 +434,13 @@ void TestUpdateLstmCellFloat(
 
   auto cell_state = node_content.CellStateEvalTensor();
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
       /*time_major*/ false,
-      node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims,
+      node_content.GetEvalTensor(tflite_micro::kLstmInputTensor)->dims,
      node_content.HiddenStateEvalTensor()->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
   // copy the data since it will be updated
   float forget_gate[batch_size * state_dimension] = {};
@@ -450,14 +450,14 @@ void TestUpdateLstmCellFloat(
   CellStateInfo cell_state_info;
   cell_state_info.cell_clip = node_content.BuiltinData().cell_clip;
   // Call the function to be tested
-  tflite::lstm_internal::UpdateLstmCell<float>(
+  tflite_micro::lstm_internal::UpdateLstmCell<float>(
       step_info, cell_state, forget_gate,
       gate_output_data.expected_input_gate_output,
       gate_output_data.expected_cell_gate_output, forget_cell_mul_params,
       input_mul_params, cell_state_info, buffer);
 
   ValidateResultGoldens(gate_output_data.expected_updated_cell,
-                        tflite::micro::GetTensorData<float>(cell_state),
+                        tflite_micro::micro::GetTensorData<float>(cell_state),
                         batch_size * state_dimension, tolerance);
 }
 
@@ -472,17 +472,17 @@ void TestUpdateLstmCellInteger(
     const float tolerance) {
   const auto& quantization_settings = node_content.QuantizationSettings();
   CellType quantized_forget_gate[batch_size * state_dimension] = {};
-  tflite::Quantize(gate_output_data.expected_forget_gate_output,
+  tflite_micro::Quantize(gate_output_data.expected_forget_gate_output,
                    quantized_forget_gate, batch_size * state_dimension,
                    quantization_settings.nonlinear_activation_output_scale, 0);
 
   CellType quantized_input_gate[batch_size * state_dimension] = {};
-  tflite::Quantize(gate_output_data.expected_input_gate_output,
+  tflite_micro::Quantize(gate_output_data.expected_input_gate_output,
                    quantized_input_gate, batch_size * state_dimension,
                    quantization_settings.nonlinear_activation_output_scale, 0);
 
   CellType quantized_cell_gate[batch_size * state_dimension] = {};
-  tflite::Quantize(gate_output_data.expected_cell_gate_output,
+  tflite_micro::Quantize(gate_output_data.expected_cell_gate_output,
                    quantized_cell_gate, batch_size * state_dimension,
                    quantization_settings.nonlinear_activation_output_scale, 0);
 
@@ -503,22 +503,22 @@ void TestUpdateLstmCellInteger(
 
   auto cell_state = node_content.CellStateEvalTensor();
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
      /*time_major*/ false,
-      node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims,
+      node_content.GetEvalTensor(tflite_micro::kLstmInputTensor)->dims,
      node_content.HiddenStateEvalTensor()->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
   // Call the function to be tested
-  tflite::lstm_internal::UpdateLstmCell<CellType>(
+  tflite_micro::lstm_internal::UpdateLstmCell<CellType>(
      step_info, cell_state, quantized_forget_gate, quantized_input_gate,
      quantized_cell_gate, forget_cell_mul_params, input_mul_params,
      cell_state_info, buffer);
 
   float cell_state_float[batch_size * state_dimension] = {};
-  Dequantize(tflite::micro::GetTensorData<CellType>(cell_state),
+  Dequantize(tflite_micro::micro::GetTensorData<CellType>(cell_state),
              batch_size * state_dimension,
             quantization_settings.cell_state.scale,
             quantization_settings.cell_state.zero_point, cell_state_float);
@@ -543,24 +543,24 @@ void TestUpdateLstmHiddenFloat(
   int32_t cell_state_scale_power = 0;
 
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
      /*time_major*/ false,
-      node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims,
+      node_content.GetEvalTensor(tflite_micro::kLstmInputTensor)->dims,
      node_content.HiddenStateEvalTensor()->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
   auto cell_state = node_content.CellStateEvalTensor();
   auto hidden_state = node_content.HiddenStateEvalTensor();
 
-  tflite::lstm_internal::UpdateLstmHidden<float, float>(
+  tflite_micro::lstm_internal::UpdateLstmHidden<float, float>(
      step_info, cell_state, hidden_state,
      gate_output_data.expected_output_gate_output, mul_params,
      cell_state_scale_power, buffer);
 
   ValidateResultGoldens(gate_output_data.expected_updated_hidden,
-                        tflite::micro::GetTensorData<float>(hidden_state),
+                        tflite_micro::micro::GetTensorData<float>(hidden_state),
                         batch_size * state_dimension, tolerance);
 }
 
@@ -575,7 +575,7 @@ void TestUpdateLstmHiddenInteger(
     const float tolerance) {
   const auto& quantization_settings = node_content.QuantizationSettings();
   CellType quantized_output_gate[batch_size * state_dimension] = {};
-  tflite::Quantize(gate_output_data.expected_output_gate_output,
+  tflite_micro::Quantize(gate_output_data.expected_output_gate_output,
                    quantized_output_gate, batch_size * state_dimension,
                    quantization_settings.nonlinear_activation_output_scale, 0);
 
@@ -588,28 +588,28 @@ void TestUpdateLstmHiddenInteger(
              quantization_settings.hidden_state.zero_point);
 
   int cell_state_scale_power_buffer;
-  tflite::CheckedLog2(quantization_settings.cell_state.scale,
+  tflite_micro::CheckedLog2(quantization_settings.cell_state.scale,
                       &cell_state_scale_power_buffer);
   int32_t cell_state_scale_power = cell_state_scale_power_buffer;
 
   // Create step information: only one time step, no need to update
-  auto size_info = tflite::testing::CreateLstmSizeInfo(
+  auto size_info = tflite_micro::testing::CreateLstmSizeInfo(
      /*time_major*/ false,
-      node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims,
+      node_content.GetEvalTensor(tflite_micro::kLstmInputTensor)->dims,
      node_content.HiddenStateEvalTensor()->dims);
   // revise time_major = true to enable batch inference
   size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&size_info);
+  tflite_micro::lstm_internal::LstmStepManager step_info(&size_info);
 
   auto cell_state = node_content.CellStateEvalTensor();
   auto hidden_state = node_content.HiddenStateEvalTensor();
 
-  tflite::lstm_internal::UpdateLstmHidden<CellType, ActivationType>(
+  tflite_micro::lstm_internal::UpdateLstmHidden<CellType, ActivationType>(
      step_info, cell_state, hidden_state, quantized_output_gate, mul_params,
      cell_state_scale_power, buffer);
 
   float hidden_state_float[batch_size * state_dimension] = {};
-  Dequantize(tflite::micro::GetTensorData<ActivationType>(hidden_state),
+  Dequantize(tflite_micro::micro::GetTensorData<ActivationType>(hidden_state),
              batch_size * state_dimension,
             quantization_settings.hidden_state.scale,
             quantization_settings.hidden_state.zero_point, hidden_state_float);
@@ -644,17 +644,17 @@ void TestLstmStepFloat(
   OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents);
   // set time_major to true to test batch inference
   op_data.size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info);
-  tflite::lstm_internal::LstmStep<float, float, float, float>(
+  tflite_micro::lstm_internal::LstmStepManager step_info(&op_data.size_info);
+  tflite_micro::lstm_internal::LstmStep<float, float, float, float>(
      step_info, op_data, kernel_content, buffers);
 
   ValidateResultGoldens(
      gate_output_data.expected_updated_hidden,
-      tflite::micro::GetTensorData<float>(kernel_content.HiddenStateTensor()),
+      tflite_micro::micro::GetTensorData<float>(kernel_content.HiddenStateTensor()),
      batch_size * state_dimension, hidden_state_tolerance);
   ValidateResultGoldens(
      gate_output_data.expected_updated_cell,
-      tflite::micro::GetTensorData<float>(kernel_content.CellStateTensor()),
+      tflite_micro::micro::GetTensorData<float>(kernel_content.CellStateTensor()),
      batch_size * state_dimension, cell_state_tolerance);
 }
 
@@ -686,22 +686,22 @@ void TestLstmStepInteger(
   OpDataLSTM op_data = CreateLstmOpData(node_contents);
   // set time_major to true to test batch inference
   op_data.size_info.time_major = true;
-  tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info);
-  tflite::lstm_internal::LstmStep<ActivationType, WeightType, CellType,
+  tflite_micro::lstm_internal::LstmStepManager step_info(&op_data.size_info);
+  tflite_micro::lstm_internal::LstmStep<ActivationType, WeightType, CellType,
                                  BiasType>(step_info, op_data, kernel_content,
                                            buffers);
 
   const auto& quantization_settings = node_contents.QuantizationSettings();
   float dequantized_hidden_state[batch_size * state_dimension] = {};
   Dequantize(
-      tflite::micro::GetTensorData<ActivationType>(
+      tflite_micro::micro::GetTensorData<ActivationType>(
          kernel_content.HiddenStateTensor()),
      batch_size * state_dimension, quantization_settings.hidden_state.scale,
      quantization_settings.hidden_state.zero_point, dequantized_hidden_state);
 
   float dequantized_cell_state[batch_size * state_dimension] = {};
   Dequantize(
-      tflite::micro::GetTensorData<CellType>(kernel_content.CellStateTensor()),
+      tflite_micro::micro::GetTensorData<CellType>(kernel_content.CellStateTensor()),
      batch_size * state_dimension, quantization_settings.cell_state.scale,
      quantization_settings.cell_state.zero_point, dequantized_cell_state);
 
@@ -737,7 +737,7 @@ void TestEvalLstmFloat(
 
   OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents);
 
-  tflite::EvalLstm<float, float, float, float>(op_data, kernel_content,
+  tflite_micro::EvalLstm<float, float, float, float>(op_data, kernel_content,
                                                buffers);
 
   ValidateResultGoldens(eval_check_data.expected_hidden_state,
@@ -779,7 +779,7 @@ void TestEvalLstmInteger(
 
   OpDataLSTM op_data = CreateLstmOpData(node_contents);
 
-  tflite::EvalLstm<ActivationType, WeightType, CellType, BiasType>(
+  tflite_micro::EvalLstm<ActivationType, WeightType, CellType, BiasType>(
      op_data, kernel_content, buffers);
 
   const auto& quantization_settings = node_contents.QuantizationSettings();
@@ -812,6 +812,6 @@ void TestEvalLstmInteger(
 }
 
 } // namespace testing
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_

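A detail worth noting in the helpers above: the cell state scale is expected to be an exact power of two, and `CheckedLog2` recovers the (signed) exponent that the tests then store as `cell_state_scale_power`. A rough stand-in for its contract, assuming the usual TFLM behavior rather than quoting this package's implementation:

#include <cmath>

// Rough stand-in for CheckedLog2 (assumed contract, not the shipped code):
// returns true iff `scale` is an exact power of two, writing the exponent
// to *log2_result, e.g. scale = 0.00048828125f (2^-11) yields -11.
bool CheckedLog2Sketch(float scale, int* log2_result) {
  const float log2_value = std::log2(scale);
  const float rounded = std::round(log2_value);
  *log2_result = static_cast<int>(rounded);
  return std::abs(log2_value - rounded) < 1e-6f;
}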
--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Input Tensors of size {n_batch, n_input}
 constexpr int kLstmInputTensor = 0;
@@ -146,5 +146,5 @@ struct LSTMBuffers {
   CellType* buffer3;
 };
 
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h
@@ -27,7 +27,7 @@ limitations under the License.
 // their model requires, using a custom `(Micro)MutableOpResolver`. Selective
 // registration in turn allows the linker to strip unused kernels.
 
-namespace tflite {
+namespace tflite_micro {
 
 // TFLM is incrementally moving towards a flat tflite namespace
 // (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops should
@@ -153,6 +153,6 @@ TFLMRegistration* Register_STACKER();
 TFLMRegistration* Register_WINDOW();
 } // namespace tflm_signal
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_

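The context lines above restate the TFLM kernel-registration model: applications register only the ops their model needs through a `MicroMutableOpResolver`, so the linker can drop every kernel that is never referenced. A hedged sketch of that pattern against this package's renamed namespace (the op selection is illustrative, not taken from the diff):

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Illustrative only: size the resolver for exactly the ops a hypothetical
// model uses; kernels never registered here are never referenced, so the
// linker can strip them from the final image.
tflite_micro::MicroMutableOpResolver<3> MakeResolver() {
  tflite_micro::MicroMutableOpResolver<3> resolver;
  resolver.AddConv2D();
  resolver.AddAveragePool2D();
  resolver.AddSoftmax();
  return resolver;
}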
--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h
@@ -34,7 +34,7 @@ limitations under the License.
 #define __restrict__ __restrict
 #endif
 
-namespace tflite {
+namespace tflite_micro {
 
 // Not all backends support CpuBackendContext usage, so forward declare to avoid
 // pulling in its implementation.
@@ -51,6 +51,6 @@ void PortableApplyActivationToVector(const float* vector, int v_size,
                                      TfLiteFusedActivation activation,
                                      float* result);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/micro/micro_common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kMulInput1Tensor;
 extern const int kMulInput2Tensor;
@@ -69,6 +69,6 @@ TFLMRegistration Register_MUL_INT8();
 // Fallback registration
 inline TFLMRegistration Register_MUL_INT8() { return Register_MUL(); }
 #endif
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h
@@ -18,10 +18,10 @@ limitations under the License.
 
 #include "tensorflow/lite/c/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h
@@ -29,7 +29,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/micro_ops.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kPoolingInputTensor;
 extern const int kPoolingOutputTensor;
@@ -76,10 +76,10 @@ void AveragePoolingEvalQuantized(TfLiteContext* context, const TfLiteNode* node,
   op_params.quantized_activation_max = data->activation_max;
 
   reference_integer_ops::AveragePool(op_params,
-                                     tflite::micro::GetTensorShape(input),
-                                     tflite::micro::GetTensorData<T>(input),
-                                     tflite::micro::GetTensorShape(output),
-                                     tflite::micro::GetTensorData<T>(output));
+                                     tflite_micro::micro::GetTensorShape(input),
+                                     tflite_micro::micro::GetTensorData<T>(input),
+                                     tflite_micro::micro::GetTensorShape(output),
+                                     tflite_micro::micro::GetTensorData<T>(output));
 }
 
 void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node,
@@ -95,7 +95,7 @@ void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node,
                              TfLiteEvalTensor* output) {
   TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
 
-  tflite::PoolParams op_params;
+  tflite_micro::PoolParams op_params;
   op_params.stride_height = params->stride_height;
   op_params.stride_width = params->stride_width;
   op_params.filter_height = params->filter_height;
@@ -106,10 +106,10 @@ void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node,
   op_params.quantized_activation_max = data->activation_max;
 
   reference_integer_ops::MaxPool(op_params,
-                                 tflite::micro::GetTensorShape(input),
-                                 tflite::micro::GetTensorData<T>(input),
-                                 tflite::micro::GetTensorShape(output),
-                                 tflite::micro::GetTensorData<T>(output));
+                                 tflite_micro::micro::GetTensorShape(input),
+                                 tflite_micro::micro::GetTensorData<T>(input),
+                                 tflite_micro::micro::GetTensorShape(output),
+                                 tflite_micro::micro::GetTensorData<T>(output));
 }
 
 #if defined(CMSIS_NN) || defined(XTENSA)
@@ -122,21 +122,21 @@ TFLMRegistration Register_AVERAGE_POOL_2D_INT16();
 TFLMRegistration Register_MAX_POOL_2D_INT16();
 #else
 inline TFLMRegistration Register_AVERAGE_POOL_2D_INT8() {
-  return tflite::Register_AVERAGE_POOL_2D();
+  return tflite_micro::Register_AVERAGE_POOL_2D();
 }
 
 inline TFLMRegistration Register_MAX_POOL_2D_INT8() {
-  return tflite::Register_MAX_POOL_2D();
+  return tflite_micro::Register_MAX_POOL_2D();
 }
 
 inline TFLMRegistration Register_AVERAGE_POOL_2D_INT16() {
-  return tflite::Register_AVERAGE_POOL_2D();
+  return tflite_micro::Register_AVERAGE_POOL_2D();
 }
 
 inline TFLMRegistration Register_MAX_POOL_2D_INT16() {
-  return tflite::Register_MAX_POOL_2D();
+  return tflite_micro::Register_MAX_POOL_2D();
 }
 #endif
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_

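The registration hunks above also show the convention this header keeps after the rename: type-specialized entry points such as `Register_MAX_POOL_2D_INT8()` always exist, but on targets without an optimized backend they collapse to inline wrappers around the generic kernel. The same shape, sketched for a hypothetical op `FOO` (nothing named FOO exists in the package):

// Sketch of the fallback convention for a hypothetical op FOO: call sites
// can always use the INT8 symbol without #ifdefs, and only specialized
// builds pay for a separate optimized kernel.
TFLMRegistration Register_FOO();  // generic reference kernel

#if defined(CMSIS_NN) || defined(XTENSA)
TFLMRegistration Register_FOO_INT8();  // supplied by the optimized backend
#else
inline TFLMRegistration Register_FOO_INT8() { return Register_FOO(); }
#endif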
--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 TfLiteStatus CalculatePreluParams(const TfLiteTensor* input,
                                   const TfLiteTensor* alpha,
@@ -34,6 +34,6 @@ void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape,
 
 TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h
@@ -18,10 +18,10 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 struct OpDataQuantizeReference {
-  tflite::QuantizationParams quantization_params;
+  tflite_micro::QuantizationParams quantization_params;
   // The scaling factor from input to output (aka the 'real multiplier') can
   // be represented as a fixed point multiplier plus a left shift.
   int32_t requantize_output_multiplier;
@@ -32,6 +32,6 @@ struct OpDataQuantizeReference {
 
 TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node);
 TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TfLiteNode* node);
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_

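The struct comment above ("the real multiplier can be represented as a fixed point multiplier plus a left shift") is the standard TFLite requantization trick: a real-valued rescale factor is stored as a normalized Q31 integer plus a power-of-two shift so the kernel never touches floating point. A simplified illustration of the decomposition, in the spirit of TFLite's QuantizeMultiplier rather than this package's exact code:

#include <cmath>
#include <cstdint>

// Simplified sketch: express real_multiplier as q * 2^shift, where q is a
// Q31 fixed-point value in [0.5, 1). Ignores zero/negative inputs and other
// edge cases the real implementation handles.
void DecomposeMultiplier(double real_multiplier, int32_t* quantized_multiplier,
                         int* shift) {
  const double q = std::frexp(real_multiplier, shift);  // q in [0.5, 1)
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // rounding carried q up to exactly 1.0
    q_fixed /= 2;
    ++(*shift);
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}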
--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/micro/micro_common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kMaxNumberOfAxis;
 extern const int kMaxNumberOfReducedAxis;
@@ -60,6 +60,6 @@ TFLMRegistration Register_MEAN();
 TFLMRegistration Register_REDUCE_MAX();
 TFLMRegistration Register_SUM();
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h
@@ -16,11 +16,11 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 constexpr int kReshapeInputTensor = 0;
 constexpr int kReshapeOutputTensor = 0;
 
 TfLiteStatus PrepareReshapeReference(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/micro/micro_common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length);
 
@@ -62,6 +62,6 @@ inline TFLMRegistration Register_SOFTMAX_INT8() { return Register_SOFTMAX(); }
 inline TFLMRegistration Register_SOFTMAX_INT16() { return Register_SOFTMAX(); }
 #endif
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/micro_common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 constexpr int kStridedSliceInputTensor = 0;
 constexpr int kStridedSliceBeginTensor = 1;
@@ -35,6 +35,6 @@ void* StridedSliceInit(TfLiteContext* context, const char* buffer,
 
 TfLiteStatus StridedSlicePrepare(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_STRIDED_SLICE_H_

--- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h
+++ xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 extern const int kSubInputTensor1;
 extern const int kSubInputTensor2;
@@ -55,6 +55,6 @@ TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params,
 
 TfLiteStatus SubPrepare(TfLiteContext* context, TfLiteNode* node);
 
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_