xmos-ai-tools 1.3.2.dev19__py3-none-macosx_11_0_arm64.whl → 1.3.2.dev37__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +9 -9
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- {xmos_ai_tools-1.3.2.dev19.data → xmos_ai_tools-1.3.2.dev37.data}/data/bin/xcore-opt +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/METADATA +2 -2
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/RECORD +206 -206
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/WHEEL +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/top_level.txt +0 -0
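The change that runs through every header diff below is a rename of the runtime's top-level C++ namespace from tflite to tflite_micro; the nested reference_ops and tensor_utils namespaces are unchanged. A minimal sketch of what this looks like for application code built against these headers, assuming the rename applies uniformly to the interpreter and schema headers listed above (the model data, resolver contents, and arena size are illustrative placeholders, not taken from this diff):

    #include <cstddef>
    #include <cstdint>
    #include "tensorflow/lite/micro/micro_interpreter.h"
    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    namespace {
    constexpr size_t kArenaSize = 8 * 1024;  // placeholder arena size
    uint8_t tensor_arena[kArenaSize];
    }  // namespace

    // In 1.3.2.dev19 these types were tflite::Model, tflite::MicroMutableOpResolver
    // and tflite::MicroInterpreter; in 1.3.2.dev37 they live in tflite_micro.
    TfLiteStatus RunOnce(const void* model_data) {
      const tflite_micro::Model* model = tflite_micro::GetModel(model_data);
      tflite_micro::MicroMutableOpResolver<1> resolver;
      resolver.AddFullyConnected();
      tflite_micro::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                                 kArenaSize);
      if (interpreter.AllocateTensors() != kTfLiteOk) return kTfLiteError;
      return interpreter.Invoke();
    }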
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h
CHANGED
@@ -20,10 +20,10 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/common.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

-inline void LeakyRelu(const tflite::LeakyReluParams& params,
+inline void LeakyRelu(const tflite_micro::LeakyReluParams& params,
 const RuntimeShape& input_shape, const float* input_data,
 const RuntimeShape& output_shape, float* output_data) {
 const int flat_size = MatchingFlatSize(input_shape, output_shape);
@@ -64,6 +64,6 @@ inline void QuantizeLeakyRelu(const LeakyReluParams& params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
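For the reference kernels themselves only the enclosing namespace and the params types move; the signatures are otherwise unchanged, so a direct call now reads as follows (a small sketch against the LeakyRelu signature above; the shapes, buffers, and slope value are placeholders):

    #include "tensorflow/lite/kernels/internal/reference/leaky_relu.h"

    void LeakyReluExample() {
      const float input[4] = {-1.0f, -0.5f, 0.5f, 1.0f};
      float output[4];
      tflite_micro::LeakyReluParams params;
      params.alpha = 0.2f;  // placeholder negative-slope coefficient
      const tflite_micro::RuntimeShape shape({1, 1, 1, 4});
      // Previously tflite::reference_ops::LeakyRelu with tflite::LeakyReluParams.
      tflite_micro::reference_ops::LeakyRelu(params, shape, input, shape, output);
    }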
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #include "fixedpoint/fixedpoint.h"
 #include "tensorflow/lite/kernels/internal/common.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline void LogSoftmax(const SoftmaxParams& params,
@@ -251,6 +251,6 @@ inline void LogSoftmax(const SoftmaxParams& params, const size_t outer_size,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h
CHANGED
@@ -24,7 +24,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/kernels/op_macros.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
@@ -127,6 +127,6 @@ inline void Logistic(const RuntimeShape& input_shape, const int8_t* input_data,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h
CHANGED
@@ -24,7 +24,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/reference/fully_connected.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline void LstmCell(
@@ -94,14 +94,14 @@ inline void LstmCell(
 float const* concat_input_arrays_data[2] = {input_data, prev_activ_data};
 const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape,
 &prev_activ_shape};
-tflite::ConcatenationParams concat_params;
+tflite_micro::ConcatenationParams concat_params;
 concat_params.axis = 3;
 concat_params.inputs_count = 2;
 Concatenation(concat_params, concat_input_arrays_shapes,
 concat_input_arrays_data, concat_temp_shape, concat_temp_data);

 // Fully connected
-tflite::FullyConnectedParams fc_params;
+tflite_micro::FullyConnectedParams fc_params;
 fc_params.float_activation_min = std::numeric_limits<float>::lowest();
 fc_params.float_activation_max = std::numeric_limits<float>::max();
 FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape,
@@ -305,7 +305,7 @@ inline void LstmCell(const LstmCellParams& params,
 prev_activ_data_uint8};
 const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape,
 &prev_activ_shape};
-tflite::ConcatenationParams concat_params;
+tflite_micro::ConcatenationParams concat_params;
 concat_params.axis = 3;
 concat_params.inputs_count = 2;
 Concatenation(concat_params, concat_input_arrays_shapes,
@@ -418,5 +418,5 @@ inline void LstmCell(const LstmCellParams& params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h
CHANGED
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 template <typename T, typename Op, int N = 5>
@@ -59,6 +59,6 @@ void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/common.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -262,6 +262,6 @@ inline void BroadcastMul4DSlow(
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h
CHANGED
@@ -17,7 +17,7 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -32,6 +32,6 @@ inline void Negate(const RuntimeShape& input_shape, const T* input_data,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -35,7 +35,7 @@ constexpr int PadKernelMaxDimensionCount() { return 5; }
 // Note that two typenames are required, so that T=P=int32_t is considered a
 // specialization distinct from P=int32_t.
 template <typename T, typename P>
-inline void PadImpl(const tflite::PadParams& op_params,
+inline void PadImpl(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const P* pad_value_ptr, const RuntimeShape& output_shape,
 T* output_data) {
@@ -115,7 +115,7 @@ inline void PadImpl(const tflite::PadParams& op_params,
 }

 template <typename T, typename P>
-inline void Pad(const tflite::PadParams& op_params,
+inline void Pad(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const P* pad_value_ptr, const RuntimeShape& output_shape,
 T* output_data) {
@@ -125,7 +125,7 @@ inline void Pad(const tflite::PadParams& op_params,

 // The second (pad-value) input can be int32_t when, say, the first is uint8_t.
 template <typename T>
-inline void Pad(const tflite::PadParams& op_params,
+inline void Pad(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
 T* output_data) {
@@ -136,7 +136,7 @@ inline void Pad(const tflite::PadParams& op_params,

 // This version avoids conflicting template matching.
 template <>
-inline void Pad(const tflite::PadParams& op_params,
+inline void Pad(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape, const int32_t* input_data,
 const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
 int32_t* output_data) {
@@ -145,7 +145,7 @@ inline void Pad(const tflite::PadParams& op_params,
 }

 template <typename T, typename P>
-inline void PadImageStyle(const tflite::PadParams& op_params,
+inline void PadImageStyle(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const P* pad_value_ptr,
 const RuntimeShape& output_shape, T* output_data) {
@@ -154,7 +154,7 @@ inline void PadImageStyle(const tflite::PadParams& op_params,
 }

 template <typename P>
-inline void PadImageStyle(const tflite::PadParams& op_params,
+inline void PadImageStyle(const tflite_micro::PadParams& op_params,
 const RuntimeShape& input_shape,
 const float* input_data, const P* pad_value_ptr,
 const RuntimeShape& output_shape,
@@ -164,6 +164,6 @@ inline void PadImageStyle(const tflite::PadParams& op_params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline bool AveragePool(const PoolParams& params,
@@ -298,6 +298,6 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
 }
 }
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #define __restrict__ __restrict
 #endif

-namespace tflite {
+namespace tflite_micro {
 namespace tensor_utils {

 // Check if all entries of a vector are zero for float.
@@ -328,6 +328,6 @@ void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
 }

 } // namespace tensor_utils
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #define __restrict__ __restrict
 #endif

-namespace tflite {
+namespace tflite_micro {

 // Not all backends support CpuBackendContext usage, so forward declare to avoid
 // pulling in its implementation.
@@ -239,6 +239,6 @@ void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
 int16_t* output);

 } // namespace tensor_utils
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -106,6 +106,6 @@ inline void Prelu(const PreluParams& params, const RuntimeShape& input_shape,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -39,7 +39,7 @@ namespace reference_ops {
 // patterns and falling back to generic broadcast.
 inline bool ProcessBroadcastShapes(const RuntimeShape& shape0,
 const RuntimeShape& shape1,
-tflite::ArithmeticParams* params) {
+tflite_micro::ArithmeticParams* params) {
 const int dims_count =
 std::max(shape0.DimensionsCount(), shape1.DimensionsCount());

@@ -135,6 +135,6 @@ inline bool ProcessBroadcastShapes(const RuntimeShape& shape0,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h
CHANGED
@@ -24,12 +24,12 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

 template <typename InputT, typename OutputT>
-inline void AffineQuantize(const tflite::QuantizationParams& op_params,
+inline void AffineQuantize(const tflite_micro::QuantizationParams& op_params,
 const RuntimeShape& input_shape,
 const InputT* input_data,
 const RuntimeShape& output_shape,
@@ -53,7 +53,7 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params,
 // Quantizes per-channel.
 template <typename InputT, typename OutputT>
 inline void PerChannelQuantize(
-const tflite::PerChannelQuantizationParams& op_params,
+const tflite_micro::PerChannelQuantizationParams& op_params,
 const RuntimeShape& input_shape, const InputT* input_data,
 const RuntimeShape& output_shape, OutputT* output_data) {
 // Ensure flat size is same.
@@ -85,5 +85,5 @@ inline void PerChannelQuantize(

 } // namespace reference_ops

-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h
CHANGED
@@ -44,7 +44,7 @@ inline bool IsFirstReduction(const int* index, const int num_axis,
 return true;
 }

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -268,7 +268,7 @@ inline bool Mean(const T* input_data, const int* input_dims,
 return true;
 }

-inline void Mean(const tflite::MeanParams& op_params,
+inline void Mean(const tflite_micro::MeanParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const float* input_data,
 const RuntimeShape& unextended_output_shape,
@@ -486,6 +486,6 @@ inline bool QuantizedReduceProd(const T* input_data, int32_t input_zero_point,

 } // namespace reference_ops

-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 template <typename input_type, typename output_type>
@@ -65,6 +65,6 @@ inline void Requantize(const input_type* input_data, int32_t size,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h
CHANGED
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline void ComputeInterpolationValues(const float value, const float scale,
@@ -44,7 +44,7 @@ inline void ComputeInterpolationValues(const float value, const float scale,
 }

 template <typename T>
-inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
+inline void ResizeBilinear(const tflite_micro::ResizeBilinearParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const T* input_data,
 const RuntimeShape& unextended_output_size_shape,
@@ -134,7 +134,7 @@ inline void ComputeInterpolationValuesInteger(
 // Same as above but doesn't use any floating-point for the resize
 template <typename T>
 inline void ResizeBilinearInteger(
-const tflite::ResizeBilinearParams& op_params,
+const tflite_micro::ResizeBilinearParams& op_params,
 const RuntimeShape& unextended_input_shape, const T* input_data,
 const RuntimeShape& unextended_output_size_shape,
 const int32_t* output_size_data,
@@ -228,6 +228,6 @@ inline void ResizeBilinearInteger(
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -48,7 +48,7 @@ inline int32_t GetNearestNeighbor(const int input_value,

 template <typename T>
 inline void ResizeNearestNeighbor(
-const tflite::ResizeNearestNeighborParams& op_params,
+const tflite_micro::ResizeNearestNeighborParams& op_params,
 const RuntimeShape& unextended_input_shape, const T* input_data,
 const RuntimeShape& output_size_shape, const int32_t* output_size_data,
 const RuntimeShape& unextended_output_shape, T* output_data) {
@@ -97,6 +97,6 @@ inline void ResizeNearestNeighbor(
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -47,5 +47,5 @@ inline void Round(const RuntimeShape& input_shape, const float* input_data,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 template <typename D, typename T>
@@ -146,6 +146,6 @@ void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h
CHANGED
@@ -18,12 +18,12 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/portable_tensor.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

 template <typename T>
-inline void Slice(const tflite::SliceParams& op_params,
+inline void Slice(const tflite_micro::SliceParams& op_params,
 const RuntimeShape& input_shape,
 const RuntimeShape& output_shape,
 SequentialTensorWriter<T>* writer) {
@@ -59,7 +59,7 @@ inline void Slice(const tflite::SliceParams& op_params,
 }

 template <typename T>
-inline void Slice(const tflite::SliceParams& op_params,
+inline void Slice(const tflite_micro::SliceParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const RuntimeShape& output_shape, T* output_data) {
 SequentialTensorWriter<T> writer(input_data, output_data);
@@ -67,7 +67,7 @@ inline void Slice(const tflite::SliceParams& op_params,
 }

 template <typename T>
-inline void Slice(const tflite::SliceParams& op_params,
+inline void Slice(const tflite_micro::SliceParams& op_params,
 const RuntimeShape& input_shape, const TfLiteTensor* input,
 const RuntimeShape& output_shape, TfLiteTensor* output) {
 SequentialTensorWriter<T> writer(input, output);
@@ -75,6 +75,6 @@ inline void Slice(const tflite::SliceParams& op_params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h
CHANGED
@@ -25,7 +25,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/types.h"
 #include "tensorflow/lite/kernels/op_macros.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 inline void Softmax(const SoftmaxParams& params,
@@ -228,6 +228,6 @@ inline void SoftmaxInt16(const SoftmaxParams& params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 // TODO(b/135760455): Move this method anonymous namespace in a cc file.
@@ -104,6 +104,6 @@ inline void SpaceToBatchND(const SpaceToBatchParams& params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h
CHANGED
@@ -19,11 +19,11 @@ limitations under the License.

 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {

 template <typename T>
-inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params,
+inline void SpaceToDepth(const tflite_micro::SpaceToDepthParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const T* input_data,
 const RuntimeShape& unextended_output_shape,
@@ -75,6 +75,6 @@ inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h
CHANGED
@@ -22,19 +22,19 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const RuntimeShape& unextended_output_shape,
 SequentialTensorWriter<T>* writer) {
 ruy::profiler::ScopeLabel label("StridedSlice");

 // Note that the output_shape is not used herein.
-tflite::StridedSliceParams params_copy = op_params;
+tflite_micro::StridedSliceParams params_copy = op_params;

 TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 5);
 TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 5);
@@ -120,7 +120,7 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }

 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const T* input_data,
 const RuntimeShape& unextended_output_shape,
@@ -131,7 +131,7 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }

 template <typename T>
-inline void StridedSlice(const tflite::StridedSliceParams& op_params,
+inline void StridedSlice(const tflite_micro::StridedSliceParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const TfLiteTensor* input,
 const RuntimeShape& unextended_output_shape,
@@ -142,6 +142,6 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params,
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h
CHANGED
@@ -26,7 +26,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"

-namespace tflite {
+namespace tflite_micro {

 namespace reference_ops {

@@ -460,6 +460,6 @@ inline void SubWithActivation(
 }

 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro

 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
|