xmos-ai-tools 1.3.2.dev19__py3-none-macosx_11_0_arm64.whl → 1.3.2.dev37__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h +9 -9
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_common.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h +2 -2
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h +8 -8
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +3 -3
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h +4 -4
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h +5 -5
- xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/array.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/context_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +17 -17
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/min.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/quantization_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add_n.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +1 -1
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/leaky_relu.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/log_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/lstm_cell.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/neg.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pad.h +8 -8
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/quantize.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/requantize.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/round.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/select.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/slice.h +5 -5
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/space_to_depth.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/strided_slice.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/tanh.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/transpose_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +9 -9
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/benchmarks/micro_benchmark.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/flatbuffer_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activation_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/activations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_function_specializations.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_slicers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ceva/types.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/dequantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/hard_swish.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/leaky_relu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logical.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/logistic.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval.h +48 -48
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_eval_test.h +57 -57
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_tensor_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +15 -15
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/prelu.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/quantize.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/sub.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/conv_test_data.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h +7 -7
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/hifimini/fixedpoint_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_eval.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/lstm_shared.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_add.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_fully_connected.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pad.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_pooling.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reduce.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_svdf.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_helpers.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/memory_plan_struct.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocation_info.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_arena_constants.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +50 -50
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler_interface.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_resource_variable.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_time.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/tflite_size/src/flatbuffer_size_wrapper.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_allocator.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/recording_micro_interpreter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/system_setup.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +6 -6
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +4 -4
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/flatbuffer_conversions_bridge.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +3 -3
- xmos_ai_tools/runtime/include/tensorflow/lite/portable_type_to_tflitetype.h +2 -2
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +2731 -2731
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h +2 -2
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
- xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
- {xmos_ai_tools-1.3.2.dev19.data → xmos_ai_tools-1.3.2.dev37.data}/data/bin/xcore-opt +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/METADATA +2 -2
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/RECORD +206 -206
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/WHEEL +0 -0
- {xmos_ai_tools-1.3.2.dev19.dist-info → xmos_ai_tools-1.3.2.dev37.dist-info}/top_level.txt +0 -0
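The header diffs below are dominated by one mechanical change: the bundled TensorFlow Lite Micro runtime now lives in the C++ namespace tflite_micro instead of tflite (the hunks rename `namespace tflite {` / `} // namespace tflite` and re-qualify types such as tflite::DepthToSpaceParams). As a rough sketch only of what that implies for application code built against these headers — model_data, kTensorArenaSize, and RunInference are illustrative placeholders, not part of the package, and the assumption is that the usual TFLM classes carry over unchanged apart from the namespace:

    #include <cstdint>

    #include "tensorflow/lite/micro/micro_interpreter.h"
    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Placeholder arena; size and model bytes are illustrative only.
    constexpr int kTensorArenaSize = 16 * 1024;
    static uint8_t tensor_arena[kTensorArenaSize];

    TfLiteStatus RunInference(const uint8_t* model_data) {
      // Previously tflite::GetModel / tflite::MicroMutableOpResolver /
      // tflite::MicroInterpreter; only the namespace changes.
      const tflite_micro::Model* model = tflite_micro::GetModel(model_data);

      tflite_micro::MicroMutableOpResolver<1> resolver;
      resolver.AddFullyConnected();

      tflite_micro::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                                 kTensorArenaSize);
      if (interpreter.AllocateTensors() != kTfLiteOk) return kTfLiteError;
      return interpreter.Invoke();
    }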
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/arg_min_max.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -83,6 +83,6 @@ void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_matmul.h
CHANGED
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 namespace batch_matmul {
 
@@ -270,6 +270,6 @@ inline void BatchMatMul(const FullyConnectedParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 #include "ruy/profiler/instrumentation.h" // from @ruy
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 // TODO(b/135760455): Move this method anonymous namespace in a cc file.
@@ -96,6 +96,6 @@ inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/binary_function.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -86,6 +86,6 @@ inline void BinaryFunction(const RuntimeShape& input1_shape,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_args.h
CHANGED
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -51,6 +51,6 @@ void BroadcastArgs(const RuntimeShape& input1_shape, const T* input1_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/broadcast_to.h
CHANGED
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 template <int N>
 void BroadcastImpl(const NdArrayDesc<N>& input_desc, const char* input_data,
@@ -93,5 +93,5 @@ inline void BroadcastTo(const RuntimeShape& unextended_input_shape,
 last_broadcast_dim, TfLiteTypeGetSize(data_type));
 }
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/ceil.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -33,5 +33,5 @@ inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -266,6 +266,6 @@ TFLITE_COMPARISON_OP(LessEqual)
 #undef TFLITE_COMPARISON_OP
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/concatenation.h
CHANGED
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename Scalar>
@@ -123,7 +123,7 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
 const float scale = input_scale[i] * inverse_output_scale;
 const float bias = -input_zeropoint[i] * scale;
 for (int j = 0; j < copy_size; ++j) {
-const int32_t value = static_cast<int32_t>(tflite::TfLiteRound(
+const int32_t value = static_cast<int32_t>(tflite_micro::TfLiteRound(
 input_ptr[j] * scale + bias)) +
 output_zeropoint;
 output_ptr[j] = static_cast<uint8_t>(
@@ -136,6 +136,6 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -284,6 +284,6 @@ inline void HybridConvPerChannel(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/cumsum.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -170,6 +170,6 @@ inline void CumSum(const ArithmeticParams& params, const int8_t* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depth_to_space.h
CHANGED
@@ -17,11 +17,11 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
-inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
+inline void DepthToSpace(const tflite_micro::DepthToSpaceParams& op_params,
 const RuntimeShape& unextended_input_shape,
 const T* input_data,
 const RuntimeShape& unextended_output_shape,
@@ -74,6 +74,6 @@ inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline void DepthwiseConv(
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 // Used in tests and template parameters to control which version of depthwise
 // convolution is called. Primarily for reference code, and specializations
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/dequantize.h
CHANGED
@@ -22,13 +22,13 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
 // Dequantizes into a float without rounding.
 template <typename InputT, typename OutputT>
-inline void Dequantize(const tflite::DequantizationParams& op_params,
+inline void Dequantize(const tflite_micro::DequantizationParams& op_params,
 const RuntimeShape& input_shape,
 const InputT* input_data,
 const RuntimeShape& output_shape, OutputT* output_data) {
@@ -46,7 +46,7 @@ inline void Dequantize(const tflite::DequantizationParams& op_params,
 // Dequantizes per-channel quantized tensor to float.
 template <typename T>
 inline void PerChannelDequantize(
-const tflite::PerChannelDequantizationParams& op_params,
+const tflite_micro::PerChannelDequantizationParams& op_params,
 const RuntimeShape& input_shape, const T* input_data,
 const RuntimeShape& output_shape, float* output_data) {
 // Ensure flat size is same.
@@ -74,5 +74,5 @@ inline void PerChannelDequantize(
 
 } // namespace reference_ops
 
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/div.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -242,6 +242,6 @@ inline void Div(const ArithmeticParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/elu.h
CHANGED
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -32,6 +32,6 @@ inline void Elu(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/exp.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 #include "ruy/profiler/instrumentation.h" // from @ruy
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -33,6 +33,6 @@ inline void Exp(const T* input_data, const size_t num_elements,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fill.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -33,6 +33,6 @@ void Fill(const RuntimeShape& value_shape, const T* value_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -34,6 +34,6 @@ inline void Floor(const RuntimeShape& input_shape, const float* input_data,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_div.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 template <typename T>
@@ -30,6 +30,6 @@ T FloorDiv(T input1, T input2) {
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/floor_mod.h
CHANGED
@@ -18,7 +18,7 @@ limitations under the License.
 #include <cmath>
 #include <functional>
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
@@ -39,6 +39,6 @@ T FloorMod(T input1, T input2) {
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/fully_connected.h
CHANGED
@@ -23,7 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline void FullyConnected(
@@ -318,6 +318,6 @@ inline void ShuffledFullyConnected(
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/hard_swish.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_ops {
 
 inline int16_t SaturatingLeftShift(int16_t value, int amount) {
@@ -163,6 +163,6 @@ inline void HardSwish(const HardSwishParams& params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
CHANGED
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void CheckArithmeticParams(const ArithmeticParams& params) {
@@ -245,6 +245,6 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // Fixed-point per-channel-quantization convolution reference kernel.
@@ -236,6 +236,6 @@ inline void ConvPerChannel(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 inline void DepthwiseConvPerChannel(
 const DepthwiseParams& params, const int32_t* output_multiplier,
@@ -286,6 +286,6 @@ inline void DepthwiseConvHybridPerChannel(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // For per-channel functions, since it is defined in quantization spec that
@@ -121,6 +121,6 @@ void FullyConnected(const FullyConnectedParams& params,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
@@ -62,6 +62,6 @@ inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
 }
 }
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void Logistic(int32_t input_zero_point, int32_t input_range_radius,
@@ -116,6 +116,6 @@ inline void Logistic(int32_t input_multiplier, int32_t input_left_shift,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "ruy/profiler/instrumentation.h" // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // Maximum dimension supported by the broadcast mul operation.
@@ -190,5 +190,5 @@ inline void BroadcastMul4DSlow(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
CHANGED
@@ -20,7 +20,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline bool AveragePool(const PoolParams& params,
@@ -259,6 +259,6 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
CHANGED
@@ -21,7 +21,7 @@ limitations under the License.
 #include "fixedpoint/fixedpoint.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 inline void Tanh(int32_t input_zero_point, int32_t input_range_radius,
@@ -112,6 +112,6 @@ inline void Tanh(int32_t input_multiplier, int32_t input_left_shift,
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
CHANGED
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "tensorflow/lite/kernels/internal/common.h"
 
-namespace tflite {
+namespace tflite_micro {
 namespace reference_integer_ops {
 
 // Fixed-point per-channel-quantization transpose convolution reference kernel.
@@ -219,6 +219,6 @@ inline void TransposeConv(
 }
 
 } // namespace reference_integer_ops
-} // namespace tflite
+} // namespace tflite_micro
 
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/l2normalization.h
CHANGED
@@ -22,11 +22,11 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
-namespace tflite {
+namespace tflite_micro {
 
 namespace reference_ops {
 
-inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
+inline void L2Normalization(const tflite_micro::L2NormalizationParams& op_params,
 const RuntimeShape& input_shape,
 const float* input_data,
 const RuntimeShape& output_shape,
@@ -50,7 +50,7 @@ inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
 }
 }
 
-inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
+inline void L2Normalization(const tflite_micro::L2NormalizationParams& op_params,
 const RuntimeShape& input_shape,
 const uint8_t* input_data,
 const RuntimeShape& output_shape,
@@ -86,5 +86,5 @@ inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
 }
 
 } // namespace reference_ops
-} // namespace tflite
+} // namespace tflite_micro
 #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_