xmos-ai-tools 1.1.2.dev216__py3-none-macosx_11_0_arm64.whl → 1.1.2.dev236__py3-none-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. xmos_ai_tools/runtime/include/lib_nn/api/nn_layers.h +16 -0
  2. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_approximation.h +80 -0
  3. xmos_ai_tools/runtime/include/lib_nn/api/quadratic_interpolation.h +23 -0
  4. xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h +15 -15
  5. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/examples/micro_speech/simple_features/model.h → signal/micro/kernels/delay_flexbuffers_generated_data.h} +7 -9
  6. xmos_ai_tools/runtime/include/signal/micro/kernels/energy_flexbuffers_generated_data.h +28 -0
  7. xmos_ai_tools/runtime/include/signal/micro/kernels/fft_flexbuffers_generated_data.h +37 -0
  8. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_flexbuffers_generated_data.h +25 -0
  9. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_log_flexbuffers_generated_data.h +27 -0
  10. xmos_ai_tools/runtime/include/signal/micro/kernels/filter_bank_spectral_subtraction_flexbuffers_generated_data.h +26 -0
  11. xmos_ai_tools/runtime/include/signal/micro/kernels/framer_flexbuffers_generated_data.h +25 -0
  12. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/examples/micro_speech/simple_features/no_simple_features_data.h → signal/micro/kernels/irfft.h} +15 -7
  13. xmos_ai_tools/runtime/include/signal/micro/kernels/overlap_add_flexbuffers_generated_data.h +25 -0
  14. xmos_ai_tools/runtime/include/signal/micro/kernels/pcan_flexbuffers_generated_data.h +7 -0
  15. xmos_ai_tools/runtime/include/signal/micro/kernels/rfft.h +31 -0
  16. xmos_ai_tools/runtime/include/signal/micro/kernels/stacker_flexbuffers_generated_data.h +25 -0
  17. xmos_ai_tools/runtime/include/signal/micro/kernels/window_flexbuffers_generated_data.h +25 -0
  18. xmos_ai_tools/runtime/include/signal/src/circular_buffer.h +118 -0
  19. xmos_ai_tools/runtime/include/signal/src/complex.h +29 -0
  20. xmos_ai_tools/runtime/include/signal/src/energy.h +38 -0
  21. xmos_ai_tools/runtime/include/signal/src/fft_auto_scale.h +35 -0
  22. xmos_ai_tools/runtime/include/signal/src/filter_bank.h +69 -0
  23. xmos_ai_tools/runtime/include/signal/src/filter_bank_log.h +38 -0
  24. xmos_ai_tools/runtime/include/signal/src/filter_bank_spectral_subtraction.h +73 -0
  25. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/examples/micro_speech/main_functions.h → signal/src/filter_bank_square_root.h} +14 -17
  26. xmos_ai_tools/runtime/include/signal/src/irfft.h +84 -0
  27. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_common.h +49 -0
  28. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_float.h +31 -0
  29. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int16.h +30 -0
  30. xmos_ai_tools/runtime/include/signal/src/kiss_fft_wrappers/kiss_fft_int32.h +31 -0
  31. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/examples/micro_speech/micro_features/no_micro_features_data.h → signal/src/log.h} +13 -6
  32. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/python/interpreter/src/python_utils.h → signal/src/max_abs.h} +11 -11
  33. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/examples/micro_speech/micro_features/yes_micro_features_data.h → signal/src/msb.h} +15 -6
  34. xmos_ai_tools/runtime/include/signal/src/overlap_add.h +46 -0
  35. xmos_ai_tools/runtime/include/signal/src/pcan_argc_fixed.h +41 -0
  36. xmos_ai_tools/runtime/include/signal/src/rfft.h +85 -0
  37. xmos_ai_tools/runtime/include/signal/src/square_root.h +32 -0
  38. xmos_ai_tools/runtime/include/{tensorflow/lite/micro/python/interpreter/src/numpy_utils.h → signal/src/window.h} +13 -15
  39. xmos_ai_tools/runtime/include/signal/testdata/fft_test_data.h +48 -0
  40. xmos_ai_tools/runtime/include/tensorflow/lite/array.h +156 -0
  41. xmos_ai_tools/runtime/include/tensorflow/lite/builtin_ops.h +44 -0
  42. xmos_ai_tools/runtime/include/tensorflow/lite/c/c_api_types.h +6 -0
  43. xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h +8 -25
  44. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h +3 -3
  45. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h +15 -0
  46. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h +92 -3
  47. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h +61 -51
  48. xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h +302 -1
  49. xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h +78 -0
  50. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h +129 -43
  51. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/cppmath.h +2 -2
  52. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/portable_tensor.h +23 -4
  53. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/add.h +210 -151
  54. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/comparisons.h +9 -18
  55. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/conv.h +2 -0
  56. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +103 -72
  57. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +2 -0
  58. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +2 -63
  59. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +87 -26
  60. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/mul.h +129 -80
  61. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/reduce.h +42 -93
  62. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/resize_bilinear.h +5 -0
  63. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/reference/sub.h +249 -263
  64. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/runtime_shape.h +11 -1
  65. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/strided_slice_logic.h +5 -1
  66. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/tensor_ctypes.h +5 -10
  67. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/types.h +4 -2
  68. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h +25 -14
  69. xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h +14 -3
  70. xmos_ai_tools/runtime/include/tensorflow/lite/micro/debug_log.h +10 -3
  71. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_model_settings.h +37 -0
  72. xmos_ai_tools/runtime/include/tensorflow/lite/micro/fake_micro_context.h +7 -0
  73. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/add.h +6 -5
  74. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/circular_buffer.h +0 -3
  75. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv.h +19 -20
  76. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/conv_test.h +8 -31
  77. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/depthwise_conv.h +8 -8
  78. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/ethosu.h +1 -1
  79. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/fully_connected.h +9 -9
  80. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_runner.h +14 -9
  81. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/kernel_util.h +9 -4
  82. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/micro_ops.h +119 -100
  83. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/mul.h +4 -4
  84. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/pooling.h +8 -8
  85. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reduce.h +4 -4
  86. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/reshape.h +26 -0
  87. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/softmax.h +12 -16
  88. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/strided_slice.h +40 -0
  89. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/svdf.h +8 -7
  90. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.h +5 -5
  91. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa.h +2 -2
  92. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_conv.h +26 -21
  93. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_depthwise_conv.h +4 -4
  94. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h +2 -4
  95. xmos_ai_tools/runtime/include/tensorflow/lite/micro/kernels/xtensa/xtensa_softmax.h +2 -2
  96. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +5 -0
  97. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +4 -0
  98. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/micro_memory_planner.h +4 -0
  99. xmos_ai_tools/runtime/include/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h +4 -0
  100. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_allocator.h +23 -8
  101. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_common.h +38 -0
  102. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_context.h +23 -65
  103. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_graph.h +15 -57
  104. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter.h +16 -5
  105. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_context.h +125 -0
  106. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_interpreter_graph.h +110 -0
  107. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_log.h +6 -8
  108. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_mutable_op_resolver.h +114 -32
  109. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_op_resolver.h +6 -5
  110. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_profiler.h +1 -1
  111. xmos_ai_tools/runtime/include/tensorflow/lite/micro/mock_micro_graph.h +1 -1
  112. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/python_ops_resolver.h +21 -0
  113. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helper_custom_ops.h +3 -4
  114. xmos_ai_tools/runtime/include/tensorflow/lite/micro/test_helpers.h +28 -12
  115. xmos_ai_tools/runtime/include/tensorflow/lite/micro/testing/micro_test.h +1 -0
  116. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h +273 -0
  117. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h +41 -0
  118. xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h +127 -0
  119. xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h +9139 -5010
  120. xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a +0 -0
  121. xmos_ai_tools/runtime/lib/libxtflitemicro.a +0 -0
  122. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib +0 -0
  123. xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib +0 -0
  124. {xmos_ai_tools-1.1.2.dev216.data → xmos_ai_tools-1.1.2.dev236.data}/data/bin/xcore-opt +0 -0
  125. {xmos_ai_tools-1.1.2.dev216.dist-info → xmos_ai_tools-1.1.2.dev236.dist-info}/METADATA +3 -4
  126. {xmos_ai_tools-1.1.2.dev216.dist-info → xmos_ai_tools-1.1.2.dev236.dist-info}/RECORD +128 -105
  127. {xmos_ai_tools-1.1.2.dev216.dist-info → xmos_ai_tools-1.1.2.dev236.dist-info}/WHEEL +1 -1
  128. xmos_ai_tools/runtime/include/tensorflow/lite/core/api/op_resolver.h +0 -129
  129. xmos_ai_tools/runtime/include/tensorflow/lite/micro/all_ops_resolver.h +0 -38
  130. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/audio_provider.h +0 -44
  131. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/command_responder.h +0 -30
  132. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/feature_provider.h +0 -50
  133. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h +0 -30
  134. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h +0 -43
  135. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h +0 -29
  136. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h +0 -29
  137. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h +0 -151
  138. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h +0 -29
  139. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h +0 -29
  140. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_model_settings.h +0 -43
  141. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h +0 -29
  142. xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_simple_features_data.h +0 -23
  143. xmos_ai_tools/runtime/include/tensorflow/lite/micro/micro_string.h +0 -33
  144. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/interpreter_wrapper.h +0 -51
  145. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/pybind11_lib.h +0 -64
  146. xmos_ai_tools/runtime/include/tensorflow/lite/micro/python/interpreter/src/shared_library.h +0 -40
  147. {xmos_ai_tools-1.1.2.dev216.dist-info → xmos_ai_tools-1.1.2.dev236.dist-info}/top_level.txt +0 -0
xmos_ai_tools/runtime/include/tensorflow/lite/core/api/op_resolver.h
@@ -1,129 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
- #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
- #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
-
- #include <functional>
- #include <memory>
- #include <vector>
-
- #include "tensorflow/lite/core/api/error_reporter.h"
- #include "tensorflow/lite/core/c/common.h"
- #include "tensorflow/lite/schema/schema_generated.h"
-
- namespace tflite {
-
- /// Abstract interface that returns TfLiteRegistrations given op codes or custom
- /// op names. This is the mechanism that ops being referenced in the flatbuffer
- /// model are mapped to executable function pointers (TfLiteRegistrations).
- class OpResolver {
- public:
- /// Finds the op registration for a builtin operator by enum code.
- virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
- int version) const = 0;
- /// Finds the op registration of a custom operator by op name.
- virtual const TfLiteRegistration* FindOp(const char* op,
- int version) const = 0;
-
- // Represents a sequence of delegates.
- using TfLiteDelegatePtrVector =
- std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
-
- // Returns optional delegates for resolving and handling ops in the flatbuffer
- // model. This may be used in addition to the standard TfLiteRegistration
- // lookup for graph resolution.
- // WARNING: This API is deprecated, GetDelegateCreators is preferred.
- virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
- return {};
- }
-
- // Represents a function that creates a TfLite delegate instance.
- using TfLiteDelegateCreator =
- std::function<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
- TfLiteContext* /*context*/)>;
-
- // Represents a sequence of delegate creator functions.
- using TfLiteDelegateCreators = std::vector<TfLiteDelegateCreator>;
-
- // Returns a vector of delegate creators to create optional delegates for
- // resolving and handling ops in the flatbuffer model. This may be used in
- // addition to the standard TfLiteRegistration lookup for graph resolution.
- //
- // Note that this method is not used (will not be called) if you are using
- // TF Lite in Google Play Services; the GetOpaqueDelegateCreators method
- // (see below) is used for that case.
- virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; }
-
- // TODO(b/202712825): it would be nice if we could avoid the need for separate
- // "opaque" types & methods for use only with TF Lite in Google Play Services.
-
- // Represents an opaque delegate instance.
- // WARNING: Experimental interface, subject to change.
- using TfLiteOpaqueDelegatePtr =
- std::unique_ptr<TfLiteOpaqueDelegate, void (*)(TfLiteOpaqueDelegate*)>;
-
- // Represents a function that creates an opaque delegate instance.
- // WARNING: Experimental interface, subject to change.
- using TfLiteOpaqueDelegateCreator =
- std::function<TfLiteOpaqueDelegatePtr(int /*num_threads*/)>;
-
- // Represents a sequence of opaque delegate creator functions.
- // WARNING: Experimental interface, subject to change.
- using TfLiteOpaqueDelegateCreators = std::vector<TfLiteOpaqueDelegateCreator>;
-
- // Returns a vector of opaque delegate creators to create optional opaque
- // delegates for resolving and handling ops in the flatbuffer model. This may
- // be used in addition to the standard TfLiteRegistration lookup for graph
- // resolution.
- //
- // Note that this method will be called only if you are using TF Lite in
- // Google Play Services; if you are using regular TF Lite, GetDelegateCreators
- // (see above) is used instead.
- //
- // WARNING: Experimental interface, subject to change.
- virtual TfLiteOpaqueDelegateCreators GetOpaqueDelegateCreators() const {
- return {};
- }
-
- virtual ~OpResolver() {}
-
- private:
- /// Returns true if this OpResolver may contain any "user defined" ops.
- /// By "user defined" ops, we mean any op definitions other than those
- /// contained in tflite::ops::builtin::BuiltinOpResolver.
- ///
- /// If this method returns true, it doesn't necessarily mean that the
- /// OpResolver contains a user-defined op, just that the absence of
- /// user-defined ops can't be guaranteed.
- ///
- /// Note that "user-defined" ops are not the same as "custom" ops;
- /// BuiltinOpResolver may support certain "custom" ops, in addition to
- /// "builtin" ops, and may not support all of the "builtin" op enum values.
- virtual bool MayContainUserDefinedOps() const { return true; }
-
- friend class OpResolverInternal;
- };
-
- // Handles the logic for converting between an OperatorCode structure extracted
- // from a flatbuffer and information about a registered operator
- // implementation.
- TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
- const OpResolver& op_resolver,
- ErrorReporter* error_reporter,
- const TfLiteRegistration** registration);
-
- } // namespace tflite
-
- #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
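
The removed op_resolver.h defined the abstract lookup from flatbuffer operator codes (or custom-op names) to TfLiteRegistration function pointers. Purely as a hedged illustration of that contract, a minimal resolver only needs the two FindOp overloads quoted above; the SingleOpResolver class and its registration table below are hypothetical and are not part of this package.

    // Illustrative sketch of the OpResolver contract (hypothetical class).
    class SingleOpResolver : public tflite::OpResolver {
     public:
      const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                       int version) const override {
        // Serve exactly one builtin operator; everything else is unknown.
        return (op == tflite::BuiltinOperator_CONV_2D) ? &registration_ : nullptr;
      }
      const TfLiteRegistration* FindOp(const char* op, int version) const override {
        return nullptr;  // No custom ops in this sketch.
      }

     private:
      TfLiteRegistration registration_{};  // Would hold real kernel hooks in practice.
    };
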
xmos_ai_tools/runtime/include/tensorflow/lite/micro/all_ops_resolver.h
@@ -1,38 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
- #ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
- #define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
-
- #include "tensorflow/lite/micro/compatibility.h"
- #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
-
- namespace tflite {
-
- // The magic number in the template parameter is the maximum number of ops that
- // can be added to AllOpsResolver. It can be increased if needed. And most
- // applications that care about the memory footprint will want to directly use
- // MicroMutableOpResolver and have an application specific template parameter.
- // The examples directory has sample code for this.
- class AllOpsResolver : public MicroMutableOpResolver<128> {
- public:
- AllOpsResolver();
-
- private:
- TF_LITE_REMOVE_VIRTUAL_DELETE
- };
-
- } // namespace tflite
-
- #endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_
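
With AllOpsResolver removed, the comment quoted above already points at the replacement: register only the operators a model needs through MicroMutableOpResolver, which is still shipped in this wheel (file 108 in the list above). A minimal sketch follows; the operator choices are illustrative, not something this package mandates.

    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

    // Register only the operators the model actually uses; the template
    // parameter is the maximum number of operators this resolver can hold.
    void RegisterOps(tflite::MicroMutableOpResolver<4>& resolver) {
      resolver.AddConv2D();          // illustrative choices -- match your model
      resolver.AddFullyConnected();
      resolver.AddSoftmax();
      resolver.AddReshape();
    }
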
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/audio_provider.h
@@ -1,44 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
-
- #include "tensorflow/lite/c/common.h"
-
- // This is an abstraction around an audio source like a microphone, and is
- // expected to return 16-bit PCM sample data for a given point in time. The
- // sample data itself should be used as quickly as possible by the caller, since
- // to allow memory optimizations there are no guarantees that the samples won't
- // be overwritten by new data in the future. In practice, implementations should
- // ensure that there's a reasonable time allowed for clients to access the data
- // before any reuse.
- // The reference implementation can have no platform-specific dependencies, so
- // it just returns an array filled with zeros. For real applications, you should
- // ensure there's a specialized implementation that accesses hardware APIs.
- TfLiteStatus GetAudioSamples(int start_ms, int duration_ms,
- int* audio_samples_size, int16_t** audio_samples);
-
- // Returns the time that audio data was last captured in milliseconds. There's
- // no contract about what time zero represents, the accuracy, or the granularity
- // of the result. Subsequent calls will generally not return a lower value, but
- // even that's not guaranteed if there's an overflow wraparound.
- // The reference implementation of this function just returns a constantly
- // incrementing value for each call, since it would need a non-portable platform
- // call to access time information. For real applications, you'll need to write
- // your own platform-specific implementation.
- int32_t LatestAudioTimestamp();
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
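
The two declarations above fix the example's audio-capture calling convention. A minimal caller sketch based only on those signatures; the 30 ms window length is an assumption taken from the micro_speech settings, not from this diff.

    // Sketch of a caller: GetAudioSamples fills in a pointer to 16-bit PCM
    // samples plus the number of samples actually returned.
    TfLiteStatus CaptureLatestWindow(int16_t** samples, int* sample_count) {
      const int32_t now_ms = LatestAudioTimestamp();
      // Ask for the most recent 30 ms of audio.
      return GetAudioSamples(now_ms - 30, 30, sample_count, samples);
    }
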
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/command_responder.h
@@ -1,30 +0,0 @@
- /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- // Provides an interface to take an action based on an audio command.
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
-
- #include "tensorflow/lite/c/common.h"
-
- // Called every time the results of an audio recognition run are available. The
- // human-readable name of any recognized command is in the `found_command`
- // argument, `score` has the numerical confidence, and `is_new_command` is set
- // if the previous command was different to this one.
- void RespondToCommand(int32_t current_time, const char* found_command,
- uint8_t score, bool is_new_command);
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_COMMAND_RESPONDER_H_
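
The callback above is the integration point an application provides. A hedged sketch of one possible implementation; logging via MicroPrintf is an assumption, and any output mechanism would do.

    #include "tensorflow/lite/micro/micro_log.h"

    // Minimal responder: only report commands that are new since the last call.
    void RespondToCommand(int32_t current_time, const char* found_command,
                          uint8_t score, bool is_new_command) {
      if (is_new_command) {
        MicroPrintf("Heard %s (score %d) at %dms", found_command, score,
                    current_time);
      }
    }
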
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/feature_provider.h
@@ -1,50 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
-
- #include "tensorflow/lite/c/common.h"
-
- // Binds itself to an area of memory intended to hold the input features for an
- // audio-recognition neural network model, and fills that data area with the
- // features representing the current audio input, for example from a microphone.
- // The audio features themselves are a two-dimensional array, made up of
- // horizontal slices representing the frequencies at one point in time, stacked
- // on top of each other to form a spectrogram showing how those frequencies
- // changed over time.
- class FeatureProvider {
- public:
- // Create the provider, and bind it to an area of memory. This memory should
- // remain accessible for the lifetime of the provider object, since subsequent
- // calls will fill it with feature data. The provider does no memory
- // management of this data.
- FeatureProvider(int feature_size, int8_t* feature_data);
- ~FeatureProvider();
-
- // Fills the feature data with information from audio inputs, and returns how
- // many feature slices were updated.
- TfLiteStatus PopulateFeatureData(int32_t last_time_in_ms, int32_t time_in_ms,
- int* how_many_new_slices);
-
- private:
- int feature_size_;
- int8_t* feature_data_;
- // Make sure we don't try to use cached information if this is the first call
- // into the provider.
- bool is_first_run_;
- };
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
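
Per the constructor comment above, typical use is to hand the provider a long-lived buffer and poll it as time advances. A sketch assuming the kFeatureElementCount constant from the removed micro_model_settings.h; the names here are illustrative.

    // The provider writes int8 spectrogram slices into this caller-owned buffer.
    TfLiteStatus UpdateFeatures(int32_t previous_time_ms, int32_t current_time_ms,
                                int* how_many_new_slices) {
      static int8_t feature_buffer[kFeatureElementCount];
      static FeatureProvider feature_provider(kFeatureElementCount, feature_buffer);
      return feature_provider.PopulateFeatureData(previous_time_ms, current_time_ms,
                                                  how_many_new_slices);
    }
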
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h
@@ -1,30 +0,0 @@
- /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
-
- #include "tensorflow/lite/c/common.h"
-
- // Sets up any resources needed for the feature generation pipeline.
- TfLiteStatus InitializeMicroFeatures();
-
- // Converts audio sample data into a more compact form that's appropriate for
- // feeding into a neural network.
- TfLiteStatus GenerateMicroFeatures(const int16_t* input, int input_size,
- int output_size, int8_t* output,
- size_t* num_samples_read);
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_FEATURES_GENERATOR_H_
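
The pipeline implied by these declarations is: call InitializeMicroFeatures() once at startup, then call GenerateMicroFeatures() on each audio window to produce one spectrogram slice. A hedged sketch; the slice size is taken from the removed model-settings header.

    // Produces a single kFeatureSliceSize-element slice from one audio window.
    TfLiteStatus ComputeSlice(const int16_t* audio, int audio_size, int8_t* slice) {
      size_t num_samples_read = 0;
      return GenerateMicroFeatures(audio, audio_size, kFeatureSliceSize, slice,
                                   &num_samples_read);
    }
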
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h
@@ -1,43 +0,0 @@
- /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
-
- // Keeping these as constant expressions allow us to allocate fixed-sized arrays
- // on the stack for our working memory.
-
- // The size of the input time series data we pass to the FFT to produce the
- // frequency information. This has to be a power of two, and since we're dealing
- // with 30ms of 16KHz inputs, which means 480 samples, this is the next value.
- constexpr int kMaxAudioSampleSize = 512;
- constexpr int kAudioSampleFrequency = 16000;
-
- // The following values are derived from values used during model training.
- // If you change the way you preprocess the input, update all these constants.
- constexpr int kFeatureSliceSize = 40;
- constexpr int kFeatureSliceCount = 49;
- constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
- constexpr int kFeatureSliceStrideMs = 20;
- constexpr int kFeatureSliceDurationMs = 30;
-
- // Variables for the model's output categories.
- constexpr int kSilenceIndex = 0;
- constexpr int kUnknownIndex = 1;
- // If you modify the output categories, you need to update the following values.
- constexpr int kCategoryCount = 4;
- extern const char* kCategoryLabels[kCategoryCount];
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_MICRO_MODEL_SETTINGS_H_
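
As a quick check on the constants above: one model input is kFeatureSliceSize * kFeatureSliceCount = 40 * 49 = 1960 int8 values, just under 2 KB of feature memory, with each slice computed from a 30 ms window advanced in 20 ms steps. The same arithmetic restated as a compile-time check:

    // Sanity check of the input size implied by the constants quoted above.
    static_assert(kFeatureElementCount == 40 * 49,
                  "micro_speech expects 1960 int8 feature elements");
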
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
@@ -1,29 +0,0 @@
- /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- // This data was extracted from the larger feature data held in
- // no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
- // This is the expected result of running the sample data in
- // no_30ms_sample_data.cc through the preprocessing pipeline.
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
-
- #include <cstdint>
-
- constexpr int g_no_feature_data_slice_size = 40;
- extern const int8_t g_no_feature_data_slice[];
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
@@ -1,29 +0,0 @@
- /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- // This data was extracted from the larger feature data held in
- // no_micro_features_data.cc and consists of the 26th spectrogram slice of 40
- // values. This is the expected result of running the sample data in
- // yes_30ms_sample_data.cc through the preprocessing pipeline.
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
-
- #include <cstdint>
-
- constexpr int g_yes_feature_data_slice_size = 40;
- extern const int8_t g_yes_feature_data_slice[];
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h
@@ -1,151 +0,0 @@
- /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
-
- #include <cstdint>
-
- #include "tensorflow/lite/c/common.h"
- #include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
- #include "tensorflow/lite/micro/micro_log.h"
-
- // Partial implementation of std::dequeue, just providing the functionality
- // that's needed to keep a record of previous neural network results over a
- // short time period, so they can be averaged together to produce a more
- // accurate overall prediction. This doesn't use any dynamic memory allocation
- // so it's a better fit for microcontroller applications, but this does mean
- // there are hard limits on the number of results it can store.
- class PreviousResultsQueue {
- public:
- PreviousResultsQueue() : front_index_(0), size_(0) {}
-
- // Data structure that holds an inference result, and the time when it
- // was recorded.
- struct Result {
- Result() : time_(0), scores() {}
- Result(int32_t time, int8_t* input_scores) : time_(time) {
- for (int i = 0; i < kCategoryCount; ++i) {
- scores[i] = input_scores[i];
- }
- }
- int32_t time_;
- int8_t scores[kCategoryCount];
- };
-
- int size() { return size_; }
- bool empty() { return size_ == 0; }
- Result& front() { return results_[front_index_]; }
- Result& back() {
- int back_index = front_index_ + (size_ - 1);
- if (back_index >= kMaxResults) {
- back_index -= kMaxResults;
- }
- return results_[back_index];
- }
-
- void push_back(const Result& entry) {
- if (size() >= kMaxResults) {
- MicroPrintf("Couldn't push_back latest result, too many already!");
- return;
- }
- size_ += 1;
- back() = entry;
- }
-
- Result pop_front() {
- if (size() <= 0) {
- MicroPrintf("Couldn't pop_front result, none present!");
- return Result();
- }
- Result result = front();
- front_index_ += 1;
- if (front_index_ >= kMaxResults) {
- front_index_ = 0;
- }
- size_ -= 1;
- return result;
- }
-
- // Most of the functions are duplicates of dequeue containers, but this
- // is a helper that makes it easy to iterate through the contents of the
- // queue.
- Result& from_front(int offset) {
- if ((offset < 0) || (offset >= size_)) {
- MicroPrintf("Attempt to read beyond the end of the queue!");
- offset = size_ - 1;
- }
- int index = front_index_ + offset;
- if (index >= kMaxResults) {
- index -= kMaxResults;
- }
- return results_[index];
- }
-
- private:
- static constexpr int kMaxResults = 50;
- Result results_[kMaxResults];
-
- int front_index_;
- int size_;
- };
-
- // This class is designed to apply a very primitive decoding model on top of the
- // instantaneous results from running an audio recognition model on a single
- // window of samples. It applies smoothing over time so that noisy individual
- // label scores are averaged, increasing the confidence that apparent matches
- // are real.
- // To use it, you should create a class object with the configuration you
- // want, and then feed results from running a TensorFlow model into the
- // processing method. The timestamp for each subsequent call should be
- // increasing from the previous, since the class is designed to process a stream
- // of data over time.
- class RecognizeCommands {
- public:
- // labels should be a list of the strings associated with each one-hot score.
- // The window duration controls the smoothing. Longer durations will give a
- // higher confidence that the results are correct, but may miss some commands.
- // The detection threshold has a similar effect, with high values increasing
- // the precision at the cost of recall. The minimum count controls how many
- // results need to be in the averaging window before it's seen as a reliable
- // average. This prevents erroneous results when the averaging window is
- // initially being populated for example. The suppression argument disables
- // further recognitions for a set time after one has been triggered, which can
- // help reduce spurious recognitions.
- explicit RecognizeCommands(int32_t average_window_duration_ms = 1000,
- uint8_t detection_threshold = 200,
- int32_t suppression_ms = 1500,
- int32_t minimum_count = 3);
-
- // Call this with the results of running a model on sample data.
- TfLiteStatus ProcessLatestResults(const TfLiteTensor* latest_results,
- const int32_t current_time_ms,
- const char** found_command, uint8_t* score,
- bool* is_new_command);
-
- private:
- // Configuration
- int32_t average_window_duration_ms_;
- uint8_t detection_threshold_;
- int32_t suppression_ms_;
- int32_t minimum_count_;
-
- // Working variables
- PreviousResultsQueue previous_results_;
- const char* previous_top_label_;
- int32_t previous_top_label_time_;
- };
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_RECOGNIZE_COMMANDS_H_
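
The usage comment above maps onto a small amount of glue code. A sketch using only the quoted constructor defaults and the ProcessLatestResults signature; the output tensor and timestamps would come from the surrounding application.

    // One recognizer instance smooths scores across successive inference runs.
    RecognizeCommands recognizer(/*average_window_duration_ms=*/1000,
                                 /*detection_threshold=*/200,
                                 /*suppression_ms=*/1500,
                                 /*minimum_count=*/3);

    TfLiteStatus HandleOutput(const TfLiteTensor* output, int32_t current_time_ms) {
      const char* found_command = nullptr;
      uint8_t score = 0;
      bool is_new_command = false;
      TfLiteStatus status = recognizer.ProcessLatestResults(
          output, current_time_ms, &found_command, &score, &is_new_command);
      if (status == kTfLiteOk) {
        RespondToCommand(current_time_ms, found_command, score, is_new_command);
      }
      return status;
    }
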
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
@@ -1,29 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- // This data was extracted from the larger feature data held in
- // no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
- // This is the expected result of running the sample data in
- // no_30ms_sample_data.cc through the preprocessing pipeline.
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
-
- #include <cstdint>
-
- constexpr int g_no_power_spectrum_data_size = 43;
- extern const uint8_t g_no_power_spectrum_data[];
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
xmos_ai_tools/runtime/include/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.h
@@ -1,29 +0,0 @@
- /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ==============================================================================*/
-
- #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_
- #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_
-
- #include "tensorflow/lite/c/common.h"
-
- // Converts audio sample data into a more compact form that's appropriate for
- // feeding into a neural network. There are reference implementations that use
- // both floating point and fixed point available, but because the calculations
- // involved can be time-consuming, it's recommended that you use or write
- // specialized versions for your platform.
- TfLiteStatus GenerateSimpleFeatures(const int16_t* input, int input_size,
- int output_size, uint8_t* output);
-
- #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_SIMPLE_FEATURES_GENERATOR_H_