xmos-ai-tools 1.3.2.dev213__py3-none-macosx_10_15_universal2.whl → 1.3.2.dev232__py3-none-macosx_10_15_universal2.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
@@ -60,10 +60,9 @@ struct tflite_micro_objects;
   */
  typedef struct inference_engine {
  uint32_t *UNSAFE
- memory_primary; ///< Pointer to space for tensor arena and optional model
+ tensor_arena; ///< Pointer to space for tensor arena
  uint32_t *UNSAFE
- memory_secondary; ///< Pointer to secondary space. If null,
- // use the primary for model and tensor arena
+ external_memory; ///< Pointer to external memory
  uint32_t
  outputs; ///< Number of output tensors, initialised on loading a model.
  uint32_t inputs; ///< Number of input tensors, initialised on loading a model.
@@ -78,9 +77,9 @@ typedef struct inference_engine {
  uint32_t output_size; ///< Total size of all outputs - TODO: obsolete?
  uint32_t input_size; ///< Total size of all inputs - TODO: obsolete?
  uint32_t
- memory_primary_bytes; ///< Number of bytes available in primary memory
+ tensor_arena_size; ///< Number of bytes availabe to be used for tensor_arena
  uint32_t
- memory_secondary_bytes; ///< Number of bytes available in secondary memory
+ external_memory_size; ///< Number of bytes available in external memory
  uint32_t
  output_times_size; ///< Number of bytes available to store profiling data
  uint32_t operators_size; ///< ???
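
The two inference_engine.h hunks above rename the engine's memory fields: memory_primary/memory_primary_bytes become tensor_arena/tensor_arena_size, and memory_secondary/memory_secondary_bytes become external_memory/external_memory_size. A minimal sketch of how setup code might populate the renamed fields; the buffer, DDR address argument, function name, and include path are assumptions for illustration, not taken from the diff:

    // Hypothetical sketch only: the field names come from the hunks above,
    // everything else (buffer size, function name, include path) is assumed.
    #include <stdint.h>
    #include "lib_tflite_micro/api/inference_engine.h"  // header path as listed in RECORD below

    static uint32_t arena_words[64 * 1024 / sizeof(uint32_t)];  // on-chip tensor arena

    static void configure_engine(struct inference_engine *ie,
                                 uint32_t *ddr_base, uint32_t ddr_bytes) {
      ie->tensor_arena = arena_words;               // was: memory_primary
      ie->tensor_arena_size = sizeof(arena_words);  // was: memory_primary_bytes
      ie->external_memory = ddr_base;               // was: memory_secondary
      ie->external_memory_size = ddr_bytes;         // was: memory_secondary_bytes
    }
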
@@ -12,6 +12,7 @@ struct xc_context_config_t {
  int model_thread_count;
  thread_info_t thread_info;
  void *UNSAFE weights_data_ptr; // DDR ptr or channel to flash/tile server.
+ void *UNSAFE paging_ptr; // DDR ptr for paging in/out tensor arena.
  };
 
  #endif // XCORE_CONFIG_H_
@@ -38,6 +38,9 @@ constexpr const char *XC_mean_OpCode = "XC_mean";
  constexpr const char *XC_meani16_OpCode = "XC_meani16";
  constexpr const char *XC_expand_8_to_16_OpCode = "XC_expand_8_to_16";
  constexpr const char *XC_no_op_OpCode = "XC_no_op";
+ constexpr const char *XC_store_tensor_OpCode = "XC_store_tensor";
+ constexpr const char *XC_load_tensor_OpCode = "XC_load_tensor";
+
  // Binarized ops
  constexpr const char *XC_bsign_8_OpCode = "XC_bsign_8";
 
@@ -70,6 +73,9 @@ TFLMRegistration *Register_XC_mean();
  TFLMRegistration *Register_XC_meani16();
  TFLMRegistration *Register_XC_expand_8_to_16();
  TFLMRegistration *Register_XC_no_op();
+ TFLMRegistration *Register_XC_store_tensor();
+ TFLMRegistration *Register_XC_load_tensor();
+
  // Binarized ops
  TFLMRegistration *Register_XC_bsign_8();
 
@@ -8,15 +8,11 @@
  #include "tensorflow/lite/c/common.h"
  #include "tensorflow/lite/micro/kernels/kernel_util.h"
  #include "tensorflow/lite/micro/memory_helpers.h"
+ #include "tensorflow/lite/micro/micro_utils.h"
 
  namespace tflite_micro {
  namespace ops {
  namespace micro {
-
- struct XCoreOpData {
-   const char *name;
- };
-
  namespace xcore {
  /* Unpack an integer data type from a byte array
   * T data type to unpack
@@ -58,7 +54,7 @@ static inline TfLiteStatus request_scratch_if_needed(TfLiteContext *context,
  static inline TfLiteStatus request_scratch_if_needed(TfLiteContext *context,
  const TfLiteTensor *tensor,
  int &scratch_idx) {
- return request_scratch_if_needed(context, tensor->data.data, tensor->bytes,
+ return request_scratch_if_needed(context, tensor->data.data, tflite_micro::EvalTensorBytes((const TfLiteEvalTensor*)tensor),
  scratch_idx);
  }
 
@@ -21,7 +21,7 @@ limitations under the License.
  namespace tflite_micro {
 
  // Resets a variable tensor to the default value.
- TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
+ TfLiteStatus ResetVariableTensor(TfLiteContext* context, TfLiteTensor* tensor);
 
  } // namespace tflite_micro
 
@@ -47,6 +47,7 @@ limitations under the License.
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>
+ #include <assert.h>
 
  #include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export
 
@@ -590,21 +591,6 @@ typedef struct TfLiteTensor {
  // Quantization information.
  TfLiteQuantizationParams params;
 
- // The number of bytes required to store the data of this Tensor. I.e.
- // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
- // type is kTfLiteFloat32 and dims = {3, 2} then
- // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
- size_t bytes;
-
- // How memory is mapped
- // kTfLiteMmapRo: Memory mapped read only.
- // i.e. weights
- // kTfLiteArenaRw: Arena allocated read write memory
- // (i.e. temporaries, outputs).
- TfLiteAllocationType allocation_type;
-
- // True if the tensor is a variable.
- bool is_variable;
  } TfLiteTensor;
 
  // Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
@@ -906,6 +892,16 @@ typedef struct TfLiteContext {
  TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
  int tensor_idx);
 
+ // TODO
+ // New interface to avoid storing an is_variable bool
+ bool (*IsVariableTensor)(struct TfLiteContext* context,
+ TfLiteTensor* tensor);
+
+ // TODO
+ // New interface to avoid storing allocation type in TfLiteTensor
+ bool (*IsConstantTensor)(struct TfLiteContext* context,
+ TfLiteTensor* tensor);
+
  // Retrieves named metadata buffer from the TFLite model.
  // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
  // Model: that is, there exists a `metadata` entry with given `name` string.
@@ -1488,7 +1484,9 @@ TfLiteStatus TfLiteTensorVariantRealloc(TfLiteTensor* t,
  new_vd = new VariantType(std::forward<VariantArgs>(args)...);
  }
  t->data.data = static_cast<VariantData*>(new_vd);
- t->allocation_type = kTfLiteVariantObject;
+
+ assert(false && "Disabled function!");
+ // t->allocation_type = kTfLiteVariantObject;
  return kTfLiteOk;
  }
 
@@ -16,6 +16,7 @@ limitations under the License.
  #define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
 
  #include <stdint.h>
+ #include <assert.h>
 
  #include <limits>
  #ifndef TF_LITE_STATIC_MEMORY
@@ -200,19 +201,27 @@ inline int64_t NumElements(const TfLiteTensor* t) {
  // persistent-read-only, which would be useful for most tensor kernels that
  // are potentially dynamic based on the input tensor value availability at the
  // time of prepare.
- inline bool IsConstantTensor(const TfLiteTensor* tensor) {
+ inline bool IsConstantTensor(TfLiteContext *context, TfLiteTensor* tensor) {
+ #ifndef NO_INTERPRETER
  return tensor->allocation_type == kTfLiteMmapRo;
+ #else
+ return context->IsConstantTensor(context, tensor);
+ #endif
  }
 
  inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) {
- return IsConstantTensor(tensor) ||
- (tensor->allocation_type == kTfLitePersistentRo);
+ assert(false && "Disabled function!");
+ return false;
+ // return IsConstantTensor(tensor) ||
+ // (tensor->allocation_type == kTfLitePersistentRo);
  }
 
  // Determines whether tensor is dynamic. Note that a tensor can be non-const and
  // not dynamic. This function specifically checks for a dynamic tensor.
  inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
- return tensor->allocation_type == kTfLiteDynamic;
+ assert(false && "Disabled function!");
+ return false;
+ // return tensor->allocation_type == kTfLiteDynamic;
  }
  #ifndef TF_LITE_STATIC_MEMORY
  // Sets tensor to dynamic.
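
Together, the common.h and kernel_util.h hunks move tensor metadata (bytes, allocation_type, is_variable) out of TfLiteTensor and behind callbacks on TfLiteContext: with NO_INTERPRETER defined, IsConstantTensor() forwards to context->IsConstantTensor instead of reading tensor->allocation_type, so kernel code must pass the context through. A minimal call-site sketch, assuming kernel_util.h keeps these helpers in the tflite_micro namespace; the function and variable names are illustrative, not from the diff:

    // Hypothetical kernel-side sketch: constness is now resolved through the
    // context callback added in common.h, not through tensor->allocation_type.
    #include "tensorflow/lite/kernels/kernel_util.h"

    static bool input_is_compile_time_constant(TfLiteContext *context,
                                               TfLiteTensor *input) {
      // Old form: input->allocation_type == kTfLiteMmapRo
      return tflite_micro::IsConstantTensor(context, input);
    }
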
@@ -7689,7 +7689,7 @@ struct Conv2DOptionsT : public flatbuffers::NativeTable {
  tflite_micro::ActivationFunctionType fused_activation_function = tflite_micro::ActivationFunctionType_NONE;
  int32_t dilation_w_factor = 1;
  int32_t dilation_h_factor = 1;
- tflite_micro::TensorType quantized_bias_type = tflite_micro::TensorType_FLOAT32;
+ tflite_micro::TensorType quantized_bias_type = tflite_micro::TensorType_INT32;
  };
 
  struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
Binary file
@@ -57,6 +57,7 @@ class TFLMHostInterpreter:
  lib.new_interpreter.restype = ctypes.c_void_p
  lib.new_interpreter.argtypes = [
  ctypes.c_size_t,
+ ctypes.c_size_t,
  ]
 
  lib.print_memory_plan.restype = None
@@ -127,7 +128,7 @@ class TFLMHostInterpreter:
  running concurrently. Defaults to 0 for use with a single model.
  """
  max_model_size = 50000000
- self.obj = lib.new_interpreter(max_model_size)
+ self.obj = lib.new_interpreter(max_model_size, max_model_size)
  currentModel = None
 
  for model in self.models:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: xmos_ai_tools
- Version: 1.3.2.dev213
+ Version: 1.3.2.dev232
  Summary: XMOS AI Tools
  Home-page: https://github.com/xmos/ai_tools
  Author: XMOS
@@ -41,4 +41,4 @@ Dynamic: summary
  Documentation
  -------------
 
- Click [here](https://github.com/xmos/ai_tools/blob/a7702f071fc2738e6f26eb570e946089b3306c3c/README.md) for documentation on using xmos-ai-tools to deploy AI models on xcore.
+ Click [here](https://github.com/xmos/ai_tools/blob/24050bb7d969295eacb8b21cc5a4a4df778147a3/README.md) for documentation on using xmos-ai-tools to deploy AI models on xcore.
@@ -72,11 +72,11 @@ xmos_ai_tools/runtime/include/lib_nn/api/xs3a_registers.h,sha256=5i6Tue1VplRI2Vl
  xmos_ai_tools/runtime/include/lib_nn/src/asm/asm_constants.h,sha256=VB8pk_H4vI4S18xYfPFCOGZoLG6S0FYW5uVfG3u9glU,1018
  xmos_ai_tools/runtime/include/lib_nn/src/asm/window_op_plan.h,sha256=4I5u3jkbPOXQnj073h24346uJTYApf0A2oegNc0TKjc,704
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/fast_flash.h,sha256=ob6pARd2fzBAAvy8oDt8oROHzuL6e8VMoKkxmIxGx-Q,2175
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h,sha256=GkmCI01aywc3docA7SV-11pSi89URw1ErxbaSkO9EIs,9303
+ xmos_ai_tools/runtime/include/lib_tflite_micro/api/inference_engine.h,sha256=BluOxaYHsrfx0x1j7gbyM989-3Ci4K6uIC0O4Pxrx4o,9204
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/load_weights.h,sha256=nzQ7lodtCKUpqCKHJ6f15RzaG5T2RO4FmAk_xN2P3js,2309
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/memory_parallel_transport.h,sha256=P6o4-yWfE3GW_R08zf_kTsg-h4589eAhg9lNvJA7ZCM,1932
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/version.h,sha256=ImFxGU2PzLZ3TzGiB1V2Ghgd2yh0qWR7UI7ao-MrG6w,318
- xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h,sha256=6KOImWQXzY6FXIepK746QlkQllmCo3eH5FD3Vp1x7PQ,519
+ xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_config.h,sha256=gywR3hBJe05GTLkcdJss0bmk3dzsWivXK6U9FF7Pfm0,589
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_device_memory.h,sha256=bLWcRDNrzClLh8_eR3XRRz3sA2pEAzzxGLDoPTsNp8A,1917
  xmos_ai_tools/runtime/include/lib_tflite_micro/api/xcore_shared_config.h,sha256=qv3cxHGUHDxdR0xlfdd0qWDOd4V0vwPkmYEVka_j6xw,1015
  xmos_ai_tools/runtime/include/lib_tflite_micro/src/thread_call.h,sha256=KNG-3gNWMSonzYTGbk4L0kT8WU9JD0bOzlk8naz7JO0,4965
@@ -84,9 +84,9 @@ xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/conv2d_f
  xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_custom_options.h,sha256=lC4Tw1Pxxg3zOXRdqNNtokuU-_cX9TTkYmGLe47-9dQ,630
  xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_error_reporter.h,sha256=_NIzvBYMqlwJexYESP5t5JXpxYTt-ZKq-1AdqAB9-Sc,812
  xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_interpreter.h,sha256=-0BNn65tzxWgNnHLolCYyUYhboL9nN3ksKni3BjH6QU,1801
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h,sha256=sT8EJUFD_R57wpOEWcVD10xxvmmTcGuGWH61SzfGwoY,3299
+ xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_ops.h,sha256=rjqoujyrPet0PCpPaTJvaym9sAFXLxwFjulEvH6x_4c,3522
  xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_profiler.h,sha256=Ytqbj4TsbhZrtl42I2dgLyeloLi-1vZwjysIoOkgX9s,1239
- xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h,sha256=CkxEhyN7i2rmlk_ua18XH1XDV_E4mS2u3Ph48mIhN7M,4747
+ xmos_ai_tools/runtime/include/lib_tflite_micro/src/tflite-xcore-kernels/xcore_utils.h,sha256=_wUizDBPCj3R8dgSl6FRHa0lafcq3613ZGMlPcI4OvE,4797
  xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud.h,sha256=aQNbN6EvpVQm-OkgE_JGn2SeqYE_sKtwWZebu50IwnE,20701
  xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_conf_default.h,sha256=cFjrjT1T7X4BerX6mRRCQwBnwDPX2553q5AzYhvzrhc,251
  xmos_ai_tools/runtime/include/lib_xud/lib_xud/api/xud_device.h,sha256=iUS2WaT-BBZbcL4Geso3Kv154VNbVpWc1yExb2xNC60,3383
@@ -147,10 +147,10 @@ xmos_ai_tools/runtime/include/tensorflow/lite/c/common.h,sha256=8dWHvE3jaBjwFHY6
  xmos_ai_tools/runtime/include/tensorflow/lite/core/macros.h,sha256=8fZFtTWlWjx8sM70cDqu6ViXtGKoA3n4q3DR2_1Lg0M,2759
  xmos_ai_tools/runtime/include/tensorflow/lite/core/api/error_reporter.h,sha256=Sn9kDYtK-s_NOZ-B3l50293J5ghnsVlXNHxF-mRWKWw,2971
  xmos_ai_tools/runtime/include/tensorflow/lite/core/api/flatbuffer_conversions.h,sha256=Z6lV84mWXH1wVoKJ7GZmjaumKLA3n62YOuCHq2jH5dw,21293
- xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h,sha256=9kHs1PmX70Vz7JdbNZuG12Tph0dwKkhKr-tA3Ju6zoY,1028
+ xmos_ai_tools/runtime/include/tensorflow/lite/core/api/tensor_utils.h,sha256=TcScYjunqdbiFMovTscFqQYnFDmXyojfVDQFC23ANEk,1052
  xmos_ai_tools/runtime/include/tensorflow/lite/core/c/builtin_op_data.h,sha256=2YkHwO7KqG9aJtKCIhC5nI4FjNYgRHPa-zr6s46_F-0,16326
  xmos_ai_tools/runtime/include/tensorflow/lite/core/c/c_api_types.h,sha256=-XvGicMx_AKeTX-MZ5-7s_Pq8-4oY3gaRMjqY61eVAo,6328
- xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h,sha256=CgWZanWNv443Wfl-yXE1cibyX8QyQW5Nr3T0CU01ooE,68785
+ xmos_ai_tools/runtime/include/tensorflow/lite/core/c/common.h,sha256=QLv3R9NKc-mYmpZ_N0Yb9y_9c5bUzDlzwTGoPG0Fh8s,68679
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/bits.h,sha256=ECUf1QiZa_Cj8K807tCT0-24MSQgTBUE2znzWYegFaA,2916
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft.h,sha256=XXMKYjdmAn3CMmpRMcZb5uyMbJAPNKLQXXJqlHXqtQk,1381
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/fft_io.h,sha256=Uii9_PJ4ArpvGzHHDyG-FZHuMmIg3pRS8vOczAXHQkI,1210
@@ -175,7 +175,7 @@ xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/pca
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window.h,sha256=5MolgRHCsP-9nnAsuVht4JXFyGzKVwmhPBuF3HkCYHU,1476
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_io.h,sha256=gR7TO_YI6Mgu83A6imdEiJ4sNfGMjkGxLmCq0lb2lAo,1237
  xmos_ai_tools/runtime/include/tensorflow/lite/experimental/microfrontend/lib/window_util.h,sha256=0GnfFOwHjcxF9hxBmPwFDw_jdDmx_TwyhABGiljmJaQ,1565
- xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h,sha256=e0hZavKHQCLb2PX1PAZrdYgr6bDbhalTT_rnRkFCtd0,14255
+ xmos_ai_tools/runtime/include/tensorflow/lite/kernels/kernel_util.h,sha256=erdyPXvKxlh_pvhmqI1dj4_vbH7IOV1JxbgBkBIvrLM,14511
  xmos_ai_tools/runtime/include/tensorflow/lite/kernels/op_macros.h,sha256=BKqR9eM421HLoKn9hHIu6AbtQf5NZEVb-kS_OgvQL9E,1446
  xmos_ai_tools/runtime/include/tensorflow/lite/kernels/padding.h,sha256=N4MRLEoGkv_9x1STOr2q2NDciSnQgtm7kYUoXGsk4xE,5023
  xmos_ai_tools/runtime/include/tensorflow/lite/kernels/internal/common.h,sha256=BQiD0sqPn6qENyiqxuwpxbUvb7tEXivfKSc5r8GYx28,55506
@@ -377,19 +377,19 @@ xmos_ai_tools/runtime/include/tensorflow/lite/micro/tflite_bridge/micro_error_re
  xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/log_utils.h,sha256=os4fNRaYPa-stH2mtBa9MYXViwgDjwQQxyECSgEJjTQ,10059
  xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/metrics.h,sha256=FW4NtJK9SmirmtD5gkHsp_BlLbwLizDDVin1CeujAfc,1553
  xmos_ai_tools/runtime/include/tensorflow/lite/micro/tools/benchmarking/op_resolver.h,sha256=g0dl9tzUqngiINvjBlqDclFqvkC85MC4kbU13vE4OkY,6071
- xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h,sha256=OzGCvswrW_FArm-HxD70BExn0PVtBbHAFPDO6ZZOFtc,1093177
+ xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_generated.h,sha256=hSumwEqc-LmMHG82KKl1yPLiVMNbLj_q19-VRPCR7ng,1093175
  xmos_ai_tools/runtime/include/tensorflow/lite/schema/schema_utils.h,sha256=tkHMDPARjIqppYCVInIowwdHxjNP3pfSS9O7vx-ODeo,1333
- xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a,sha256=qchUREM_VBx0q_Is_JuenmCCCVL0NtIhhvO0xpwPed8,2463016
- xmos_ai_tools/runtime/lib/libxtflitemicro.a,sha256=6E27PKKB1hjYb80BPEC9cAD05Kj9IlcuIIERxSCfAj4,71502254
+ xmos_ai_tools/runtime/lib/libhost_xtflitemicro.a,sha256=-5Wh8DvSGKZM8yLfLNXaSMlHmN2-FO0OlBYGVNe3OS8,2468232
+ xmos_ai_tools/runtime/lib/libxtflitemicro.a,sha256=zMcXwpvNBEX-kIq_jBajm10WKhFMQdj_CTo3VIIzgCw,71830940
  xmos_ai_tools/xformer/__init__.py,sha256=a4K06oiSu2TYVwGvzwMDGGejPUYTvW9-5Uw6SfuFCX4,1903
  xmos_ai_tools/xformer/flash.py,sha256=MG4coi_Lvvg-oQmw1pomJD8eeOH4gAMjixjBFvO2BCk,6376
  xmos_ai_tools/xinterpreters/__init__.py,sha256=PFRB9VxOLKaA--j2ZvWGcmesv2C6uNYqJ_kBam68aUI,50
  xmos_ai_tools/xinterpreters/exceptions.py,sha256=HOjADxHYMPI9mN0YIbWxtw9hSeL2B6XWWwqtGtyJdVs,577
- xmos_ai_tools/xinterpreters/host_interpreter.py,sha256=rqoTcSWK7O9eboMsg0o2UYWapWTE5xiki7XTJZ_JbwQ,25664
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib,sha256=B19AOHjt8IxbZsg_f6idjbWllNHGCv6rrD3TSi9zFuk,2356904
- xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib,sha256=B19AOHjt8IxbZsg_f6idjbWllNHGCv6rrD3TSi9zFuk,2356904
- xmos_ai_tools-1.3.2.dev213.data/data/bin/xcore-opt,sha256=lihZetZGOatjzkjSTwPSJgxHPDXytwqjaObZbOd9U4A,285409352
- xmos_ai_tools-1.3.2.dev213.dist-info/METADATA,sha256=iq6kYYd5wgP_U4xcRpA5Ep-mu2WwEI931SnSnjPXK0Y,1574
- xmos_ai_tools-1.3.2.dev213.dist-info/WHEEL,sha256=d1tw4CznjypCP7WIuu-X_VtKtms3NEdUd9Roh13sxAM,112
- xmos_ai_tools-1.3.2.dev213.dist-info/top_level.txt,sha256=YWegea73ll3tMlRWRdHJemUy2VOuEYDdOIaffxu_eF0,14
- xmos_ai_tools-1.3.2.dev213.dist-info/RECORD,,
+ xmos_ai_tools/xinterpreters/host_interpreter.py,sha256=9oZVFVGs34dkBAyye-8pg1TTfCj8B1ZvbsBKB0HnBtY,25709
+ xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.1.0.1.dylib,sha256=snXJNzi2mM4-YB_5wbx2-C602nv_Jdfl_yqr9M5sZ30,2357816
+ xmos_ai_tools/xinterpreters/libs/macos/xtflm_python.dylib,sha256=snXJNzi2mM4-YB_5wbx2-C602nv_Jdfl_yqr9M5sZ30,2357816
+ xmos_ai_tools-1.3.2.dev232.data/data/bin/xcore-opt,sha256=IdAQD2i4_2Rj3XGzmc3JigXXCKZABPt0pPpdGMo_mqw,286222392
+ xmos_ai_tools-1.3.2.dev232.dist-info/METADATA,sha256=kQpVAtse6xSBNRR6FnCm-VIZIpAVRHEIb-dcDRrBEhk,1574
+ xmos_ai_tools-1.3.2.dev232.dist-info/WHEEL,sha256=HwB_UO1X29gO_-RNo4EXa9K4pIwQmN1aaAHIeI9Tfw8,112
+ xmos_ai_tools-1.3.2.dev232.dist-info/top_level.txt,sha256=YWegea73ll3tMlRWRdHJemUy2VOuEYDdOIaffxu_eF0,14
+ xmos_ai_tools-1.3.2.dev232.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (75.8.2)
  Root-Is-Purelib: false
  Tag: py3-none-macosx_10_15_universal2