ai-edge-torch-nightly 0.5.0.dev20250506__py3-none-any.whl → 0.5.0.dev20250507__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -201,7 +201,7 @@ class Decoder(nn.Module):
  torch.zeros_like(sliding_mask_bool, dtype=torch.float),
  torch.full_like(
  sliding_mask_bool,
- self.config.get_causal_mask_value(),
+ self.config.causal_mask_value,
  dtype=torch.float,
  ),
  )
@@ -219,7 +219,7 @@ class Decoder(nn.Module):
  mask = torch.logical_and(mask, pixel_mask)
  else:
  mask = torch.logical_or(mask, pixel_mask)
- mask = torch.where(mask, 0, self.config.get_causal_mask_value())
+ mask = torch.where(mask, 0, self.config.causal_mask_value)
  return mask

  def build_pixel_mask(self, image_indices: torch.Tensor):
@@ -251,5 +251,5 @@ class ModelConfig:
  return self.block_configs[idx]

  @property
- def get_causal_mask_value(self) -> float:
+ def causal_mask_value(self) -> float:
  return self.block_config(0).attn_config.causal_mask_value
@@ -23,6 +23,7 @@ class Dtype(enum.Enum):
  FP32 = enum.auto()
  FP16 = enum.auto()
  INT8 = enum.auto()
+ INT4 = enum.auto()


  @enum.unique
@@ -66,3 +67,4 @@ class Granularity(enum.Enum):

  NONE = enum.auto()
  CHANNELWISE = enum.auto()
+ BLOCKWISE = enum.auto()
@@ -36,6 +36,7 @@ class LayerQuantRecipe:
  mode: Type of quantization.
  algorithm: Algorithm for calculating quantization parameters.
  granularity: Granularity of quantization.
+ block_size: Size of the block for blockwise quantization.
  """

  activation_dtype: quant_attrs.Dtype
@@ -43,15 +44,18 @@ class LayerQuantRecipe:
  mode: quant_attrs.Mode
  algorithm: quant_attrs.Algorithm
  granularity: quant_attrs.Granularity
+ block_size: int = 0

  def __str__(self):
- return (
+ base_str = (
  f'(a:{self.activation_dtype.name}, '
  f'w:{self.weight_dtype.name}, '
  f'{self.mode.name}, '
  f'{self.algorithm.name}, '
- f'{self.granularity.name})'
+ f'{self.granularity.name}'
+ f'{self.block_size}'
  )
+ return f'{base_str})'

  __repr__ = __str__

@@ -70,6 +74,16 @@ class LayerQuantRecipe:
  and self.algorithm == supported[3]
  and self.granularity == supported[4]
  ):
+ if self.block_size > 0:
+ if (
+ self.block_size % 32 == 0
+ and self.granularity == quant_attrs.Granularity.BLOCKWISE
+ ):
+ is_valid = True
+ break
+ else:
+ is_valid = False
+ break
  is_valid = True
  break

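For context on the branch added above: a recipe with a positive block_size now verifies only when the block size is a multiple of 32 and the granularity is BLOCKWISE. A minimal sketch of the resulting behavior, assuming the enclosing method is LayerQuantRecipe.verify() and that it raises ValueError on an unsupported combination (both assumptions, inferred from this hunk and the tests added later in this diff):

import dataclasses

from ai_edge_torch.generative.quantize import quant_attrs
from ai_edge_torch.generative.quantize import quant_recipe

# Accepted: positive block_size that is a multiple of 32, with BLOCKWISE granularity.
recipe = quant_recipe.LayerQuantRecipe(
    activation_dtype=quant_attrs.Dtype.FP32,
    weight_dtype=quant_attrs.Dtype.INT4,
    mode=quant_attrs.Mode.DYNAMIC_RANGE,
    algorithm=quant_attrs.Algorithm.MIN_MAX,
    granularity=quant_attrs.Granularity.BLOCKWISE,
    block_size=64,
)
recipe.verify()  # assumed verify() entry point; should pass

# Rejected: 36 is not a multiple of 32, so the new branch marks the recipe invalid.
bad = dataclasses.replace(recipe, block_size=36)  # assumes LayerQuantRecipe is a dataclass
bad.verify()  # expected to raise ValueError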
@@ -60,3 +60,16 @@ def create_layer_quant_fp16() -> quant_recipe.LayerQuantRecipe:
  algorithm=quant_attrs.Algorithm.FLOAT_CAST,
  granularity=quant_attrs.Granularity.NONE,
  )
+
+
+ def create_layer_quant_int4_dynamic_block(
+ block_size: int,
+ ) -> quant_recipe.LayerQuantRecipe:
+ return quant_recipe.LayerQuantRecipe(
+ activation_dtype=quant_attrs.Dtype.FP32,
+ weight_dtype=quant_attrs.Dtype.INT4,
+ mode=quant_attrs.Mode.DYNAMIC_RANGE,
+ algorithm=quant_attrs.Algorithm.MIN_MAX,
+ granularity=quant_attrs.Granularity.BLOCKWISE,
+ block_size=block_size,
+ )
@@ -54,3 +54,15 @@ def full_fp16_recipe() -> quant_config.QuantConfig:
  default=quant_recipe_utils.create_layer_quant_fp16()
  )
  )
+
+
+ def all_supported_int4_dynamic_block_recipe(
+ block_size: int,
+ ) -> quant_config.QuantConfig:
+ return quant_config.QuantConfig(
+ generative_recipe=quant_recipe.GenerativeQuantRecipe(
+ default=quant_recipe_utils.create_layer_quant_int4_dynamic_block(
+ block_size
+ )
+ )
+ )
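Together with the helper in the previous hunk, this gives a one-call way to request INT4 dynamic-range blockwise quantization during conversion. A minimal usage sketch, mirroring the test added later in this diff (the toy_model import path and the export call are assumptions, not part of the diff):

import torch
import ai_edge_torch
from ai_edge_torch.generative.examples.test_models import toy_model  # assumed path
from ai_edge_torch.generative.quantize import quant_recipes

config = toy_model.get_model_config()
pytorch_model = toy_model.ToySingleLayerModel(config)
idx = torch.unsqueeze(torch.arange(0, 100, dtype=torch.int), 0)
input_pos = torch.arange(0, 100, dtype=torch.int)

# Block size must be a positive multiple of 32; 36 raises ValueError per the new test.
quant_config = quant_recipes.all_supported_int4_dynamic_block_recipe(32)
edge_model = ai_edge_torch.convert(
    pytorch_model, (idx, input_pos), quant_config=quant_config
)
edge_model.export('/tmp/toy_int4_blockwise.tflite')  # assumed output path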
@@ -29,4 +29,5 @@ def get_supported_layer_schemes():
  (_t.FP32, _t.INT8, _m.DYNAMIC_RANGE, _a.MIN_MAX, _g.CHANNELWISE),
  (_t.FP32, _t.INT8, _m.WEIGHT_ONLY, _a.MIN_MAX, _g.CHANNELWISE),
  (_t.FP32, _t.FP16, _m.WEIGHT_ONLY, _a.FLOAT_CAST, _g.NONE),
+ (_t.FP32, _t.INT4, _m.DYNAMIC_RANGE, _a.MIN_MAX, _g.BLOCKWISE),
  ]
@@ -40,6 +40,7 @@ class TestVerifyRecipes(parameterized.TestCase):
  (Dtype.INT8, Dtype.FP16),
  (Dtype.FP16, Dtype.INT8),
  (Dtype.FP16, Dtype.FP16),
+ (Dtype.FP16, Dtype.INT4),
  ])
  def test_verify_invalid_recipes(
  self,
@@ -74,6 +75,14 @@ class TestVerifyRecipes(parameterized.TestCase):
  Algorithm.FLOAT_CAST,
  Granularity.NONE,
  ),
+ (
+ Dtype.FP32,
+ Dtype.INT4,
+ Mode.DYNAMIC_RANGE,
+ Algorithm.MIN_MAX,
+ Granularity.BLOCKWISE,
+ 32,
+ ),
  ])
  def test_verify_valid_recipes(
  self,
@@ -82,6 +91,7 @@ class TestVerifyRecipes(parameterized.TestCase):
  mode,
  algo,
  granularity,
+ block_size=None,
  ):
  quant_recipe.LayerQuantRecipe(
  activation, weight, mode, algo, granularity
@@ -150,6 +160,33 @@ class TestQuantizeConvert(parameterized.TestCase):
  "Quantized model isn't smaller than F32 model.",
  )

+ def test_quantize_convert_toy_blockwise(self):
+ config = toy_model.get_model_config()
+ pytorch_model = toy_model.ToySingleLayerModel(config)
+ idx = torch.unsqueeze(torch.arange(0, 100, dtype=torch.int), 0)
+ input_pos = torch.arange(0, 100, dtype=torch.int)
+ quant_config = quant_recipes.all_supported_int4_dynamic_block_recipe(32)
+ quantized_model = ai_edge_torch.convert(
+ pytorch_model, (idx, input_pos), quant_config=quant_config
+ )
+ float_model = ai_edge_torch.convert(pytorch_model, (idx, input_pos))
+ self.assertLess(
+ len(quantized_model._tflite_model),
+ len(float_model._tflite_model),
+ "Quantized model isn't smaller than F32 model.",
+ )
+
+ def test_unsupported_block_size(self):
+ config = toy_model.get_model_config()
+ pytorch_model = toy_model.ToySingleLayerModel(config)
+ idx = torch.unsqueeze(torch.arange(0, 100, dtype=torch.int), 0)
+ input_pos = torch.arange(0, 100, dtype=torch.int)
+ self.assertRaises(
+ ValueError,
+ quant_recipes.all_supported_int4_dynamic_block_recipe,
+ 36,
+ )
+
  def test_quantize_convert_compare_toy(self):
  self.skipTest("b/338288901")
  config = toy_model_with_kv_cache.get_model_config()
@@ -243,15 +243,13 @@ def _export_helper(

  prefill_masks = None
  if flags.FLAGS.mask_as_input:
- prefill_masks = [
- _build_mask(
- flags.FLAGS.prefill_seq_lens,
- flags.FLAGS.kv_cache_max_len,
- config.get_causal_mask_value(),
- )
- ]
-
- if prefill_masks:
+ prefill_masks = _build_mask(
+ flags.FLAGS.prefill_seq_lens,
+ flags.FLAGS.kv_cache_max_len,
+ config.causal_mask_value,
+ )
+ if not isinstance(prefill_masks, list):
+ prefill_masks = [prefill_masks]
  assert len(prefill_masks) == len(prefill_seq_lens)

  decode_token = torch.tensor(
@@ -321,7 +319,7 @@ def _export_helper(
  # torch.triu(mask, diagonal=decode_position).unsqueeze(0).unsqueeze(0)
  #
  sample_kwargs['mask'] = _build_mask(
- 1, flags.FLAGS.kv_cache_max_len, config.get_causal_mask_value()
+ 1, flags.FLAGS.kv_cache_max_len, config.causal_mask_value
  )
  if lora is not None:
  sample_kwargs['lora'] = lora
@@ -13,9 +13,9 @@
  # limitations under the License.
  # ==============================================================================

- from ai_edge_quantizer import quantizer
  from ai_edge_torch.generative.quantize import quant_attrs
  from ai_edge_torch.generative.quantize import quant_recipe
+ from ai_edge_quantizer import quantizer

  _ComputePrecision = quantizer.qtyping.ComputePrecision
  _QuantGranularity = quantizer.qtyping.QuantGranularity
@@ -39,6 +39,8 @@ def _get_nbits_from_dtype(dtype: quant_attrs.Dtype) -> int:
  return 16
  elif dtype == quant_attrs.Dtype.INT8:
  return 8
+ elif dtype == quant_attrs.Dtype.INT4:
+ return 4
  raise ValueError('Unimplemented number of bits')


@@ -76,6 +78,8 @@ def _get_granularity(
  return _QuantGranularity.CHANNELWISE
  if granularity == quant_attrs.Granularity.NONE:
  return _QuantGranularity.TENSORWISE
+ if granularity == quant_attrs.Granularity.BLOCKWISE:
+ return _QuantGranularity.BLOCKWISE
  raise ValueError('Unimplemented granularity')


@@ -101,6 +105,7 @@ def _set_quant_config(
  symmetric=True,
  granularity=_get_granularity(layer_recipe.granularity),
  dtype=_get_dtype_from_dtype(layer_recipe.weight_dtype),
+ block_size=layer_recipe.block_size,
  ),
  compute_precision=_get_compute_precision_from_mode(layer_recipe.mode),
  explicit_dequantize=_get_explicit_dequant_from_mode(
ai_edge_torch/version.py CHANGED
@@ -13,4 +13,4 @@
  # limitations under the License.
  # ==============================================================================

- __version__ = "0.5.0.dev20250506"
+ __version__ = "0.5.0.dev20250507"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ai-edge-torch-nightly
- Version: 0.5.0.dev20250506
+ Version: 0.5.0.dev20250507
  Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
  Home-page: https://github.com/google-ai-edge/ai-edge-torch
  Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
@@ -2,7 +2,7 @@ ai_edge_torch/__init__.py,sha256=8sPR_5uXJA4NEE0nIwNdSl-ADOJEoR8hAgYvBQDY70Y,120
  ai_edge_torch/_config.py,sha256=AiqhbcheF7j_ozIGDLC89k1we95aVgFDa-tR6h7UI0s,2529
  ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
  ai_edge_torch/model.py,sha256=wxjSFq_rBSxSqbUE8E8EJTCkgvgaRLjq_ZuAM-IZpCU,5606
- ai_edge_torch/version.py,sha256=h2DvepK-jWas3mxk9s_2MbivYh_uy9-TIfaXJN7FlTY,706
+ ai_edge_torch/version.py,sha256=87Gl0LNfMSQT4gAxzJTQ7v7ZnE7ZMa1QhSbSsKLUIW8,706
  ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
  ai_edge_torch/_convert/conversion.py,sha256=QVugYVfbyaeBgSKKbhFzHG5oXA7t3M-40JcpcdSu6W8,5436
  ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -69,7 +69,7 @@ ai_edge_torch/generative/examples/gemma/verify_gemma2.py,sha256=jhiyinOqPt5ZZjEa
  ai_edge_torch/generative/examples/gemma/verify_util.py,sha256=n7f2nF6Lin_tDvPs0JVldsuaBzo7pAwi5YAHAhlIxQg,6139
  ai_edge_torch/generative/examples/gemma3/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
  ai_edge_torch/generative/examples/gemma3/convert_gemma3_to_tflite.py,sha256=MjkQDVynaw9C5z9ODzKfb85xW5JfxHUWBJ_Aco05FHo,1760
- ai_edge_torch/generative/examples/gemma3/decoder.py,sha256=fzLpuJO5JseQLA38Li-i9Xdnh9I4zdBWQEOeNbUEfjI,15737
+ ai_edge_torch/generative/examples/gemma3/decoder.py,sha256=xGxeNKQvgyrENmUQMu0uKymL3qthvbdoxdMbAzwiLz0,15725
  ai_edge_torch/generative/examples/gemma3/gemma3.py,sha256=GACDBI_MsFowR8A3wAWrpzradPYe-AUgB9ZjXaVBG-s,6485
  ai_edge_torch/generative/examples/gemma3/image_encoder.py,sha256=uRoLoBWzFtQz5wFZfPCxbkvZsgPAqSkUUsV3977GbYc,5184
  ai_edge_torch/generative/examples/gemma3/verify_gemma3.py,sha256=v8oNXFICmVOtQxfO7IhZ8GnbvotEkDi9lzYHjoQyOso,2464
@@ -165,7 +165,7 @@ ai_edge_torch/generative/layers/feed_forward.py,sha256=hdICat-8gW7-vxDAevJQ8NQ-m
  ai_edge_torch/generative/layers/feed_forward_test.py,sha256=8ZGy79BBpsyS6yKKDEKrDt249G5Mz-8VKWW7_WHx0u4,1655
  ai_edge_torch/generative/layers/kv_cache.py,sha256=b-7shzDaKexmvQF7P3SiAmIz4ZofjYWv3m5u71GojsA,10460
  ai_edge_torch/generative/layers/lora.py,sha256=hsvWLLOnW7HQ0AysOZu30x_cetMquDd1tjfyLz8HCSU,17892
- ai_edge_torch/generative/layers/model_config.py,sha256=dRZUMa71ADaEllu7TfXUWTMHRCcMgvkFMYMzmeJi4G8,8576
+ ai_edge_torch/generative/layers/model_config.py,sha256=X_gjN5524DCDBNXsX5GrOBlkKM4UHzj_RfdCD0-VOxQ,8572
  ai_edge_torch/generative/layers/normalization.py,sha256=MbwH-n80Fob5YvjBzdqDjBizMHLzSJGYRDdbD-rL5C0,6174
  ai_edge_torch/generative/layers/rotary_position_embedding.py,sha256=975zR202MdIrILJ7blceAcxrNqX1ZCN0ECKG1gz-bV8,2655
  ai_edge_torch/generative/layers/scaled_dot_product_attention.py,sha256=2_AgwENsaOgaxgiSqgoj0V0JzQ09dFtP_nBhX-lJK2g,5648
@@ -177,11 +177,11 @@ ai_edge_torch/generative/layers/unet/builder.py,sha256=zAqWXdimmMrQRhmE_t9XkS68m
  ai_edge_torch/generative/layers/unet/model_config.py,sha256=pPDwLawc23pfMaPVyMJlYmxVVusjMvx-l8wBwOYOH-c,9692
  ai_edge_torch/generative/quantize/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
  ai_edge_torch/generative/quantize/example.py,sha256=1lfVNUd2cEyRUnoZ7BLbRJ9IN-FTKiWBtZNPFUzAiWE,1747
- ai_edge_torch/generative/quantize/quant_attrs.py,sha256=n1Fm8BFC8gJa_oiwwAOOghJyHtOXYZ4q-5ZRy4pHrIw,1957
- ai_edge_torch/generative/quantize/quant_recipe.py,sha256=tKnuJq6hPD23JPCB9nPAlE1UHAwdbChkgPShiVaz4CE,5156
- ai_edge_torch/generative/quantize/quant_recipe_utils.py,sha256=4fgmP_GgeiFUOkIaC9ZZXC12eO3DQZdrWDXRz5YXiwU,2270
- ai_edge_torch/generative/quantize/quant_recipes.py,sha256=0Kvr_o7pbMnE8VMe6Ml0FBxkHM6RJ3C14B2I1mjItjc,2030
- ai_edge_torch/generative/quantize/supported_schemes.py,sha256=FjdycEOvxRgBmQdZVufetPvkDoD7rUowIOSKV9oV5Kk,1418
+ ai_edge_torch/generative/quantize/quant_attrs.py,sha256=plMsd7JBi98r2NHsAdMdvS6TPTXAoRFLCwOXu8H3-24,2004
+ ai_edge_torch/generative/quantize/quant_recipe.py,sha256=3xT4N5tfggXJqgwKW4ntIkwsrNVtkG2SIUHeiSF5yOs,5579
+ ai_edge_torch/generative/quantize/quant_recipe_utils.py,sha256=h3k_na6rbR08Ip79-2JbkeH8RDk_rrnEGiytuzFDhqc,2678
+ ai_edge_torch/generative/quantize/quant_recipes.py,sha256=a71KFHVbjJdBDpYshbUI69NxGNOmPuqp_NZvNSrf00c,2349
+ ai_edge_torch/generative/quantize/supported_schemes.py,sha256=TwR2FpQuBEORy6FshEyHNBMKARWlA2MVtTfX9tXV5aE,1488
  ai_edge_torch/generative/test/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
  ai_edge_torch/generative/test/test_custom_dus.py,sha256=MjIhTvkTko872M35XMciobvICcDWTcIDJ3rociko-wM,3267
  ai_edge_torch/generative/test/test_kv_cache.py,sha256=1sXN2RPntq0PP3IEy0NkvIbzQ0Y8JhPIwRSFwO9JLlE,5728
@@ -189,10 +189,10 @@ ai_edge_torch/generative/test/test_loader.py,sha256=9mQUeeZKOVApOWSWl2cN9c10axZj
  ai_edge_torch/generative/test/test_lora.py,sha256=6QIM6RLTc2HrodGpp_aS3OxM9Rco2KAzEnYgotkg41M,5310
  ai_edge_torch/generative/test/test_model_conversion.py,sha256=mhNJikLnGVGi9NKmXB8FhnqeDy9gtrvC3yEbrTABZ4Y,6163
  ai_edge_torch/generative/test/test_model_conversion_large.py,sha256=vQWmpzMkJ2hPmWpg41ZMWwBsngTykRVzRPHtpbkwiLM,12811
- ai_edge_torch/generative/test/test_quantize.py,sha256=bEJMhpQ9bIDUZVBXTW888728FcH-i3SyE4JSZZUgU0A,6071
+ ai_edge_torch/generative/test/test_quantize.py,sha256=TG6vTF9yOZWe2wW7v8-hmuaQoODwJC1Z-2d5xv3zgfI,7389
  ai_edge_torch/generative/test/utils.py,sha256=tF6aCfAGJnc9dmzCnZCEOuKNVimfWOqscv9og0DDLHU,2656
  ai_edge_torch/generative/utilities/__init__.py,sha256=-_jxnnFnCgnTU4oTm4MnRsvL5lqhomBNdFBbqfmfHPo,720
- ai_edge_torch/generative/utilities/converter.py,sha256=K1gZWPq5f3Z7f9USeJ_PphctO1dyYTNrWSJQ-cztgKA,11658
+ ai_edge_torch/generative/utilities/converter.py,sha256=d0JOWN5l2vbvt8RzFFiRoulkWiejyEZ21xKv5LdLIyc,11675
  ai_edge_torch/generative/utilities/export_config.py,sha256=5IvR3grlMd4mWO5c_Y4x9Fk1b1xa57MzlYNE8XUaN28,2049
  ai_edge_torch/generative/utilities/loader.py,sha256=7p__m2JryWphGlYOuRxdoT4id4_tWJEVOV7y2X4H-Ak,13737
  ai_edge_torch/generative/utilities/model_builder.py,sha256=ZYX1TxpFdj573du2QCyHJlFjx4q1m12R74fp4Gwl92A,6343
@@ -214,7 +214,7 @@ ai_edge_torch/lowertools/common_utils.py,sha256=4HQtquPZ6oiId8vR_1ykW_uK4ELnyo5z
  ai_edge_torch/lowertools/odml_torch_utils.py,sha256=QRuS7S5lULRWEh3J1sWIsnKh-rbX7rd9tt6JJHbMPfo,8317
  ai_edge_torch/lowertools/test_utils.py,sha256=mdxTlhqHABZEQ_GEmPFCL8LIAWtqRtYZUGdSY1ieZjw,1949
  ai_edge_torch/lowertools/torch_xla_utils.py,sha256=1EytIw2R6dthhLhf69wN1L9BaQTeybCD0wga-PhHcMI,9518
- ai_edge_torch/lowertools/translate_recipe.py,sha256=ymkBpFqAUiupRWqrPOWiVphKcXR1K5vHK0RjgBFtxlE,5652
+ ai_edge_torch/lowertools/translate_recipe.py,sha256=kUVCe69_DzvfbNYVB0MY2rCZwWaN8t3NoNu8Vh4x5bQ,5849
  ai_edge_torch/odml_torch/__init__.py,sha256=S8jOzE9nLof-6es3XDiGJRN-9H_XTxsVm9dE7lD3RWo,812
  ai_edge_torch/odml_torch/_torch_future.py,sha256=jSYHf1CMTJzMizPMbu2b39hAt0ZTR6gQLq67GMe9KTo,2336
  ai_edge_torch/odml_torch/_torch_library.py,sha256=Lw1gqL2HWNRspdTwNhIkYAHDyafHedHtkXyKKxn-Wss,805
@@ -251,8 +251,8 @@ ai_edge_torch/testing/__init__.py,sha256=_yGgvnBZWb7T3IN3mc4x1sS4vM96HZwM8pwIcPG
  ai_edge_torch/testing/export.py,sha256=k5mGDGzwc23Z4zaIVDs8CNh-oOt64gsf9MS9NjhbPy4,3293
  ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
  ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
- ai_edge_torch_nightly-0.5.0.dev20250506.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- ai_edge_torch_nightly-0.5.0.dev20250506.dist-info/METADATA,sha256=U800OYIqsYFtAziBGVh7GJ__RvSvmkHAzcttf1Z3vME,2051
- ai_edge_torch_nightly-0.5.0.dev20250506.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- ai_edge_torch_nightly-0.5.0.dev20250506.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
- ai_edge_torch_nightly-0.5.0.dev20250506.dist-info/RECORD,,
+ ai_edge_torch_nightly-0.5.0.dev20250507.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ ai_edge_torch_nightly-0.5.0.dev20250507.dist-info/METADATA,sha256=YKm-Nzrn7i9780PIOL6mO3vLxeCaGhJ_C4gSHTOthYI,2051
+ ai_edge_torch_nightly-0.5.0.dev20250507.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ ai_edge_torch_nightly-0.5.0.dev20250507.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ ai_edge_torch_nightly-0.5.0.dev20250507.dist-info/RECORD,,