ai-edge-quantizer-nightly 0.4.0.dev20250904__py3-none-any.whl → 0.4.0.dev20250906__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries, and is provided for informational purposes only.
@@ -123,6 +123,7 @@ MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT = {
     _TFLOpName.GATHER: common_quantize.materialize_gather,
     _TFLOpName.HARD_SWISH: common_quantize.materialize_hard_swish,
     _TFLOpName.MAXIMUM: common_quantize.materialize_maximum,
+    _TFLOpName.PADV2: common_quantize.materialize_padv2,
 }
 for op_name, materialize_func in MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT.items():
   register_quantized_op(
@@ -272,6 +273,7 @@ _OCTAV_OP_NAME_MATERIALIZE_FUNC_DICT = immutabledict({
     _TFLOpName.GATHER: common_quantize.materialize_gather,
     _TFLOpName.HARD_SWISH: common_quantize.materialize_hard_swish,
     _TFLOpName.MAXIMUM: common_quantize.materialize_maximum,
+    _TFLOpName.PADV2: common_quantize.materialize_padv2,
 })
 
 for op_name, materialize_func in _OCTAV_OP_NAME_MATERIALIZE_FUNC_DICT.items():
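
Both hunks above extend the same registration pattern: each algorithm keeps a dict from TFLite op name to its materialization function, and the dict is looped over to register every op. A minimal sketch of that dispatch pattern is below; the tiny registry, `register_op`, and the stub materialize functions are illustrative stand-ins, not the package's `register_quantized_op` API.

```python
# Illustrative op-name -> materialize-function registry (hypothetical names).
from typing import Any, Callable, Dict

OP_REGISTRY: Dict[str, Callable[..., Any]] = {}


def register_op(op_name: str, materialize_func: Callable[..., Any]) -> None:
  """Records which function materializes quantization params for an op."""
  OP_REGISTRY[op_name] = materialize_func


def materialize_maximum(*args):  # stand-in for common_quantize.materialize_maximum
  return []


def materialize_padv2(*args):  # stand-in for common_quantize.materialize_padv2
  return []


_MATERIALIZE_FUNC_DICT = {
    "MAXIMUM": materialize_maximum,
    "PADV2": materialize_padv2,  # the op added in this release
}

for op_name, materialize_func in _MATERIALIZE_FUNC_DICT.items():
  register_op(op_name, materialize_func)

assert "PADV2" in OP_REGISTRY
```
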
@@ -712,7 +712,24 @@ def materialize_pad(
       tensor_name_to_qsv,
       get_tensor_quant_params_fn,
       constraint=_OpQuantConstraint.SAME_AS_INPUT_SCALE,
-      inputs_to_ignore=[1],  # Padding value does not need to be quantized.
+      inputs_to_ignore=[1],  # Paddings tensor does not need to be quantized.
+  )
+
+
+def materialize_padv2(
+    get_tensor_quant_params_fn: qtyping.GetTensorQuantParamsFuncSignature,
+    op_info: qtyping.OpInfo,
+    graph_info: qtyping.GraphInfo,
+    tensor_name_to_qsv: dict[str, Any],
+) -> list[qtyping.TensorTransformationParams]:
+  """Materialize tensors in tfl.padv2."""
+  return common_utils.materialize_standard_op(
+      op_info,
+      graph_info,
+      tensor_name_to_qsv,
+      get_tensor_quant_params_fn,
+      constraint=_OpQuantConstraint.SAME_AS_OUTPUT_SCALE,
+      inputs_to_ignore=[1],  # Paddings tensor does not need to be quantized.
   )
 
 
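
`tfl.padv2` takes three inputs: the data tensor, the paddings tensor (index 1, which stays integer and is skipped via `inputs_to_ignore`), and a constant padding value. Under `SAME_AS_OUTPUT_SCALE` that constant ends up on the same quantization grid as the op's output. A minimal numpy sketch of quantizing such a constant with a shared scale and zero point follows; the scale/zero-point values are made-up examples, and `quantize_to_int8` is not the quantizer's internal API.

```python
# Sketch: put a constant padding value onto the output tensor's int8 grid.
# The scale / zero_point below are illustrative assumptions, not calibrated values.
import numpy as np


def quantize_to_int8(values: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
  """Uniform affine quantization: q = round(x / scale) + zero_point, clamped to int8."""
  q = np.round(values / scale) + zero_point
  return np.clip(q, -128, 127).astype(np.int8)


output_scale, output_zero_point = 0.05, -10  # assumed output quantization params

pad_value = np.array([0.0], dtype=np.float32)  # the constant padding value input
print(quantize_to_int8(pad_value, output_scale, output_zero_point))  # [-10]
```

Sharing the output's parameters keeps the padded region exactly representable: float 0.0 maps to the output's zero point, so no separate requantization step is needed around the op.
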
@@ -41,6 +41,7 @@ _DRQ_OR_WEIGHT_ONLY_OPS = frozenset([
 
 _SUPPORTED_SUBCHANNEL_OPS = frozenset([
     _TFLOpName.FULLY_CONNECTED,
+    _TFLOpName.EMBEDDING_LOOKUP,
 ])
 
 
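
Adding `EMBEDDING_LOOKUP` to `_SUPPORTED_SUBCHANNEL_OPS` lets embedding tables use sub-channel (blockwise) quantization: each row of the table is split into fixed-size blocks along the embedding dimension and every block gets its own scale, which typically preserves accuracy better than a single per-row scale. The numpy sketch below shows the idea for symmetric int8; the shapes and block size are arbitrary assumptions, not the quantizer's defaults.

```python
# Sketch: blockwise (sub-channel) symmetric int8 quantization of an embedding table.
# Shapes and block size are illustrative assumptions.
import numpy as np


def blockwise_quantize(weight: np.ndarray, block_size: int):
  """Returns (int8 blocks, per-block scales) for a [vocab, dim] weight."""
  vocab, dim = weight.shape
  assert dim % block_size == 0
  blocks = weight.reshape(vocab, dim // block_size, block_size)
  scales = np.max(np.abs(blocks), axis=-1, keepdims=True) / 127.0
  scales = np.where(scales == 0, 1.0, scales)  # guard against all-zero blocks
  quantized = np.clip(np.round(blocks / scales), -127, 127).astype(np.int8)
  return quantized, scales.astype(np.float32)


embedding = np.random.randn(1000, 256).astype(np.float32)  # [vocab, dim]
q, s = blockwise_quantize(embedding, block_size=32)
print(q.shape, s.shape)  # (1000, 8, 32) (1000, 8, 1): one scale per 32-wide block
```
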
@@ -259,6 +260,60 @@ def _get_single_tensor_params(
   )
 
 
+def _materialize_tensors_with_quantized_data_update(
+    op_tensor_params: list[qtyping.TensorTransformationParams],
+    tensors: Sequence[Any],
+    quant_params: Optional[qtyping.UniformQuantParams],
+    is_inbounding_tensor: bool,
+    op_info: qtyping.OpInfo,
+    graph_info: qtyping.GraphInfo,
+    tensor_name_to_qsv: dict[str, Any],
+    get_tensor_quant_params_fn: qtyping.GetTensorQuantParamsFuncSignature,
+) -> None:
+  """Materialize a list of tensors with `quantized_data` updated when needed.
+
+  Args:
+    op_tensor_params: Tensor transformation parameters for the op. Will be
+      modified to include new tensor parameters.
+    tensors: Tensors to be materialized.
+    quant_params: The quantization parameters to be used for materialization.
+    is_inbounding_tensor: Whether the tensor is an inbounding tensor for the op.
+    op_info: Aggregated information about the op (e.g., quantization config).
+    graph_info: Graph information needed to perform quantization for the op.
+    tensor_name_to_qsv: A map of tensor name to quantization parameters.
+    get_tensor_quant_params_fn: Function to get quantization parameters for the
+      tensor.
+  """
+  if quant_params is not None and quant_params.quantized_data is not None:
+    quant_params = dataclasses.replace(quant_params, quantized_data=None)
+
+  for tensor in tensors:
+    tensor_data = tfl_flatbuffer_utils.get_tensor_data(
+        tensor, graph_info.buffers
+    )
+    if quant_params is None or tensor_data is None:
+      tensor_quant_params = quant_params
+    else:
+      # Constant tensors require updating `quantized_data`.
+      quantized_data = uniform_quantize_tensor.uniform_quantize(
+          tensor_data, quant_params
+      )
+      tensor_quant_params = dataclasses.replace(
+          quant_params,
+          quantized_data=quantized_data,
+      )
+    _materialize_op_tensors(
+        op_tensor_params,
+        [tensor],
+        is_inbounding_tensor=is_inbounding_tensor,
+        op_info=op_info,
+        graph_info=graph_info,
+        tensor_name_to_qsv=tensor_name_to_qsv,
+        get_tensor_quant_params_fn=get_tensor_quant_params_fn,
+        quant_params=tensor_quant_params,
+    )
+
+
 def _materialize_standard_op_with_same_as_input_scale(
     input_tensors: Sequence[Any],
     output_tensors: Sequence[Any],
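
The new helper factors out a pattern both scale-propagation paths need: clear any stale `quantized_data` carried on the shared params, then re-attach freshly quantized data, via `dataclasses.replace`, only for constant tensors (those whose data is present in the flatbuffer). A self-contained sketch of that pattern follows; `QuantParams` and `quantize` are hypothetical stand-ins for `qtyping.UniformQuantParams` and `uniform_quantize_tensor.uniform_quantize`, and the toy tensor list stands in for real flatbuffer tensors.

```python
# Sketch of "attach quantized_data only for constant tensors".
# QuantParams / quantize are stand-ins, not the package's real classes.
import dataclasses
from typing import Optional

import numpy as np


@dataclasses.dataclass(frozen=True)
class QuantParams:
  scale: float
  zero_point: int
  quantized_data: Optional[np.ndarray] = None


def quantize(data: np.ndarray, params: QuantParams) -> np.ndarray:
  q = np.round(data / params.scale) + params.zero_point
  return np.clip(q, -128, 127).astype(np.int8)


shared = QuantParams(scale=0.1, zero_point=0, quantized_data=np.array([1], np.int8))
# Drop quantized_data that belongs to some other tensor before reusing the params.
shared = dataclasses.replace(shared, quantized_data=None)

# None models an activation (no constant data); the array models a constant tensor.
for tensor_data in [None, np.array([0.5, -0.2], np.float32)]:
  if tensor_data is None:
    params = shared  # activations: reuse the shared params as-is
  else:
    params = dataclasses.replace(  # constants: quantize their data with those params
        shared, quantized_data=quantize(tensor_data, shared)
    )
  print(params.quantized_data)  # None, then [ 5 -2]
```
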
@@ -293,45 +348,24 @@ def _materialize_standard_op_with_same_as_input_scale(
       get_tensor_quant_params_fn=get_tensor_quant_params_fn,
   )
   op_tensor_params.append(input_tensor_params)
-  # Use input quantization params for all output tensors but without
-  # quantized_data in case the input is a constant tensor.
-  input_quant_params = dataclasses.replace(
-      input_tensor_params.consumers[0].parameters,
-      quantized_data=None,
-  )
+  # Use input quantization params for all output tensors.
+  input_quant_params = input_tensor_params.consumers[0].parameters
   if not isinstance(input_quant_params, qtyping.UniformQuantParams):
     raise ValueError(
         "_materialize_standard_op_with_same_as_input_scale only supports"
         f" UniformQuantParams. For tensor {input_tensor_params.tensor_name},"
         f" got {type(input_quant_params)}"
     )
-  # Materialize each of the output tensors separately in case there are
-  # constants among them, requiring updating `quantized_data` first.
-  for output_tensor in output_tensors:
-    output_tensor_data = tfl_flatbuffer_utils.get_tensor_data(
-        output_tensor, graph_info.buffers
-    )
-    # Quantize constant inputs' data with the output quantization params.
-    if output_tensor_data is None:
-      quant_params = input_quant_params
-    else:
-      quantized_data = uniform_quantize_tensor.uniform_quantize(
-          output_tensor_data, input_quant_params
-      )
-      quant_params = dataclasses.replace(
-          input_quant_params,
-          quantized_data=quantized_data,
-      )
-    _materialize_op_tensors(
-        op_tensor_params,
-        [output_tensor],
-        is_inbounding_tensor=False,
-        op_info=op_info,
-        graph_info=graph_info,
-        tensor_name_to_qsv=tensor_name_to_qsv,
-        get_tensor_quant_params_fn=get_tensor_quant_params_fn,
-        quant_params=quant_params,
-    )
+  _materialize_tensors_with_quantized_data_update(
+      op_tensor_params,
+      output_tensors,
+      input_quant_params,
+      is_inbounding_tensor=False,
+      op_info=op_info,
+      graph_info=graph_info,
+      tensor_name_to_qsv=tensor_name_to_qsv,
+      get_tensor_quant_params_fn=get_tensor_quant_params_fn,
+  )
 
   # Change output qsv to be the same as input qsv. This is safe since TFL
   # subgraph is acyclic.
@@ -379,19 +413,26 @@ def _materialize_standard_op_with_same_as_output_scale(
   )
   # Use output quantization params for all input tensors.
   if output_tensor_params.producer is None:
-    quant_params = None
+    output_quant_params = None
   else:
-    quant_params = output_tensor_params.producer.parameters
-  _materialize_op_tensors(
+    output_quant_params = output_tensor_params.producer.parameters
+    if not isinstance(output_quant_params, qtyping.UniformQuantParams):
+      raise ValueError(
+          "_materialize_standard_op_with_same_as_output_scale only supports"
+          f" UniformQuantParams. For tensor {output_tensor_params.tensor_name},"
+          f" got {type(output_quant_params)}"
+      )
+  _materialize_tensors_with_quantized_data_update(
       op_tensor_params,
       input_tensors,
+      output_quant_params,
       is_inbounding_tensor=True,
       op_info=op_info,
       graph_info=graph_info,
       tensor_name_to_qsv=tensor_name_to_qsv,
       get_tensor_quant_params_fn=get_tensor_quant_params_fn,
-      quant_params=quant_params,
   )
+
   op_tensor_params.append(output_tensor_params)
 
   return op_tensor_params
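
The output-scale path now mirrors the input-scale one: the params attached by the output's producer are validated as uniform quantization params and reused, through the shared helper, for every input tensor, so the inputs, any constants among them, and the output all land on one grid. A toy sketch of that propagation direction under assumed names:

```python
# Toy sketch of SAME_AS_OUTPUT_SCALE propagation: every input tensor reuses the
# quantization params of the op's output. Names and structures are illustrative.
import dataclasses
from typing import Optional


@dataclasses.dataclass(frozen=True)
class UniformParams:
  scale: float
  zero_point: int


def propagate_same_as_output(
    input_names: list[str],
    output_params: Optional[UniformParams],
) -> dict[str, Optional[UniformParams]]:
  """Assigns the output tensor's params to each input tensor."""
  if output_params is not None and not isinstance(output_params, UniformParams):
    raise ValueError("only uniform quantization params can be propagated")
  return {name: output_params for name in input_names}


# e.g. PADV2: the data input and the constant pad value share the output's grid;
# the paddings tensor is excluded upstream via inputs_to_ignore and never gets here.
print(propagate_same_as_output(["data", "pad_value"], UniformParams(0.05, -10)))
```
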
@@ -195,7 +195,8 @@ DEFAULT_JSON_POLICY = """
       "BROADCAST_TO",
       "SQRT",
       "GATHER",
-      "MAXIMUM"
+      "MAXIMUM",
+      "PADV2"
     ],
     "static_wi8_ai8": [
       "ADD",
@@ -240,7 +241,8 @@ DEFAULT_JSON_POLICY = """
       "SQRT",
       "GATHER",
       "HARD_SWISH",
-      "MAXIMUM"
+      "MAXIMUM",
+      "PADV2"
     ],
     "static_wi4_ai8": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT", "EMBEDDING_LOOKUP"],
     "static_wi4_ai16": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT", "EMBEDDING_LOOKUP"],
@@ -76,6 +76,7 @@ class TFLOperationName(str, enum.Enum):
   GATHER = 'GATHER'
   HARD_SWISH = 'HARD_SWISH'
   MAXIMUM = 'MAXIMUM'
+  PADV2 = 'PADV2'
 
 
 class QuantizeMode(enum.Enum):
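
Because `TFLOperationName` mixes in `str`, the new `PADV2` member compares equal to the bare string used in the JSON policy and in name-keyed tables, so no extra conversion layer is needed. A two-assertion sketch (the class body here is a trimmed mirror of the real enum, not a new definition):

```python
# Sketch: str-valued enum members behave like the strings they wrap.
import enum


class TFLOperationName(str, enum.Enum):  # trimmed mirror of the enum in qtyping
  MAXIMUM = 'MAXIMUM'
  PADV2 = 'PADV2'


assert TFLOperationName.PADV2 == 'PADV2'  # compares equal to the plain string
assert TFLOperationName('PADV2') is TFLOperationName.PADV2  # lookup by value
```
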
@@ -33,7 +33,7 @@ class ConstrainedOpsUtilsTest(parameterized.TestCase):
       dict(
           testcase_name="same_as_output_scale",
           constraint=_OpQuantConstraint.SAME_AS_OUTPUT_SCALE,
-          expected_num_ops=6,
+          expected_num_ops=7,
       ),
       dict(
           testcase_name="no_constrain",
@@ -70,6 +70,7 @@ TFL_OP_NAME_TO_CODE = immutabledict.immutabledict({
     _TFLOpName.GATHER: schema.BuiltinOperator.GATHER,
     _TFLOpName.HARD_SWISH: schema.BuiltinOperator.HARD_SWISH,
     _TFLOpName.MAXIMUM: schema.BuiltinOperator.MAXIMUM,
+    _TFLOpName.PADV2: schema.BuiltinOperator.PADV2,
 })
 
 TFL_OP_CODE_TO_NAME = immutabledict.immutabledict(
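
The trailing context line hints at how the reverse lookup is produced: a `TFL_OP_CODE_TO_NAME` immutabledict derived from the forward table, so mapping `PADV2` to its builtin operator code also makes it resolvable from a flatbuffer op code. The sketch below shows that inversion pattern with `immutabledict`; the numeric codes are placeholders, not TFLite's actual builtin values, and the expression is not a claim about the exact code in `tfl_flatbuffer_utils`.

```python
# Sketch: derive a code -> name lookup by inverting a name -> code table.
# The numeric op codes are illustrative placeholders.
import immutabledict

TFL_OP_NAME_TO_CODE = immutabledict.immutabledict({
    "MAXIMUM": 55,
    "PADV2": 60,
})

# Inversion assumes the forward mapping is one-to-one.
TFL_OP_CODE_TO_NAME = immutabledict.immutabledict(
    {code: name for name, code in TFL_OP_NAME_TO_CODE.items()}
)

print(TFL_OP_CODE_TO_NAME[60])  # PADV2
```
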
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-quantizer-nightly
-Version: 0.4.0.dev20250904
+Version: 0.4.0.dev20250906
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
@@ -1,18 +1,18 @@
 ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas,793
-ai_edge_quantizer/algorithm_manager.py,sha256=O_psY-4R0ARmgTQHwfH2px81AJY8PmfamHtE7xJDRjQ,13424
+ai_edge_quantizer/algorithm_manager.py,sha256=3kmn-hTLEhHOfAQTkUoN8xXymFtoljzLU-ADpd7uBrE,13538
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=w6bSONvXkX6bzXAGc0-7b6gNDt9oz9ieq97KP8Sg_JU,7666
 ai_edge_quantizer/calibrator.py,sha256=Sms7_AIHPH9G5xFaz5Ef3a5gPhxuIWQI8d2LUM8C96I,12071
 ai_edge_quantizer/calibrator_test.py,sha256=ZLzIMWB2FSFU4TOatDioYuwp_kLh8iSCefZ5_Q9FU7s,11900
 ai_edge_quantizer/conftest.py,sha256=SxCz-5LlRD_lQm4hQc4c6IGG7DS8d7IyEWY9gnscPN0,794
-ai_edge_quantizer/default_policy.py,sha256=G_JZtZaQAnrWyfCusDWXwO27iLysk27RS91GlS61m_Q,11592
+ai_edge_quantizer/default_policy.py,sha256=6kEYu0nOQqBKpclzgmxuzvatiVR0BF_ce6zoKCoudW4,11622
 ai_edge_quantizer/model_modifier.py,sha256=teGa8I6kGvn6TQY6Xv53YFIc_pQEhNvM9Zb4bvhezyw,7110
 ai_edge_quantizer/model_modifier_test.py,sha256=cJd04SLOG-fQZZNZPcisoBLx3cLtWEwGqUBbLb-pif4,4751
 ai_edge_quantizer/model_validator.py,sha256=Hj0_5o-Oa3dSlJ3ryVjRhvsyelHNyek1GrtG9buMczg,13153
 ai_edge_quantizer/model_validator_test.py,sha256=EeqOP_mrZsnZ3rug756s0ryDDqd2KgIDld5Lm_gDuWY,13020
 ai_edge_quantizer/params_generator.py,sha256=hcgMHJlERZERUyIAEi6AHJcLJ8gsKIBAEojzFFz-tqk,20098
 ai_edge_quantizer/params_generator_test.py,sha256=RDYoRZDJfEZRtjlTAU2kZ_4t3JHOqEHxfJX9V4ETAhg,40597
-ai_edge_quantizer/qtyping.py,sha256=zXXmLBZUT-cfjnQrqDkytDZaGg3z_yy1wWhKr34_XVg,16792
+ai_edge_quantizer/qtyping.py,sha256=ygLmj_PPTYM1yAs3oCJ649q75cZPQYjJ8hXtFLVqfv8,16810
 ai_edge_quantizer/quantizer.py,sha256=ckAEOnnBxuCKZuvlzdChevCKPuE-IeDPHCNtFTWr250,17857
 ai_edge_quantizer/quantizer_test.py,sha256=m6f4ayyaF3yQb9i4V0aFAbmGw0OKZ2Zam1RoTPh-u24,22917
 ai_edge_quantizer/recipe.py,sha256=MEkfQ2Sg3KAE9LAORHWcbjYNPg06EUbwc1d-VspQA2U,6461
@@ -28,7 +28,7 @@ ai_edge_quantizer/algorithms/nonlinear_quantize/__init__.py,sha256=lpq1g2ayg3lCP
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting.py,sha256=Bs9CK7wZAw6jNaZ8xEtbwO2vM34VYXNZSMVWvxJo9nw,9297
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py,sha256=EqIHGEZ1LgUrTN7zf880RuAzEv3Qy7kgh5ivObJGHSo,22646
 ai_edge_quantizer/algorithms/uniform_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=rkf7jLPVDKpx2ju1LyyP7bxc6n34cLD2E3w2mxLd6qE,35344
+ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=TQQxkxeAngrZO6ro6RjOeJAieWHIgK4hrACtbU0-Buk,35919
 ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=GGf_n3wIeg3GB_eGsmyNJ0fTcxgpeMMbugTMRONK6TQ,3553
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=BDdn_uBZakfHyzdMJPKadsOqxqyC-s6W2ZzFH99L4fE,8652
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=sT5eX5TLZEHTtPfnSkCPDlS0sQxlTFWbCsbvOuj--yY,8889
@@ -41,7 +41,7 @@ ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py,sha256=sha1d99Xk87bI
 ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py,sha256=uCREMXi0U2ckhXXfgGVzwSgjFZc0IbtnFU-OjlG9IO8,17146
 ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py,sha256=7kHluzpteMv36hFD6LD_qnwwMoE1GKUP4bGmGMFbOdA,12755
 ai_edge_quantizer/algorithms/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=QrEeCuvA7gY_vK1nbKtqassNDClyAjN1ClZIiw63k5U,35895
+ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=4eAlGph6DDW18bUdoY0XcUoOXEr3P_3_W1ptidD8qK4,37611
 ai_edge_quantizer/algorithms/utils/common_utils_test.py,sha256=zqapGEfYhjQWe9cNGPLmdbwtEUUYQRhlO_kNe0cXX6E,18104
 ai_edge_quantizer/transformations/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/transformations/dequant_insert.py,sha256=sL1LHFVzBDSd9jgrzlHz38LWU0bwmVX7iBkaNcui0ts,3566
@@ -62,16 +62,16 @@ ai_edge_quantizer/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V
 ai_edge_quantizer/utils/calibration_utils.py,sha256=iMf_bSCf-O86MzDt5D9hLKqbTydqLwirluaC6BJ9yHo,11553
 ai_edge_quantizer/utils/calibration_utils_test.py,sha256=4BlksXl7b4yptL8xPR67hmJCnjhN9V10a2PunzfHrUE,9372
 ai_edge_quantizer/utils/constrained_ops_utils.py,sha256=EAITCf7Ku_PFZcw3K-wd-8hGbyuRd5W5UtNdGvalwAE,4478
-ai_edge_quantizer/utils/constrained_ops_utils_test.py,sha256=6k_AqfB-NmiLkW5WwEV5NSuswFWky2sL0xBGmV6Fdwk,1756
+ai_edge_quantizer/utils/constrained_ops_utils_test.py,sha256=xWujKhNR_OFXReFM-njFbiaC_4W7kMNr7lmFFRlGNLw,1756
 ai_edge_quantizer/utils/test_utils.py,sha256=a4Nk-wbeB09dFjTDZiA0K67d26j5DD0UDH_GIVmVG_4,8685
-ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=aNtL4dpWH5uGGGlaygnMDkh5llTstbgs5ZxO0JkH5VQ,11718
+ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=1NnRPdvqdvZ5sKbIdePcBv8SaCS2LqZXX_B51oDRXrQ,11770
 ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py,sha256=K1SbK8q92qYVtiVj0I0GtugsPTkpIpEKv9zakvFV_Sc,8555
 ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=EoVjI_hplX_Rml3hfRsGmQOihexmizeJqt4SQcET9aA,14925
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=6fjkM-rycZ95L4yfvlr0TN6RlrhfPzxNUYrZaYO_F0A,12013
 ai_edge_quantizer/utils/validation_utils.py,sha256=oYw33Sg547AqtGw-choPUJmp9SAKkV46J_ddqSsum2Q,3950
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=V_qNDikPD4OPB-siOLQCWNVWTAu87h2IgNYt7teFd-o,2934
-ai_edge_quantizer_nightly-0.4.0.dev20250904.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-ai_edge_quantizer_nightly-0.4.0.dev20250904.dist-info/METADATA,sha256=AqVFUY7NzeZmDKSjDGACfkR1HIsitVwnnJ91NlaktX0,1508
-ai_edge_quantizer_nightly-0.4.0.dev20250904.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-ai_edge_quantizer_nightly-0.4.0.dev20250904.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
-ai_edge_quantizer_nightly-0.4.0.dev20250904.dist-info/RECORD,,
+ai_edge_quantizer_nightly-0.4.0.dev20250906.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.4.0.dev20250906.dist-info/METADATA,sha256=hywZ1CdX4MVCVLEbrx_plaGJL9tEXziGga_psNOiUsc,1508
+ai_edge_quantizer_nightly-0.4.0.dev20250906.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_quantizer_nightly-0.4.0.dev20250906.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.4.0.dev20250906.dist-info/RECORD,,