onnx2tf 1.27.8__py3-none-any.whl → 1.27.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  from onnx2tf.onnx2tf import convert, main
2
2
 
3
- __version__ = '1.27.8'
3
+ __version__ = '1.27.9'
onnx2tf/onnx2tf.py CHANGED
@@ -78,6 +78,7 @@ def convert(
78
78
  not_use_opname_auto_generate: Optional[bool] = False,
79
79
  batch_size: Optional[int] = None,
80
80
  overwrite_input_shape: Optional[List[str]] = None,
81
+ shape_hints: Optional[List[str]] = None,
81
82
  no_large_tensor: Optional[bool] = False,
82
83
  output_nms_with_dynamic_tensor: Optional[bool] = False,
83
84
  switch_nms_version: Optional[str] = 'v4',
@@ -255,6 +256,20 @@ def convert(
255
256
  Numerical values other than dynamic dimensions are ignored.\n
256
257
  Ignores batch_size if specified at the same time as overwrite_input_shape.
257
258
 
259
+ shape_hints: Optional[List[str]]
260
+ Shape hints for input tensors containing dynamic dimensions.\n
261
+ Specify input shapes for test inference with -cotof or -coto.\n
262
+ Unlike `--overwrite_input_shape`, this operation does not overwrite\n
263
+ the ONNX input shape with a static shape.\n
264
+ The format is\n
265
+ ["input_name_1:dim0,...,dimN","input_name_2:dim0,...,dimN","input_name_3:dim0,...,dimN"].\n
266
+ When there is only one input, for example,\n
267
+ ['data:1,3,224,224']\n
268
+ When there are multiple inputs, for example,\n
269
+ ['data1:1,3,224,224','data2:1,3,112,112','data3:5']\n
270
+ A value of 1 or more must be specified.\n
271
+ Numerical values other than dynamic dimensions are ignored.
272
+
258
273
  no_large_tensor: Optional[bool]
259
274
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.\n
260
275
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -1055,6 +1070,7 @@ def convert(
1055
1070
  tf_layers_dict=tf_layers_dict,
1056
1071
  use_cuda=use_cuda,
1057
1072
  disable_strict_mode=disable_strict_mode,
1073
+ shape_hints=shape_hints if (check_onnx_tf_outputs_elementwise_close or check_onnx_tf_outputs_elementwise_close_full) else None,
1058
1074
  )
1059
1075
  """
1060
1076
  onnx_tensor_infos_for_validation:
@@ -1889,6 +1905,7 @@ def convert(
1889
1905
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
1890
1906
  tf_layers_dict=tf_layers_dict,
1891
1907
  use_cuda=use_cuda,
1908
+ shape_hints=shape_hints,
1892
1909
  )
1893
1910
  except Exception as ex:
1894
1911
  warn(
@@ -1898,12 +1915,17 @@ def convert(
1898
1915
  warn(f'{ex}')
1899
1916
  else:
1900
1917
  # TF dummy inference
1901
- tf_tensor_infos: Dict[Any] = dummy_tf_inference(
1902
- model=model,
1903
- inputs=inputs,
1904
- test_data_nhwc=test_data_nhwc,
1905
- custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
1906
- )
1918
+ tf_tensor_infos: Dict[Any] = \
1919
+ dummy_tf_inference(
1920
+ model=model,
1921
+ inputs=inputs,
1922
+ test_data_nhwc=test_data_nhwc,
1923
+ custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
1924
+ shape_hints=shape_hints,
1925
+ keep_shape_absolutely_input_names=keep_shape_absolutely_input_names,
1926
+ keep_ncw_or_nchw_or_ncdhw_input_names=keep_ncw_or_nchw_or_ncdhw_input_names,
1927
+ keep_nwc_or_nhwc_or_ndhwc_input_names=keep_nwc_or_nhwc_or_ndhwc_input_names,
1928
+ )
1907
1929
  # Validation
1908
1930
  onnx_tensor_infos = {
1909
1931
  output_name: dummy_onnx_output \
@@ -2207,6 +2229,25 @@ def main():
2207
2229
  'Numerical values other than dynamic dimensions are ignored. \n' +
2208
2230
  'Ignores --batch_size if specified at the same time as --overwrite_input_shape.'
2209
2231
  )
2232
+ parser.add_argument(
2233
+ '-sh',
2234
+ '--shape_hints',
2235
+ type=str,
2236
+ nargs='+',
2237
+ help=\
2238
+ 'Shape hints for input tensors containing dynamic dimensions. \n' +
2239
+ 'Specify input shapes for test inference with -cotof or -coto. \n' +
2240
+ 'Unlike `--overwrite_input_shape`, this operation does not overwrite \n' +
2241
+ 'the ONNX input shape with a static shape.\n' +
2242
+ 'The format is\n' +
2243
+ '"input_name_1:dim0,...,dimN" "input_name_2:dim0,...,dimN" "input_name_3:dim0,...,dimN". \n' +
2244
+ 'When there is only one input, for example, \n' +
2245
+ '"data:1,3,224,224" \n' +
2246
+ 'When there are multiple inputs, for example, \n' +
2247
+ '"data1:1,3,224,224" "data2:1,3,112,112" "data3:5" \n' +
2248
+ 'Only applied to dynamic dimensions in inputs. \n' +
2249
+ 'Only used when -cotof or -coto are specified.'
2250
+ )
2210
2251
  parser.add_argument(
2211
2252
  '-nlt',
2212
2253
  '--no_large_tensor',
@@ -2631,6 +2672,7 @@ def main():
2631
2672
  not_use_opname_auto_generate=args.not_use_opname_auto_generate,
2632
2673
  batch_size=args.batch_size,
2633
2674
  overwrite_input_shape=args.overwrite_input_shape,
2675
+ shape_hints=args.shape_hints,
2634
2676
  no_large_tensor=args.no_large_tensor,
2635
2677
  output_nms_with_dynamic_tensor=args.output_nms_with_dynamic_tensor,
2636
2678
  switch_nms_version=args.switch_nms_version,
@@ -3633,6 +3633,7 @@ def dummy_onnx_inference(
3633
3633
  tf_layers_dict: Optional[Dict] = None,
3634
3634
  use_cuda: bool = False,
3635
3635
  disable_strict_mode: bool = False,
3636
+ shape_hints: Optional[List[str]] = None,
3636
3637
  ) -> List[np.ndarray]:
3637
3638
  """Perform inference on ONNX subgraphs with an all-1 dummy tensor.
3638
3639
 
@@ -3775,24 +3776,46 @@ def dummy_onnx_inference(
3775
3776
  onnx_inputs = gs_graph.inputs
3776
3777
  input_names: List[str] = [inp.name for inp in onnx_inputs]
3777
3778
  input_sizes: List[int] = [inp.shape for inp in onnx_inputs]
3778
- new_input_sizes = []
3779
- for input_size in input_sizes:
3780
- new_input_size = []
3781
- for idx, dim in enumerate(input_size):
3782
- if idx == 0 and input_sizes[0][0] is not None \
3783
- and not isinstance(input_sizes[0][0], str) \
3784
- and len(input_sizes[0]) == len(input_size) \
3785
- and (dim is None or isinstance(dim, str)):
3786
- # Batch size assignment for input OPs
3787
- new_input_size.append(input_sizes[0][0])
3788
- elif dim is None or isinstance(dim, str):
3789
- # Fixed and assigned 1
3790
- new_input_size.append(1)
3791
- else:
3792
- # Assign input shape as is
3793
- new_input_size.append(dim)
3794
- new_input_sizes.append(new_input_size)
3795
- input_sizes = new_input_sizes
3779
+
3780
+ if shape_hints is None:
3781
+ new_input_sizes = []
3782
+ for input_size in input_sizes:
3783
+ new_input_size = []
3784
+ for idx, dim in enumerate(input_size):
3785
+ if idx == 0 and input_sizes[0][0] is not None \
3786
+ and not isinstance(input_sizes[0][0], str) \
3787
+ and len(input_sizes[0]) == len(input_size) \
3788
+ and (dim is None or isinstance(dim, str)):
3789
+ # Batch size assignment for input OPs
3790
+ new_input_size.append(input_sizes[0][0])
3791
+ elif dim is None or isinstance(dim, str):
3792
+ # Fixed and assigned 1
3793
+ new_input_size.append(1)
3794
+ else:
3795
+ # Assign input shape as is
3796
+ new_input_size.append(dim)
3797
+ new_input_sizes.append(new_input_size)
3798
+ input_sizes = new_input_sizes
3799
+
3800
+ else:
3801
+ shape_hints_dict = {}
3802
+ for hint in shape_hints:
3803
+ parts = hint.split(':')
3804
+ if len(parts) == 2:
3805
+ input_name = parts[0]
3806
+ shape_values = [int(val) for val in parts[1].split(',')]
3807
+ shape_hints_dict[input_name] = shape_values
3808
+
3809
+ for i, (input_name, original_shape) in enumerate(zip(input_names, input_sizes)):
3810
+ if input_name in shape_hints_dict:
3811
+ updated_shape = shape_hints_dict[input_name]
3812
+ for j, (orig_dim, hint_dim) in enumerate(zip(original_shape, updated_shape)):
3813
+ if orig_dim is not None and not isinstance(orig_dim, str):
3814
+ updated_shape[j] = orig_dim
3815
+ else:
3816
+ updated_shape[j] = hint_dim
3817
+ input_sizes[i] = updated_shape
3818
+
3796
3819
  input_dtypes: List[Any] = [inp.dtype for inp in onnx_inputs]
3797
3820
  input_datas = {}
3798
3821
 
@@ -3886,6 +3909,10 @@ def dummy_tf_inference(
3886
3909
  test_data_nhwc: Optional[np.ndarray] = None,
3887
3910
  verification_datas: Optional[List[np.ndarray]] = None,
3888
3911
  custom_input_op_name_np_data_path: Optional[str] = None,
3912
+ shape_hints: Optional[List[str]] = None,
3913
+ keep_shape_absolutely_input_names: Optional[List[str]] = None,
3914
+ keep_ncw_or_nchw_or_ncdhw_input_names: Optional[List[str]] = None,
3915
+ keep_nwc_or_nhwc_or_ndhwc_input_names: Optional[List[str]] = None,
3889
3916
  ) -> Any:
3890
3917
  """Perform inference on TF subgraphs with an all-1 dummy tensor.
3891
3918
 
@@ -3914,23 +3941,74 @@ def dummy_tf_inference(
3914
3941
  """
3915
3942
  input_names: List[str] = [inp.name for inp in inputs]
3916
3943
  input_sizes: List[int] = [inp.shape for inp in inputs]
3917
- new_input_sizes = []
3918
- for input_size in input_sizes:
3919
- new_input_size = []
3920
- for idx, dim in enumerate(input_size):
3921
- if idx == 0 and input_sizes[0][0] is not None \
3922
- and len(input_sizes[0]) == len(input_size) \
3923
- and dim is None:
3924
- # Batch size assignment for input OPs
3925
- new_input_size.append(input_sizes[0][0])
3926
- elif dim is None:
3927
- # Fixed and assigned 1
3928
- new_input_size.append(1)
3929
- else:
3930
- # Assign input shape as is
3931
- new_input_size.append(dim)
3932
- new_input_sizes.append(new_input_size)
3933
- input_sizes = new_input_sizes
3944
+
3945
+ if shape_hints is None:
3946
+ new_input_sizes = []
3947
+ for input_size in input_sizes:
3948
+ new_input_size = []
3949
+ for idx, dim in enumerate(input_size):
3950
+ if idx == 0 and input_sizes[0][0] is not None \
3951
+ and len(input_sizes[0]) == len(input_size) \
3952
+ and dim is None:
3953
+ # Batch size assignment for input OPs
3954
+ new_input_size.append(input_sizes[0][0])
3955
+ elif dim is None:
3956
+ # Fixed and assigned 1
3957
+ new_input_size.append(1)
3958
+ else:
3959
+ # Assign input shape as is
3960
+ new_input_size.append(dim)
3961
+ new_input_sizes.append(new_input_size)
3962
+ input_sizes = new_input_sizes
3963
+
3964
+ else:
3965
+ shape_hints_dict = {}
3966
+ for hint in shape_hints:
3967
+ parts = hint.split(':')
3968
+ if len(parts) == 2:
3969
+ input_name = parts[0]
3970
+ shape_values = [int(val) for val in parts[1].split(',')]
3971
+ shape_hints_dict[input_name] = shape_values
3972
+
3973
+ for i, (input_name, original_shape) in enumerate(zip(input_names, input_sizes)):
3974
+ if input_name in shape_hints_dict:
3975
+ hint_shape = shape_hints_dict[input_name]
3976
+ updated_shape = []
3977
+
3978
+ # Check if we need to keep the original shape
3979
+ keep_absolutely = (
3980
+ keep_shape_absolutely_input_names is not None and
3981
+ input_name in keep_shape_absolutely_input_names
3982
+ )
3983
+ keep_nchw = (
3984
+ keep_ncw_or_nchw_or_ncdhw_input_names is not None and
3985
+ input_name in keep_ncw_or_nchw_or_ncdhw_input_names
3986
+ )
3987
+ keep_nhwc = (
3988
+ keep_nwc_or_nhwc_or_ndhwc_input_names is not None and
3989
+ input_name in keep_nwc_or_nhwc_or_ndhwc_input_names
3990
+ )
3991
+
3992
+ if keep_absolutely or keep_nchw:
3993
+ updated_shape = hint_shape
3994
+ # Otherwise, convert from NCHW to NHWC based on dimensionality
3995
+ elif len(hint_shape) == 3: # NCW -> NWC [0,2,1]
3996
+ updated_shape = [hint_shape[0], hint_shape[2], hint_shape[1]]
3997
+ elif len(hint_shape) == 4: # NCHW -> NHWC [0,3,1,2]
3998
+ updated_shape = [hint_shape[0], hint_shape[2], hint_shape[3], hint_shape[1]]
3999
+ elif len(hint_shape) == 5: # NCDHW -> NDHWC [0,4,1,2,3]
4000
+ updated_shape = [hint_shape[0], hint_shape[2], hint_shape[3], hint_shape[4], hint_shape[1]]
4001
+ else:
4002
+ updated_shape = hint_shape
4003
+
4004
+ for j, (orig_dim, hint_dim) in enumerate(zip(original_shape, updated_shape)):
4005
+ if orig_dim is not None and not isinstance(orig_dim, str):
4006
+ updated_shape[j] = orig_dim
4007
+ else:
4008
+ updated_shape[j] = hint_dim
4009
+
4010
+ input_sizes[i] = updated_shape
4011
+
3934
4012
  input_dtypes: List[Any] = [inp.dtype for inp in inputs]
3935
4013
  input_datas = {}
3936
4014
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: onnx2tf
3
- Version: 1.27.8
3
+ Version: 1.27.9
4
4
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
334
334
  docker run --rm -it \
335
335
  -v `pwd`:/workdir \
336
336
  -w /workdir \
337
- ghcr.io/pinto0309/onnx2tf:1.27.8
337
+ ghcr.io/pinto0309/onnx2tf:1.27.9
338
338
 
339
339
  or
340
340
 
@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
342
342
  docker run --rm -it \
343
343
  -v `pwd`:/workdir \
344
344
  -w /workdir \
345
- docker.io/pinto0309/onnx2tf:1.27.8
345
+ docker.io/pinto0309/onnx2tf:1.27.9
346
346
 
347
347
  or
348
348
 
@@ -1747,6 +1747,21 @@ optional arguments:
1747
1747
  Numerical values other than dynamic dimensions are ignored.
1748
1748
  Ignores --batch_size if specified at the same time as --overwrite_input_shape.
1749
1749
 
1750
+ -sh SHAPE_HINTS [SHAPE_HINTS ...], \
1751
+ --shape_hints SHAPE_HINTS [SHAPE_HINTS ...]
1752
+ Shape hints for input tensors containing dynamic dimensions.
1753
+ Specify input shapes for test inference with -cotof or -coto.
1754
+ Unlike `--overwrite_input_shape`, this operation does not overwrite
1755
+ the ONNX input shape with a static shape.
1756
+ The format is
1757
+ "i1:dim0,...,dimN" "i2:dim0,...,dimN" "i3:dim0,...,dimN"
1758
+ When there is only one input, for example,
1759
+ "data:1,3,224,224"
1760
+ When there are multiple inputs, for example,
1761
+ "data1:1,3,224,224" "data2:1,3,112,112" "data3:5"
1762
+ A value of 1 or more must be specified.
1763
+ Numerical values other than dynamic dimensions are ignored.
1764
+
1750
1765
  -nlt, --no_large_tensor
1751
1766
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
1752
1767
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -2052,6 +2067,7 @@ convert(
2052
2067
  not_use_opname_auto_generate: Optional[bool] = False,
2053
2068
  batch_size: Union[int, NoneType] = None,
2054
2069
  overwrite_input_shape: Union[List[str], NoneType] = None,
2070
+ shape_hints: Union[List[str], NoneType] = None,
2055
2071
  no_large_tensor: Optional[bool] = False,
2056
2072
  output_nms_with_dynamic_tensor: Optional[bool] = False,
2057
2073
  switch_nms_version: Optional[str] = 'v4',
@@ -2244,6 +2260,20 @@ convert(
2244
2260
  Numerical values other than dynamic dimensions are ignored.
2245
2261
  Ignores batch_size if specified at the same time as overwrite_input_shape.
2246
2262
 
2263
+ shape_hints: Optional[List[str]]
2264
+ Shape hints for input tensors containing dynamic dimensions.
2265
+ Specify input shapes for test inference with -cotof or -coto.
2266
+ Unlike `--overwrite_input_shape`, this operation does not overwrite
2267
+ the ONNX input shape with a static shape.
2268
+ The format is
2269
+ ['i1:dim0,...,dimN', 'i2:dim0,...,dimN', 'i3:dim0,...,dimN']
2270
+ When there is only one input, for example,
2271
+ ['data:1,3,224,224']
2272
+ When there are multiple inputs, for example,
2273
+ ['data1:1,3,224,224', 'data2:1,3,112,112', 'data3:5']
2274
+ A value of 1 or more must be specified.
2275
+ Numerical values other than dynamic dimensions are ignored.
2276
+
2247
2277
  no_large_tensor: Optional[bool]
2248
2278
  Suppresses constant bloat caused by Tile OP when optimizing models in onnxsim.
2249
2279
  See: https://github.com/daquexian/onnx-simplifier/issues/178
@@ -1,6 +1,6 @@
1
- onnx2tf/__init__.py,sha256=FA0T9QrV9uxJnT2OPSYDXM4XnIfbdPVzPKxKz-BfM6s,66
1
+ onnx2tf/__init__.py,sha256=hCzkVCeiufss9-eyErBsiXMsFVA44Rp3QwxBCmpNWds,66
2
2
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
3
- onnx2tf/onnx2tf.py,sha256=KPFVEhTDBhX7Y-Bh7X40ZEclGf2_Vfp9-M9AMPqOywI,123892
3
+ onnx2tf/onnx2tf.py,sha256=_PB395J0GPgruBNqFzf_45-LR5boe3avZzoQIdnoSOY,126240
4
4
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
5
5
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
6
6
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -185,13 +185,13 @@ onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
185
185
  onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
186
186
  onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
187
187
  onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
188
- onnx2tf/utils/common_functions.py,sha256=TXvZ7qg9stD1qKvbtkoE70jRxojdtdQhhHIoYaM0I1E,241826
188
+ onnx2tf/utils/common_functions.py,sha256=ZxIdTjdTD69qqiVF85oMm5nSRFWfnxeN5CxoCcM4RxY,245467
189
189
  onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
190
190
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
191
- onnx2tf-1.27.8.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
192
- onnx2tf-1.27.8.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
193
- onnx2tf-1.27.8.dist-info/METADATA,sha256=LemcJx7-r_QtmIE02r6C6XUgQKnDKGzOsyWyCF-1s5s,147712
194
- onnx2tf-1.27.8.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
195
- onnx2tf-1.27.8.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
196
- onnx2tf-1.27.8.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
197
- onnx2tf-1.27.8.dist-info/RECORD,,
191
+ onnx2tf-1.27.9.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
192
+ onnx2tf-1.27.9.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
193
+ onnx2tf-1.27.9.dist-info/METADATA,sha256=95mh0J9Jr3DzywE8DPCmW_GAcMhbQhXlm2DExf0PlVE,149134
194
+ onnx2tf-1.27.9.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
195
+ onnx2tf-1.27.9.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
196
+ onnx2tf-1.27.9.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
197
+ onnx2tf-1.27.9.dist-info/RECORD,,