onnx2tf 1.28.2__py3-none-any.whl → 1.28.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.28.2'
+ __version__ = '1.28.3'
onnx2tf/onnx2tf.py CHANGED
@@ -109,6 +109,7 @@ def convert(
  replace_to_pseudo_operators: List[str] = None,
  param_replacement_file: Optional[str] = '',
  auto_generate_json: Optional[bool] = False,
+ auto_generate_json_on_error: Optional[bool] = False,
  check_gpu_delegate_compatibility: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -445,6 +446,12 @@ def convert(
  which can take a very long time depending on the model complexity.\n
  Default: False

+ auto_generate_json_on_error: Optional[bool]
+ When accuracy validation detects errors greater than the allowed threshold, automatically\n
+ generate a parameter replacement JSON as a best-effort fix.\n
+ This is now opt-in and requires explicitly enabling the feature.\n
+ Default: False
+
  check_gpu_delegate_compatibility: Optional[bool]
  Run TFLite ModelAnalyzer on the generated Float16 tflite model\n
  to check if the model can be supported by GPU Delegate.
@@ -1200,24 +1207,24 @@ def convert(
  error_onnx_op_name = graph_node.name if 'graph_node' in locals() else None
  # Attach it to the exception for later use
  ex.onnx_op_name = error_onnx_op_name
-
+
  # If no replacement file was provided, try to generate one automatically
  if not param_replacement_file and input_onnx_file_path:
  info('')
  info(Color.REVERSE(f'Attempting automatic JSON generation due to conversion error'), '=' * 30)
  if error_onnx_op_name:
  info(f'Error occurred at ONNX operation: {error_onnx_op_name}')
-
+
  # Try iterative JSON generation with multiple attempts
  max_attempts = 3
  attempt = 0
  successful_conversion = False
  best_json = None
-
+
  while attempt < max_attempts and not successful_conversion:
  attempt += 1
  info(f'\nJSON generation attempt {attempt}/{max_attempts}')
-
+
  try:
  # Generate JSON with unlimited mode for exhaustive search
  auto_json = generate_auto_replacement_json(
@@ -1230,28 +1237,28 @@ def convert(
  max_iterations=attempt * 3, # Increase iterations with each attempt
  unlimited_mode=True, # Enable unlimited mode
  )
-
+
  if auto_json.get('operations'):
  best_json = auto_json
-
+
  # Save temporary JSON
  temp_json_path = os.path.join(output_folder_path, f'_temp_attempt_{attempt}.json')
  with open(temp_json_path, 'w') as f:
  json.dump(auto_json, f, indent=2)
-
+
  info(f'Testing generated JSON with {len(auto_json["operations"])} operations...')
-
+
  # Try to re-run just the problematic operation with the JSON
  # This is a simplified test - in practice we'd need to re-run the full conversion
  # For now, we'll assume the JSON might work and save it
-
+
  # Clean up temp file
  if os.path.exists(temp_json_path):
  os.remove(temp_json_path)
-
+
  except Exception as json_ex:
  error(f"Error in attempt {attempt}: {type(json_ex).__name__}: {str(json_ex)}")
-
+
  # Save the best JSON we generated
  if best_json and best_json.get('operations'):
  json_path = save_auto_replacement_json(
@@ -2065,23 +2072,22 @@ def convert(
  rtol=check_onnx_tf_outputs_elementwise_close_rtol,
  atol=check_onnx_tf_outputs_elementwise_close_atol,
  )
-
- # Check if any errors exceed threshold and auto-generate JSON if needed
- # Skip this if -agj is specified (will be handled separately)
- if not param_replacement_file and input_onnx_file_path and not auto_generate_json:
- max_error_found = 0.0
- has_significant_errors = False
- error_count = 0
- for (onnx_name, tf_name), checked_value in check_results.items():
- matched_flg = checked_value[1]
- max_abs_err = checked_value[2]
- if (matched_flg == 0 or matched_flg == False) and isinstance(max_abs_err, (int, float, np.float32, np.float64)):
- if max_abs_err > 1e-2:
- has_significant_errors = True
- error_count += 1
- max_error_found = max(max_error_found, max_abs_err)
-
- if has_significant_errors:
+
+ # Inspect validation errors for optional auto JSON generation on error
+ max_error_found = 0.0
+ has_significant_errors = False
+ error_count = 0
+ for (onnx_name, tf_name), checked_value in check_results.items():
+ matched_flg = checked_value[1]
+ max_abs_err = checked_value[2]
+ if (matched_flg == 0 or matched_flg is False) and isinstance(max_abs_err, (int, float, np.float32, np.float64)):
+ if max_abs_err > 1e-2:
+ has_significant_errors = True
+ error_count += 1
+ max_error_found = max(max_error_found, max_abs_err)
+
+ if has_significant_errors and not auto_generate_json:
+ if auto_generate_json_on_error and not param_replacement_file and input_onnx_file_path:
  info('')
  info(Color.REVERSE(f'Attempting automatic JSON generation due to accuracy errors > 1e-2'), '=' * 25)
  info(f'Found {error_count} operations with errors > 1e-2')
@@ -2106,9 +2112,14 @@ def convert(
  )
  else:
  warn(
- f'Accuracy errors > 1e-2 found but automatic JSON generation could not find a solution.'
+ 'Accuracy errors > 1e-2 found but automatic JSON generation could not find a solution.'
  )
-
+ elif not auto_generate_json_on_error:
+ warn(
+ 'Accuracy validation found errors > 1e-2. Automatic JSON generation on error is disabled by default.\n' +
+ 'Re-run with --auto_generate_json_on_error or provide a parameter replacement JSON file.'
+ )
+
  for (onnx_output_name, tf_output_name), checked_value in check_results.items():
  validated_onnx_tensor: np.ndarray = checked_value[0]
  matched_flg: int = checked_value[1]
@@ -2143,34 +2154,34 @@ def convert(
  if auto_generate_json:
  # Store the generated JSON path for later use
  generated_json_path = None
-
+
  # Check if -cotof was already executed and we have check_results
  if check_onnx_tf_outputs_elementwise_close_full and 'check_results' in locals():
  # We already have validation results from -cotof
  info('')
  info(Color.REVERSE(f'Auto JSON generation started (using -cotof results)'), '=' * 35)
-
+
  # Check if any errors exist
  all_matched = True
  max_error = 0.0
  error_count = 0
-
+
  for (onnx_name, tf_name), checked_value in check_results.items():
  matched_flg = checked_value[1]
  max_abs_err = checked_value[2]
-
+
  if matched_flg == 0: # Unmatched
  all_matched = False
  if isinstance(max_abs_err, (int, float, np.float32, np.float64)):
  max_error = max(max_error, max_abs_err)
  error_count += 1
-
+
  if all_matched:
  info(Color.GREEN('All outputs already match! No JSON generation needed.'))
  else:
  info(f'Found {error_count} outputs with errors, max error: {max_error:.6f}')
  info('Generating optimal JSON...')
-
+
  # Generate auto replacement JSON
  auto_json = generate_auto_replacement_json(
  onnx_graph=gs.import_onnx(onnx_graph),
@@ -2183,7 +2194,7 @@ def convert(
  target_accuracy=check_onnx_tf_outputs_elementwise_close_atol,
  unlimited_mode=True,
  )
-
+
  if auto_json.get('operations'):
  # Save the JSON
  generated_json_path = save_auto_replacement_json(
@@ -2192,17 +2203,17 @@ def convert(
  output_dir=output_folder_path,
  )
  info(f'Generated JSON with {len(auto_json["operations"])} operations: {generated_json_path}')
-
+
  # If both -cotof and -agj are specified, re-run validation with the generated JSON
  info('')
  info(Color.REVERSE(f'Re-running validation with auto-generated JSON'), '=' * 35)
-
+
  # TODO: In a full implementation, we would need to:
  # 1. Re-run the entire conversion with the generated JSON
  # 2. Re-validate the outputs
  # 3. Display the new validation results
  # For now, we just inform the user
-
+
  info(Color.GREEN(f'\nAuto-generated JSON saved to: {generated_json_path}'))
  info(
  f'To see the validation results with the generated JSON, please re-run with:\n' +
@@ -2210,7 +2221,7 @@ def convert(
  )
  else:
  warn('No viable parameter replacements found.')
-
+
  else:
  # -agj is specified but -cotof is not, so we need to run our own validation
  try:
@@ -2222,16 +2233,16 @@ def convert(
  f'you must install onnxruntime and sne4onnx. pip install sne4onnx onnxruntime'
  )
  sys.exit(1)
-
+
  info('')
  info(Color.REVERSE(f'Auto JSON generation started'), '=' * 50)
  info(
  'Searching for optimal parameter replacement JSON to achieve minimum error...'
  )
-
+
  # Run validation for final outputs only
  ops_output_names = output_names
-
+
  # Rebuild model for validation
  outputs = [
  layer_info['tf_node'] \
@@ -2246,13 +2257,13 @@ def convert(
  and hasattr(layer_info['tf_node'], 'numpy')
  ]
  validation_model = tf_keras.Model(inputs=inputs, outputs=outputs)
-
+
  # Exclude output OPs not subject to validation
  ops_output_names = [
  ops_output_name for ops_output_name in ops_output_names \
  if ops_output_name not in exclude_output_names
  ]
-
+
  # Initial accuracy check
  try:
  # ONNX dummy inference
@@ -2266,7 +2277,7 @@ def convert(
  use_cuda=use_cuda,
  shape_hints=shape_hints,
  )
-
+
  # TF dummy inference
  tf_tensor_infos: Dict[Any] = \
  dummy_tf_inference(
@@ -2279,13 +2290,13 @@ def convert(
  keep_ncw_or_nchw_or_ncdhw_input_names=keep_ncw_or_nchw_or_ncdhw_input_names,
  keep_nwc_or_nhwc_or_ndhwc_input_names=keep_nwc_or_nhwc_or_ndhwc_input_names,
  )
-
+
  # Validation
  onnx_tensor_infos = {
  output_name: dummy_onnx_output \
  for output_name, dummy_onnx_output in zip(ops_output_names, dummy_onnx_outputs)
  }
-
+
  input_names = [k.name for k in inputs]
  for k, v in tf_layers_dict.items():
  if 'tf_node_info' in v:
@@ -2296,34 +2307,34 @@ def convert(
  for k, v in tf_layers_dict.items() \
  if k not in input_names and not hasattr(v['tf_node'], 'numpy') and k in onnx_tensor_infos
  }
-
+
  agj_check_results = onnx_tf_tensor_validation(
  output_pairs=onnx_tf_output_pairs,
  rtol=0.0,
  atol=1e-4,
  )
-
+
  # Check if all outputs match
  all_matched = True
  max_error = 0.0
  error_count = 0
-
+
  for (onnx_name, tf_name), checked_value in agj_check_results.items():
  matched_flg = checked_value[1]
  max_abs_err = checked_value[2]
-
+
  if matched_flg == 0: # Unmatched
  all_matched = False
  if isinstance(max_abs_err, (int, float, np.float32, np.float64)):
  max_error = max(max_error, max_abs_err)
  error_count += 1
-
+
  if all_matched:
  info(Color.GREEN('All outputs already match! No JSON generation needed.'))
  else:
  info(f'Initial validation: {error_count} outputs have errors, max error: {max_error:.6f}')
  info('Generating optimal JSON...')
-
+
  # Generate auto replacement JSON
  auto_json = generate_auto_replacement_json(
  onnx_graph=gs.import_onnx(onnx_graph),
@@ -2336,7 +2347,7 @@ def convert(
  target_accuracy=1e-4,
  unlimited_mode=True,
  )
-
+
  if auto_json.get('operations'):
  # Save the JSON
  generated_json_path = save_auto_replacement_json(
@@ -2345,7 +2356,7 @@ def convert(
  output_dir=output_folder_path,
  )
  info(f'Generated JSON with {len(auto_json["operations"])} operations: {generated_json_path}')
-
+
  info(Color.GREEN(f'\nAuto-generated JSON saved to: {generated_json_path}'))
  info(
  f'Please re-run the conversion with: -prf {generated_json_path}\n' +
@@ -2353,7 +2364,7 @@ def convert(
  )
  else:
  warn('No viable parameter replacements found.')
-
+
  except Exception as ex:
  warn(
  f'Auto JSON generation failed: {ex}'
@@ -2970,6 +2981,14 @@ def main():
  'The search stops when the final output OP accuracy check shows "Matches". ' +
  'Cannot be used together with -cotof. When -cotof is specified, JSON auto-generation is disabled.'
  )
+ parser.add_argument(
+ '-agje',
+ '--auto_generate_json_on_error',
+ action='store_true',
+ help=\
+ 'Attempts to generate a parameter replacement JSON when accuracy validation detects errors ' +
+ 'greater than 1e-2. Requires -cotof to collect accuracy metrics. Disabled by default.'
+ )
  parser.add_argument(
  '-dms',
  '--disable_model_save',
@@ -3077,6 +3096,7 @@ def main():
  replace_to_pseudo_operators=args.replace_to_pseudo_operators,
  param_replacement_file=args.param_replacement_file,
  auto_generate_json=args.auto_generate_json,
+ auto_generate_json_on_error=args.auto_generate_json_on_error,
  check_gpu_delegate_compatibility=args.check_gpu_delegate_compatibility,
  check_onnx_tf_outputs_elementwise_close=args.check_onnx_tf_outputs_elementwise_close,
  check_onnx_tf_outputs_elementwise_close_full=args.check_onnx_tf_outputs_elementwise_close_full,
@@ -3092,4 +3112,3 @@ def main():

  if __name__ == '__main__':
  main()
-
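For reference, a minimal usage sketch of the new opt-in behaviour from Python, based only on the convert() signature and argparse changes shown in this diff: full elementwise accuracy validation (the -cotof path, which the new flag relies on) combined with auto_generate_json_on_error. The model path and output folder below are hypothetical placeholders, not values taken from the package.

# Minimal sketch, not the package's documented example.
# 'model.onnx' and 'saved_model' are hypothetical placeholders.
from onnx2tf import convert

convert(
    input_onnx_file_path='model.onnx',
    output_folder_path='saved_model',
    check_onnx_tf_outputs_elementwise_close_full=True,  # -cotof: collect per-output accuracy metrics
    auto_generate_json_on_error=True,                    # -agje: opt-in JSON generation when errors exceed 1e-2
)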
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.28.2
+ Version: 1.28.3
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.28.2
+ ghcr.io/pinto0309/onnx2tf:1.28.3

  or

@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.28.2
+ docker.io/pinto0309/onnx2tf:1.28.3

  or

@@ -501,7 +501,7 @@ onnx2tf -i resnet18-v1-7.onnx -otfv1pb
  # Automatic JSON generation only
  # Generates an optimal parameter replacement JSON file for model conversion.
  # The JSON file is saved to {model_name}_auto.json when conversion errors occur
- # or accuracy issues are detected.
+ # or accuracy issues are detected and the feature is explicitly enabled.
  onnx2tf -i model.onnx -agj

  # Accuracy validation only (no JSON generation)
@@ -514,6 +514,11 @@ onnx2tf -i model.onnx -cotof
  # to validate the model accuracy. This ensures the best possible conversion accuracy.
  onnx2tf -i model.onnx -agj -cotof

+ # Accuracy validation with opt-in JSON generation on error
+ # Generates a parameter replacement JSON only when accuracy errors greater than 1e-2
+ # are detected during validation.
+ onnx2tf -i model.onnx -cotof -agje
+
  # INT8 Quantization, Full INT8 Quantization
  # INT8 Quantization with INT16 activation, Full INT8 Quantization with INT16 activation
  # Dynamic Range Quantization
@@ -2051,6 +2056,11 @@ optional arguments:
  WARNING: This option performs an exhaustive search to find the optimal conversion patterns,
  which can take a very long time depending on the model complexity.

+ -agje, --auto_generate_json_on_error
+ Attempts to generate a parameter replacement JSON when accuracy validation finds errors
+ greater than 1e-2. Useful for quickly capturing fixes during -cotof runs.
+ Disabled by default to avoid unexpected file generation.
+
  -dms, --disable_model_save
  Does not save the converted model. For CIs RAM savings.

@@ -2121,6 +2131,7 @@ convert(
  mvn_epsilon: Union[float, NoneType] = 0.0000000001,
  param_replacement_file: Optional[str] = '',
  auto_generate_json: Optional[bool] = False,
+ auto_generate_json_on_error: Optional[bool] = False,
  check_gpu_delegate_compatibility: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close: Optional[bool] = False,
  check_onnx_tf_outputs_elementwise_close_full: Optional[bool] = False,
@@ -2474,6 +2485,11 @@ convert(
  the generated JSON is used to re-evaluate accuracy.
  Default: False

+ auto_generate_json_on_error: Optional[bool]
+ When accuracy validation detects errors greater than 1e-2, attempts to generate
+ a parameter replacement JSON as a best-effort fix.
+ Default: False
+
  check_gpu_delegate_compatibility: Optional[bool]
  Run TFLite ModelAnalyzer on the generated Float16 tflite model
  to check if the model can be supported by GPU Delegate.
@@ -1,6 +1,6 @@
- onnx2tf/__init__.py,sha256=Q-EJ5Kmj8v4pyqNo39_LSMw82TTyA_BlTLfHEgfsSiE,66
+ onnx2tf/__init__.py,sha256=pm3Teh9F6BwBByFpjGnffCCqgioFoHoNpKCZLX4uJdU,66
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
- onnx2tf/onnx2tf.py,sha256=ufjdjeokS96PyvhLAV4nOaiPZ69FCP2kbvBVGahzxxQ,146784
+ onnx2tf/onnx2tf.py,sha256=iQwYuLj1f7gHWU9r_4L1pyRnMsJgHkqEPeavxejtY4Y,146964
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -190,10 +190,10 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
  onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
  onnx2tf/utils/json_auto_generator.py,sha256=Vyy21SYEoSL0b-I1cUnaXR-CPoO8LJYQ3fAS2ulZSMM,61964
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
- onnx2tf-1.28.2.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
- onnx2tf-1.28.2.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
- onnx2tf-1.28.2.dist-info/METADATA,sha256=psCTn2IAkkXxmDaFe_1wLdA_lyCdP_ooOUvoiOluBfk,151177
- onnx2tf-1.28.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx2tf-1.28.2.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
- onnx2tf-1.28.2.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
- onnx2tf-1.28.2.dist-info/RECORD,,
+ onnx2tf-1.28.3.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+ onnx2tf-1.28.3.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+ onnx2tf-1.28.3.dist-info/METADATA,sha256=0ukqAWObTB52ZP5__Fr0UVTiVkkxDHzxTF7p6BBclwY,151968
+ onnx2tf-1.28.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx2tf-1.28.3.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
+ onnx2tf-1.28.3.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+ onnx2tf-1.28.3.dist-info/RECORD,,