onnx-diagnostic 0.7.12__py3-none-any.whl → 0.7.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. onnx_diagnostic/__init__.py +1 -1
  2. onnx_diagnostic/export/dynamic_shapes.py +11 -2
  3. onnx_diagnostic/helpers/helper.py +11 -5
  4. onnx_diagnostic/helpers/mini_onnx_builder.py +17 -0
  5. onnx_diagnostic/helpers/model_builder_helper.py +1 -0
  6. onnx_diagnostic/helpers/rt_helper.py +2 -1
  7. onnx_diagnostic/helpers/torch_helper.py +31 -7
  8. onnx_diagnostic/reference/torch_evaluator.py +2 -2
  9. onnx_diagnostic/tasks/data/__init__.py +13 -0
  10. onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx +0 -0
  11. onnx_diagnostic/tasks/image_text_to_text.py +256 -141
  12. onnx_diagnostic/tasks/text_generation.py +15 -0
  13. onnx_diagnostic/torch_export_patches/eval/__init__.py +177 -150
  14. onnx_diagnostic/torch_export_patches/eval/model_cases.py +19 -1
  15. onnx_diagnostic/torch_export_patches/onnx_export_errors.py +29 -14
  16. onnx_diagnostic/torch_export_patches/patch_inputs.py +10 -6
  17. onnx_diagnostic/torch_export_patches/patches/patch_torch.py +116 -10
  18. onnx_diagnostic/torch_export_patches/patches/patch_transformers.py +269 -4
  19. onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py +36 -0
  20. onnx_diagnostic/torch_models/hghub/model_inputs.py +31 -3
  21. onnx_diagnostic/torch_models/validate.py +114 -36
  22. onnx_diagnostic/torch_onnx/sbs.py +2 -1
  23. {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.13.dist-info}/METADATA +11 -31
  24. {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.13.dist-info}/RECORD +27 -25
  25. {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.13.dist-info}/WHEEL +0 -0
  26. {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.13.dist-info}/licenses/LICENSE.txt +0 -0
  27. {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.13.dist-info}/top_level.txt +0 -0
@@ -1,3 +1,4 @@
+ import gc
  import datetime
  import inspect
  import os
@@ -113,6 +114,8 @@ def _make_folder_name(
  subfolder: Optional[str] = None,
  opset: Optional[int] = None,
  drop_inputs: Optional[List[str]] = None,
+ same_as_pretrained: bool = False,
+ use_pretrained: bool = False,
  ) -> str:
  "Creates a filename unique based on the given options."
  els = [model_id.replace("/", "_")]
@@ -141,6 +144,10 @@ def _make_folder_name(
  if drop_inputs:
  ii = "-".join(f"{s[0]}{s[-1]}" for s in drop_inputs)
  els.append(f"I-{ii.upper()}")
+ if use_pretrained:
+ els.append("TRAINED")
+ elif same_as_pretrained:
+ els.append("SAMESIZE")
  return "-".join(els)


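The two new flags only add a suffix to the dump folder name. A minimal sketch of the naming scheme shown above (illustrative only; the real _make_folder_name also encodes exporter, dtype, opset, dropped inputs and other options, and the model id is just an example):

    def folder_name(model_id, use_pretrained=False, same_as_pretrained=False):
        # mirrors the suffix logic added in 0.7.13: "TRAINED" wins over "SAMESIZE"
        els = [model_id.replace("/", "_")]
        if use_pretrained:
            els.append("TRAINED")
        elif same_as_pretrained:
            els.append("SAMESIZE")
        return "-".join(els)

    print(folder_name("google/gemma-3-4b-it", use_pretrained=True))
    # google_gemma-3-4b-it-TRAINED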
@@ -237,21 +244,35 @@ def _quiet_or_not_quiet(
  summary[f"{suffix}_output"] = string_type(res, with_shape=True, with_min_max=True)
  summary[f"{suffix}_warmup"] = warmup
  summary[f"{suffix}_repeat"] = repeat
- for _w in range(max(0, warmup - 1)):
+ last_ = None
+ end_w = max(0, warmup - 1)
+ for _w in range(end_w):
  t = fct()
- summary[f"io_{suffix}_{_w+1}"] = string_type(t, with_shape=True, with_min_max=True)
+ _ = string_type(t, with_shape=True, with_min_max=True)
+ if _ != last_ or _w == end_w - 1:
+ summary[f"io_{suffix}_{_w+1}"] = _
+ last_ = _
  summary[f"time_{suffix}_warmup"] = time.perf_counter() - begin
  times = []
  for _r in range(repeat):
  begin = time.perf_counter()
  t = fct()
  times.append(time.perf_counter() - begin)
- a = np.array(times)
+ a = np.array(times, dtype=np.float64)
+ a.sort()
+ i5 = max(1, a.shape[0] * 5 // 100)
+ i2 = max(1, a.shape[0] * 2 // 100)
  summary[f"time_{suffix}_latency"] = a.mean()
  summary[f"time_{suffix}_latency_std"] = a.std()
  summary[f"time_{suffix}_latency_min"] = a.min()
- summary[f"time_{suffix}_latency_min"] = a.max()
+ summary[f"time_{suffix}_latency_max"] = a.max()
+ summary[f"time_{suffix}_latency_098"] = a[-i2]
+ summary[f"time_{suffix}_latency_095"] = a[-i5]
+ summary[f"time_{suffix}_latency_005"] = a[i5]
+ summary[f"time_{suffix}_latency_002"] = a[i2]
  summary[f"time_{suffix}_n"] = len(a)
+ summary[f"time_{suffix}_latency_m98"] = a[i2:-i2].mean()
+
  return res


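The repeat loop now sorts the measured times and reports percentile-like values and a trimmed mean on top of mean/std/min/max (the max was previously overwriting the min key, fixed above). A short illustrative sketch of what the added indices select once the array is sorted (assuming enough repetitions; with very few samples both indices clamp to 1):

    import numpy as np

    times = [0.012, 0.011, 0.013, 0.010, 0.050]  # made-up latencies in seconds
    a = np.array(times, dtype=np.float64)
    a.sort()
    i5 = max(1, a.shape[0] * 5 // 100)   # index ~5% into the sorted array
    i2 = max(1, a.shape[0] * 2 // 100)   # index ~2% into the sorted array
    print(a[-i2])            # latency_098: ~98th percentile (boundary of the 2% slowest)
    print(a[i2])             # latency_002: ~2nd percentile (boundary of the 2% fastest)
    print(a[i2:-i2].mean())  # latency_m98: mean with the 2% extremes on each side dropped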
@@ -359,9 +380,10 @@ def validate_model(
  ``orteval10``, ``ref`` only if `do_run` is true
  :param repeat: number of time to measure the model
  :param warmup: warmup the model first
- :param inputs2: checks that the second set of inputs is reunning as well,
+ :param inputs2: checks that other sets of inputs are running as well,
  this ensures that the model does support dynamism, the value is used
- as an increment to the first set of values (added to dimensions)
+ as an increment to the first set of values (added to dimensions),
+ or an empty cache for example
  :param output_names: output names the onnx exporter should use
  :param ort_logs: increases onnxruntime verbosity when creating the session
  :return: two dictionaries, one with some metrics,
@@ -391,13 +413,20 @@ def validate_model(
  :class:`onnx_diagnostic.reference.ExtendedReferenceEvaluator`
  if ``runtime == 'ref'``,
  ``orteval10`` increases the verbosity.
+
+ .. versionchanged:: 0.7.13
+ *inputs2* not only means a second set of inputs but many
+ such as ``input_empty_cache``
+ which refers to a set of inputs using an empty cache.
  """
+ validation_begin = time.perf_counter()
  model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
  model_id,
  subfolder,
  same_as_pretrained=same_as_pretrained,
  use_pretrained=use_pretrained,
  )
+ time_preprocess_model_id = time.perf_counter() - validation_begin
  default_patch = dict(patch_transformers=True, patch_diffusers=True, patch=True)
  if isinstance(patch, bool):
  patch_kwargs = default_patch if patch else dict(patch=False)
@@ -438,6 +467,7 @@ def validate_model(
  version_exporter=exporter or "",
  version_runtime=runtime,
  version_inputs2=inputs2,
+ time_preprocess_model_id=time_preprocess_model_id,
  )
  )
  if opset:
@@ -454,6 +484,8 @@ def validate_model(
  subfolder=subfolder,
  opset=opset,
  drop_inputs=drop_inputs,
+ use_pretrained=use_pretrained,
+ same_as_pretrained=same_as_pretrained,
  )
  dump_folder = os.path.join(dump_folder, folder_name)
  if not os.path.exists(dump_folder):
@@ -486,7 +518,7 @@ def validate_model(
  mop = model_options or {}
  data = _quiet_or_not_quiet(
  quiet,
- "create",
+ "create_torch_model",
  summary,
  None,
  (
@@ -505,10 +537,9 @@ def validate_model(
  )
  ),
  )
- assert not inputs2 or "inputs2" in data, (
- f"inputs2 is True but second set is missing in data for "
- f"model id {model_id!r}: {sorted(data)}"
- )
+
+ second_input_keys = [k for k in data if k.startswith("inputs") and k != "inputs"]
+
  if dump_folder:
  with open(os.path.join(dump_folder, "model_config.txt"), "w") as f:
  f.write(f"model_id: {model_id}\n------\n")
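Instead of requiring a single "inputs2" entry, the function now discovers every extra input set by key prefix: any key of data that starts with "inputs", except "inputs" itself. A short illustration with a made-up data dictionary (the real one also carries the model, config, task, and so on):

    data = {
        "inputs": {"input_ids": "...", "attention_mask": "..."},
        "inputs_empty_cache": {"input_ids": "...", "past_key_values": "..."},
        "model": object(),
    }
    second_input_keys = [k for k in data if k.startswith("inputs") and k != "inputs"]
    print(second_input_keys)  # ['inputs_empty_cache']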
@@ -601,16 +632,14 @@ def validate_model(
  if verbose:
  print(f"[validate_model] new inputs: {string_type(data['inputs'])}")
  print(f"[validate_model] new dynamic_hapes: {string_type(data['dynamic_shapes'])}")
- if inputs2:
- assert (
- "inputs2" in data
- ), "Cannot test a second set of inputs as it was not defined."
- data["inputs2"], _ = filter_inputs(
- data["inputs2"],
- drop_names=drop_inputs,
- model=data["model"],
- dynamic_shapes=data["dynamic_shapes"],
- )
+ if second_input_keys:
+ for k in second_input_keys:
+ data[k], _ = filter_inputs(
+ data[k],
+ drop_names=drop_inputs,
+ model=data["model"],
+ dynamic_shapes=data["dynamic_shapes"],
+ )

  if not empty(dtype):
  if isinstance(dtype, str):
@@ -620,8 +649,9 @@ def validate_model(
  data["model"] = to_any(data["model"], dtype) # type: ignore
  data["inputs"] = to_any(data["inputs"], dtype) # type: ignore
  summary["model_dtype"] = str(dtype)
- if "inputs2" in data:
- data["inputs2"] = to_any(data["inputs2"], dtype) # type: ignore
+ if second_input_keys:
+ for k in second_input_keys:
+ data[k] = to_any(data[k], dtype) # type: ignore

  if not empty(device):
  if verbose:
@@ -629,11 +659,13 @@ def validate_model(
  data["model"] = to_any(data["model"], device) # type: ignore
  data["inputs"] = to_any(data["inputs"], device) # type: ignore
  summary["model_device"] = str(device)
- if "inputs2" in data:
- data["inputs2"] = to_any(data["inputs2"], device) # type: ignore
+ if second_input_keys:
+ for k in second_input_keys:
+ data[k] = to_any(data[k], device) # type: ignore

  for k in ["task", "size", "n_weights"]:
  summary[f"model_{k.replace('_','')}"] = data[k]
+ summary["second_input_keys"] = ",".join(second_input_keys)
  summary["model_inputs_options"] = str(input_options or "")
  summary["model_inputs"] = string_type(data["inputs"], with_shape=True)
  summary["model_shapes"] = string_type(data["dynamic_shapes"])
@@ -660,22 +692,37 @@ def validate_model(
  print(f"[validate_model] +INPUT {k}={string_type(v, with_shape=True)}")
  for k, v in data["dynamic_shapes"].items():
  print(f"[validate_model] +SHAPE {k}={string_type(v)}")
+ print(f"[validate_model] second_input_keys={second_input_keys}")
  print("[validate_model] --")

  if do_run:
+ validation_begin = time.perf_counter()
+
  _validate_do_run_model(
  data, summary, "inputs", "run", "run_expected", verbose, repeat, warmup, quiet
  )
- if inputs2:
- _validate_do_run_model(
- data, summary, "inputs2", "run2", "run_expected2", verbose, 1, 0, quiet
- )
+ if second_input_keys:
+ for k in second_input_keys:
+ _validate_do_run_model(
+ data,
+ summary,
+ k,
+ f"run2{k[6:]}",
+ f"run_expected2{k[6:]}",
+ verbose,
+ 1,
+ 0,
+ quiet,
+ )
+
+ summary["time_total_validation_torch"] = time.perf_counter() - validation_begin

  if exporter:
  print(
  f"[validate_model] -- export the model with {exporter!r}, "
  f"optimization={optimization!r}"
  )
+ exporter_begin = time.perf_counter()
  if patch_kwargs:
  if verbose:
  print(
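Each extra input set gets its own run and expected-output entries; the suffix is whatever follows the six characters of "inputs" in the key name. A sketch, assuming a hypothetical extra set named "inputs_empty_cache":

    k = "inputs_empty_cache"         # hypothetical key produced by the input generator
    suffix = k[6:]                   # "_empty_cache"
    print(f"run2{suffix}")           # run2_empty_cache
    print(f"run_expected2{suffix}")  # run_expected2_empty_cache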
@@ -718,7 +765,9 @@ def validate_model(
  dump_folder=dump_folder,
  output_names=output_names,
  )
+
  summary.update(summary_export)
+ summary["time_total_exporter"] = time.perf_counter() - exporter_begin

  dump_stats = None
  if dump_folder:
@@ -759,6 +808,8 @@ def validate_model(
  data["onnx_filename"] = onnx_filename
  summary["time_onnx_save"] = duration
  summary.update(compute_statistics(onnx_filename))
+ del epo
+
  if verbose:
  print(f"[validate_model] dumps statistics in {dump_folder!r}...")
  dump_stats = os.path.join(dump_folder, f"{folder_name}.stats")
@@ -781,6 +832,20 @@ def validate_model(
  return summary, data

  if do_run:
+ # Let's move the model to CPU to make sure it frees GPU memory.
+ if verbose:
+ # It does not really work for the time being and the model
+ # gets loaded twice, one by torch, one by onnxruntime
+ print("[validation_model] -- delete the model")
+ for key in ["model", "onnx_program", "config"]:
+ if key in data:
+ del data[key]
+ if device is not None and "cuda" in str(device).lower():
+ torch.cuda.empty_cache()
+ gc.collect()
+ print("[validation_model] -- done")
+
+ validation_begin = time.perf_counter()
  summary_valid, data = validate_onnx_model(
  data=data,
  quiet=quiet,
@@ -788,10 +853,11 @@ def validate_model(
  runtime=runtime,
  repeat=repeat,
  warmup=warmup,
- inputs2=inputs2,
+ second_input_keys=second_input_keys,
  ort_logs=ort_logs,
  )
  summary.update(summary_valid)
+ summary["time_total_validation_onnx"] = time.perf_counter() - validation_begin

  if ortfusiontype and "onnx_filename" in data:
  assert (
@@ -850,15 +916,17 @@ def validate_model(
  runtime=runtime,
  repeat=repeat,
  warmup=warmup,
- inputs2=inputs2,
+ second_input_keys=second_input_keys,
  )
  summary.update(summary_valid)

  _compute_final_statistics(summary)
+ summary["time_total"] = time.perf_counter() - validation_begin

  if verbose:
  print("[validate_model] -- done (final)")
  if dump_stats:
+ # Dumps again the statistics.
  with open(dump_stats, "w") as f:
  for k, v in sorted(summary.items()):
  f.write(f":{k}:{v};\n")
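The .stats file written next to the dumped model stores each summary entry as one ":key:value;" line, and it is now rewritten at the very end so the new timing totals are included. A minimal sketch of reading it back (assuming values contain no newlines):

    def read_stats(path):
        # parses lines of the form ":key:value;" as written by validate_model
        summary = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line.startswith(":") and line.endswith(";"):
                    key, _, value = line[1:-1].partition(":")
                    summary[key] = value
        return summary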
@@ -1232,7 +1300,7 @@ def validate_onnx_model(
  runtime: str = "onnxruntime",
  repeat: int = 1,
  warmup: int = 0,
- inputs2: int = 1,
+ second_input_keys: Optional[List[str]] = None,
  ort_logs: bool = False,
  ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
  """
@@ -1249,7 +1317,7 @@ def validate_onnx_model(
  :param runtime: onnx runtime to use, onnxruntime, torch, orteval, ref
  :param repeat: run that number of times the model
  :param warmup: warmup the model
- :param inputs2: to validate the model on the second input set
+ :param second_input_keys: to validate the model on other input sets
  to make sure the exported model supports dynamism, the value is
  used as an increment added to the first set of inputs (added to dimensions)
  :param ort_logs: triggers the logs for onnxruntime
@@ -1374,10 +1442,12 @@ def validate_onnx_model(
  print(f"[validate_onnx_model] done (ort_session) flavour={flavour!r}")

  keys = [("inputs", "run_expected", "")]
- if inputs2:
- keys.append(("inputs2", "run_expected2", "2"))
+ if second_input_keys:
+ keys.extend([(k, f"run_expected2{k[6:]}", f"2{k[6:]}") for k in second_input_keys])
  for k_input, k_expected, suffix in keys:
  # make_feeds
+ assert k_input in data, f"Unable to find {k_input!r} in {sorted(data)}"
+ assert k_expected in data, f"Unable to find {k_expected!r} in {sorted(data)}"
  if verbose:
  print(f"[validate_onnx_model] -- make_feeds for {k_input!r}...")
  print(
@@ -1679,6 +1749,7 @@ def process_statistics(data: Sequence[Dict[str, float]]) -> Dict[str, Any]:
  "check",
  "build_graph_for_pattern",
  "pattern_optimization",
+ "topological_sort",
  ]:
  if s in p or s.replace("_", "-") in p:
  return s
@@ -1804,6 +1875,8 @@ def call_torch_export_custom(
  "custom-nostrict-noinline",
  "custom-nostrict-default-noinline",
  "custom-nostrict-all-noinline",
+ "custom-dec",
+ "custom-decall",
  }
  assert exporter in available, f"Unexpected value for exporter={exporter!r} in {available}"
  assert "model" in data, f"model is missing from data: {sorted(data)}"
@@ -1840,7 +1913,9 @@ def call_torch_export_custom(
  export_options = ExportOptions(
  strict=strict,
  decomposition_table=(
- "default" if "-default" in exporter else ("all" if "-all" in exporter else None)
+ "default"
+ if ("-default" in exporter or "-dec" in exporter)
+ else ("all" if ("-all" in exporter or "-decall" in exporter) else None)
  ),
  save_ep=(os.path.join(dump_folder, f"{exporter}.ep") if dump_folder else None),
  )
@@ -2020,4 +2095,7 @@ def _compute_final_statistics(summary: Dict[str, Any]):
  stats["stat_estimated_speedup_ort"] = (
  summary["time_run_latency"] / summary["time_run_onnx_ort_latency"]
  )
+ stats["stat_estimated_speedup_ort_m98"] = (
+ summary["time_run_latency_m98"] / summary["time_run_onnx_ort_latency_m98"]
+ )
  summary.update(stats)
@@ -3,6 +3,7 @@ import onnx
  import torch
  from ..helpers import string_type, string_diff, max_diff
  from ..helpers.onnx_helper import to_array_extended
+ from ..helpers.torch_helper import to_numpy


  def validate_fx_tensor(
@@ -296,7 +297,7 @@ def run_aligned(
  )

  for inp, v in zip(onx.graph.input, args):
- onnx_results[inp.name] = v.cpu().numpy()
+ onnx_results[inp.name] = to_numpy(v)
  if verbose:
  print(
  f"[run_aligned] +onnx-input: {inp.name}: "
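run_aligned now converts the input tensors with to_numpy from onnx_diagnostic.helpers.torch_helper instead of calling v.cpu().numpy() directly. The helper's implementation is not part of this diff; a plausible minimal sketch of why such a wrapper helps (dtypes numpy has no equivalent for, e.g. bfloat16), given purely as an assumption:

    import numpy as np
    import torch

    def to_numpy_sketch(t: torch.Tensor) -> np.ndarray:
        # Hypothetical stand-in for onnx_diagnostic.helpers.torch_helper.to_numpy.
        # A bare t.cpu().numpy() raises for dtypes numpy does not know, such as
        # bfloat16, so those are upcast before the conversion.
        t = t.detach().cpu()
        if t.dtype == torch.bfloat16:
            t = t.to(torch.float32)
        return t.numpy()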
@@ -1,43 +1,23 @@
  Metadata-Version: 2.4
  Name: onnx-diagnostic
- Version: 0.7.12
- Summary: Investigate ONNX models
+ Version: 0.7.13
+ Summary: Tools to help converting pytorch models into ONNX.
  Home-page: https://github.com/sdpython/onnx-diagnostic
  Author: Xavier Dupré
- Author-email: xavier.dupre@gmail.com
- Classifier: Intended Audience :: Science/Research
- Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: C
- Classifier: Programming Language :: Python
- Classifier: Topic :: Software Development
- Classifier: Topic :: Scientific/Engineering
- Classifier: Development Status :: 5 - Production/Stable
- Classifier: Operating System :: Microsoft :: Windows
- Classifier: Operating System :: POSIX
- Classifier: Operating System :: Unix
- Classifier: Operating System :: MacOS
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
+ Author-email: Xavier Dupré <xavier.dupre@gmail.com>
+ License: MIT
+ Project-URL: Homepage, https://sdpython.github.io/doc/onnx-diagnostic/dev/
+ Project-URL: Repository, https://github.com/sdpython/onnx-diagnostic/
+ Requires-Python: >=3.9
  Description-Content-Type: text/x-rst
  License-File: LICENSE.txt
- Requires-Dist: numpy
- Requires-Dist: onnx>=1.16.0
- Requires-Dist: onnxruntime>=1.23
- Requires-Dist: optree
- Requires-Dist: torch>=2.8
- Requires-Dist: torch_geometric
  Dynamic: author
- Dynamic: author-email
- Dynamic: classifier
- Dynamic: description
- Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license-file
- Dynamic: requires-dist
- Dynamic: summary
+
+
+ .. image:: https://github.com/sdpython/onnx-diagnostic/raw/main/_doc/_static/logo.png
+ :width: 120

  onnx-diagnostic: investigate onnx models
  ========================================
@@ -1,11 +1,11 @@
- onnx_diagnostic/__init__.py,sha256=dcCB9tAfK6HWFqGTvBN7m6WdJ5DFFu0P3gcwcKdA7MI,174
+ onnx_diagnostic/__init__.py,sha256=Sv9eg4qDNdyO5uUafa3e98pIerP4faa203FF3hqygOI,174
  onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
  onnx_diagnostic/_command_lines_parser.py,sha256=wleBwnoCDyAWRYRREUSGkwAJKw2YI4Td_7ydxmdOXfI,33457
  onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
  onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
  onnx_diagnostic/ext_test_case.py,sha256=emfQGiQSz5FVDhyJ1Acsv_Tast7tWl426TjtpNqxDBU,43558
  onnx_diagnostic/export/__init__.py,sha256=yEIoWiOeTwBsDhyYt2fTKuhtA0Ya1J9u9ZzMTOTWaWs,101
- onnx_diagnostic/export/dynamic_shapes.py,sha256=Go4_sIwiolCy_m1djQ3U_bX6C1EFw4al3x-ty-PsuME,41393
+ onnx_diagnostic/export/dynamic_shapes.py,sha256=80yvtxYNsRCplUNiyL7acb9qhl4-HXIes8C62yuQ8JE,41976
  onnx_diagnostic/export/shape_helper.py,sha256=PI_SgE1MNRKSrQ414eYoBZ54QGZbYisHSvqi9tstL2s,7795
  onnx_diagnostic/export/validate.py,sha256=_PGUql2DJhIgGKo0WjTGUc5AgsZUx8fEs00MePy-w98,6043
  onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIOpZDsJg,83
@@ -16,21 +16,21 @@ onnx_diagnostic/helpers/cache_helper.py,sha256=zxjm0-3lHs0A7wLEejz2r2KPMPjkkva--
  onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDfvm9ToI1vNH0,5618
  onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
  onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
- onnx_diagnostic/helpers/helper.py,sha256=mRQ-wo9P30m0Z0_v3EfEDwK_dZFTUhIVKo-5ut9DPW8,63194
+ onnx_diagnostic/helpers/helper.py,sha256=zl7vG6G4ueq931Z9iT8OlKfmtFxvRJD2WJQh_qsMiBs,63401
  onnx_diagnostic/helpers/log_helper.py,sha256=SKzxJ6DdP9uq4e2feA2nqd2Rreq4G-ujKZFUELfycP0,85674
  onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
- onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=FgK-Kws1WpSYdYJCPyONwQYY3AjbgUHimZlaYyiNUfE,21286
- onnx_diagnostic/helpers/model_builder_helper.py,sha256=tJi4VkP0TS2yyDSxQPNu9WRoSnPCAjr6L0J49X2LdXk,12810
+ onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=Cgx1ojmV0S_JpZ_UqwsNxeULMMDvMInXslhkE34fwec,22051
+ onnx_diagnostic/helpers/model_builder_helper.py,sha256=sK40KRN9GWK1vbNJHIXkYAojblbKD0bdom7BFmoNSv4,12860
  onnx_diagnostic/helpers/onnx_helper.py,sha256=oxl3x0EQowGP9kfz8aKDqnJZcvYY8FeZLsfoLJDiSUg,39826
  onnx_diagnostic/helpers/ort_session.py,sha256=UgUUeUslDxEFBc6w6f3HMq_a7bn4TBlItmojqWquSj4,29281
- onnx_diagnostic/helpers/rt_helper.py,sha256=E9fQ76lcLJqcOCNsAeZBdxmmEO_FH0oSIlFRU2gnQ6U,5229
- onnx_diagnostic/helpers/torch_helper.py,sha256=e0KkSTdoZthc5Yuf9e8XVGAx-lqOYy4DeRRe-N4QUYQ,33478
+ onnx_diagnostic/helpers/rt_helper.py,sha256=JnqsidpmX47ux5jaA_7Of_eS7KIRlOTqqDKo7ZUD-bI,5251
+ onnx_diagnostic/helpers/torch_helper.py,sha256=SY01uEx5tKtPcix91AifhgmsvNkDMGpTigT7w_0Nj98,34442
  onnx_diagnostic/reference/__init__.py,sha256=rLZsxOlnb7-81F2CzepGnZLejaROg4JvgFaGR9FwVQA,208
  onnx_diagnostic/reference/evaluator.py,sha256=RzNzjFDeMe-4X51Tb22N6aagazY5ktNq-mRmPcfY5EU,8848
  onnx_diagnostic/reference/ort_evaluator.py,sha256=nituItsP3IKDDWF9z-iGX_iAubrTcdk8pb1GVBp9sCU,26161
  onnx_diagnostic/reference/quantized_tensor.py,sha256=5u67uS2uGacdMD5VYCbpojNjiesDlV_kO0fAJ0vUWGE,1098
  onnx_diagnostic/reference/report_results_comparison.py,sha256=OsyQN8EHZZoj97u74RQP-7WFpebPOso5GEDpdkLWu6M,3645
- onnx_diagnostic/reference/torch_evaluator.py,sha256=gf8EPoX4C4yGgQ-DqxXxaGU26WdEhn8Gd6iesDLqAV0,27692
+ onnx_diagnostic/reference/torch_evaluator.py,sha256=Tx1teWvfGEX5RmkDnI83UiOlo5eBOC72vPhgTWdFUF0,27689
  onnx_diagnostic/reference/ops/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  onnx_diagnostic/reference/ops/op_add_add_mul_mul.py,sha256=CXQVtgVrT066gDJFwxL4nDSY4G8r08XNu3EwhWqMapU,1521
  onnx_diagnostic/reference/ops/op_attention.py,sha256=ThALMDF53v3QeG1bohi0bvX2o90HZhGJbbAFOtwEHPE,2027
@@ -77,7 +77,7 @@ onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=umZmjGW1gDUFkqvBJnQ
  onnx_diagnostic/tasks/feature_extraction.py,sha256=Zh9p_Q8FqEO2_aqI0cCiq8OXuM3WUZbwItlLOmLnNl8,5537
  onnx_diagnostic/tasks/fill_mask.py,sha256=5Gt6zlj0p6vuifox7Wmj-TpHXJvPS0CEH8evgdBHDNA,2640
  onnx_diagnostic/tasks/image_classification.py,sha256=nLpBBB1Gkog3Fk6pu2waiHcuQr4ILPptc9FhQ-pn460,4682
- onnx_diagnostic/tasks/image_text_to_text.py,sha256=wkFrUaEvQAW-D-jql2xSnae1XvQBl-sSbhmAmJ76qGo,17428
+ onnx_diagnostic/tasks/image_text_to_text.py,sha256=EcaIdSYfaGLomSuO6G39lNd70tqFb19Xx3CjpQxQp9o,21538
  onnx_diagnostic/tasks/image_to_video.py,sha256=SoF2cVIJr6P30Abp-FCuixFDh5RvTuNEOL36QthGY6U,3860
  onnx_diagnostic/tasks/mask_generation.py,sha256=fjdD3rd-O-mFL0hQy3la3JXKth_0bH2HL7Eelq-3Dbs,5057
  onnx_diagnostic/tasks/mixture_of_expert.py,sha256=al4tk1BrHidtRiHlAaiflWiJaAte0d5M8WcBioANG9k,2808
@@ -86,41 +86,43 @@ onnx_diagnostic/tasks/sentence_similarity.py,sha256=vPqNZgAnIvY0rKWPUTs0IlU3RFQD
  onnx_diagnostic/tasks/summarization.py,sha256=8vB_JiRzDEacIvr8CYTuVQTH73xG_jNkndoS9RHJTSs,8292
  onnx_diagnostic/tasks/text2text_generation.py,sha256=35eF_RlSeMdLTZPooLMAnszs-z0bkKZ34Iej3JgA96A,8602
  onnx_diagnostic/tasks/text_classification.py,sha256=CGc72SpXFzTUyzAHEMPgyy_s187DaYGsRdrosxG80_Q,2711
- onnx_diagnostic/tasks/text_generation.py,sha256=hV-oK1bWjtepxkA491Va_0CWrELZbfP4E3N8xQ950zk,12823
+ onnx_diagnostic/tasks/text_generation.py,sha256=PRUcVF6XBmOkNA2yi2MUDAT7G8JS1w_6nvjIGcmhST8,13366
  onnx_diagnostic/tasks/text_to_image.py,sha256=mOS3Ruosi3hzRMxXLDN7ZkAbi7NnQb7MWwQP_okGVHs,2962
  onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=jJCMWuOqGv5ahCfjrcqxuYCJFhTgHV5KUf2yyv2yxYA,4624
+ onnx_diagnostic/tasks/data/__init__.py,sha256=uJoemrWgEjI6oA-tMX7r3__x-b3siPmkgqaY7bgIles,401
+ onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx,sha256=UbtvmWMqcZOKJ-I-HXWI1A6YR6QDaFS5u_yXm5C3ZBw,10299
  onnx_diagnostic/torch_export_patches/__init__.py,sha256=0SaZedwznm1hQUCvXZsGZORV5vby954wEExr5faepGg,720
- onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=TUDY6sRf2Si-t7rK_hdKiFqSP2gjJbPpIGgnW2Mt5eA,28686
+ onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=fQGyk6IkapGYYlFxbly8hS5oLWkhIC4bHV3DfZA1Keg,29449
  onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=klvqiMjccwGhiRnLRVbwTi5WWkMfvtnOV5ycirPcAdA,11354
  onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
- onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=3ySY1nAzINSS1hAzTycwfdbPas8G5CDL2MjnaAHBkMU,7825
+ onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=2HQZKQV6TM5430RIvKiMPe4cfGvFdx1UnP1w76CeGE4,8110
  onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
  onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=2U0AdyZuU0W54QTdE7tY7imVzMnpQ5091ADNtTCkT8Y,6967
- onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=57x62uZNA80XiWgkG8Fe0_8YJcIVrvKLPqvwLDPJwgc,24008
- onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=OU8-63VDhiWtQV3scBV9JyGXn8ds74OzY2-IOZkwg0A,26580
+ onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=6z8Fk4rcJKo1Nh2F0K3JGkmFH0XZSIfv5-HvO6bhhTY,24818
+ onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=SqaQU0zsvQsZXU0kIrxcURvVCp-ysZAaF01WLlgKZsw,27183
  onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=TFjuw--sTYPCoVEaYlYLJuElx_CUynJR6s6ypoZtRWw,18956
- onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=SsN-y2yoVaY3xRGDaIl0V449LcuwKAGBHPKm2JjQNhc,67942
+ onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=bQFxePwj9OwCFykhcZiLvqOV2sXPBcZXa4260XueHLE,23117
+ onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=hHO7XOaUw3XrhPSrG2hTpMNzGVm_zigLg8d7hMOK7Gs,79188
  onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
  onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
  onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=mcmZGekzQlLgE_o3SdKlRgCx4ewwyyAuNWZ9CaN_zrI,9317
  onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
- onnx_diagnostic/torch_models/validate.py,sha256=oDPnZDFpiPx7s0we4usaD4pQpJEgqnKYjW-L-TM8Bsw,76395
+ onnx_diagnostic/torch_models/validate.py,sha256=B5h9iBTtkjFNFP6VzqxCvCfwKe4XQOpre1WeKwKEWNA,79628
  onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
  onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=rFbiPNLET-KdBpnv-p0nKgwHX6d7C_Z0s9zZ86_92kQ,14307
  onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=8V_pAgACPLPsLRYUododg7MSL6str-T3tBEGY4OaeYQ,8724
- onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=3yH1pQbCYNDmRMNUCwMFf5ELnAa35ubTKD2JRF5y9Ls,287515
- onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=NgKFt3fwM5PYUOWwApKphiAWfQyJk3rjGXHr4kkSRiE,13707
+ onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=aSa_7Rjider6ruqQ2-fXQMyyDS8VhB1xKxcPNk8qUeU,288776
+ onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=Ioi92UHT3bsfA9oMi9IzY16FxnAKrPJHsEpFepBwr_o,14607
  onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=j50Nu7wddJMoqmD4QzMbNdFDUUgUmSBKRzPDH55TlUQ,2498
  onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=JbGZmW41MPJcQgqaJc9R2G00nI79nI-lABN-ffA1lmY,4037
  onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tmtVl83SmVOL4-Um7Qy-f0E48QI,2507
  onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
- onnx_diagnostic/torch_onnx/sbs.py,sha256=fN799L_G1c2RKEuNcKt_MnQri5dwD4OzeCkBBFFoUBI,16865
- onnx_diagnostic-0.7.12.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
- onnx_diagnostic-0.7.12.dist-info/METADATA,sha256=aQ02curD3P5PXXiaUBlf6pLkpoqMR_F6721HDpsxhLE,7435
- onnx_diagnostic-0.7.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx_diagnostic-0.7.12.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
- onnx_diagnostic-0.7.12.dist-info/RECORD,,
+ onnx_diagnostic/torch_onnx/sbs.py,sha256=IoKLA5UwS6kY8g4OOf_bdQwCziIsQfBczZ3w8wo4wZM,16905
+ onnx_diagnostic-0.7.13.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+ onnx_diagnostic-0.7.13.dist-info/METADATA,sha256=1ZoJZw78GxT1chXfFumfWyr-kcD8puKgaJ_qTHbfs60,6730
+ onnx_diagnostic-0.7.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx_diagnostic-0.7.13.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+ onnx_diagnostic-0.7.13.dist-info/RECORD,,