onnx-diagnostic 0.6.3__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. onnx_diagnostic/__init__.py +1 -1
  2. onnx_diagnostic/_command_lines_parser.py +281 -80
  3. onnx_diagnostic/doc.py +22 -0
  4. onnx_diagnostic/export/dynamic_shapes.py +48 -20
  5. onnx_diagnostic/export/shape_helper.py +126 -0
  6. onnx_diagnostic/ext_test_case.py +1 -1
  7. onnx_diagnostic/helpers/cache_helper.py +78 -8
  8. onnx_diagnostic/helpers/config_helper.py +8 -4
  9. onnx_diagnostic/helpers/helper.py +30 -3
  10. onnx_diagnostic/helpers/log_helper.py +1744 -0
  11. onnx_diagnostic/helpers/mini_onnx_builder.py +4 -1
  12. onnx_diagnostic/helpers/model_builder_helper.py +54 -73
  13. onnx_diagnostic/helpers/torch_helper.py +18 -2
  14. onnx_diagnostic/reference/__init__.py +1 -0
  15. onnx_diagnostic/reference/ort_evaluator.py +29 -4
  16. onnx_diagnostic/reference/report_results_comparison.py +95 -0
  17. onnx_diagnostic/reference/torch_evaluator.py +21 -0
  18. onnx_diagnostic/tasks/automatic_speech_recognition.py +3 -0
  19. onnx_diagnostic/tasks/feature_extraction.py +3 -0
  20. onnx_diagnostic/tasks/fill_mask.py +3 -0
  21. onnx_diagnostic/tasks/image_classification.py +7 -1
  22. onnx_diagnostic/tasks/image_text_to_text.py +72 -18
  23. onnx_diagnostic/tasks/mixture_of_expert.py +3 -0
  24. onnx_diagnostic/tasks/object_detection.py +3 -0
  25. onnx_diagnostic/tasks/sentence_similarity.py +3 -0
  26. onnx_diagnostic/tasks/summarization.py +3 -0
  27. onnx_diagnostic/tasks/text2text_generation.py +3 -0
  28. onnx_diagnostic/tasks/text_classification.py +3 -0
  29. onnx_diagnostic/tasks/text_generation.py +90 -43
  30. onnx_diagnostic/tasks/zero_shot_image_classification.py +3 -0
  31. onnx_diagnostic/torch_export_patches/onnx_export_errors.py +78 -25
  32. onnx_diagnostic/torch_export_patches/onnx_export_serialization.py +37 -0
  33. onnx_diagnostic/torch_export_patches/patches/patch_transformers.py +365 -17
  34. onnx_diagnostic/torch_models/hghub/hub_api.py +81 -8
  35. onnx_diagnostic/torch_models/hghub/hub_data.py +6 -2
  36. onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py +209 -0
  37. onnx_diagnostic/torch_models/hghub/model_inputs.py +58 -14
  38. onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py +23 -50
  39. onnx_diagnostic/torch_models/{test_helper.py → validate.py} +166 -106
  40. {onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/METADATA +2 -2
  41. {onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/RECORD +44 -41
  42. {onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/WHEEL +0 -0
  43. {onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/licenses/LICENSE.txt +0 -0
  44. {onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/top_level.txt +0 -0
onnx_diagnostic/torch_models/{test_helper.py → validate.py}
@@ -147,7 +147,7 @@ def version_summary() -> Dict[str, Union[int, float, str]]:
  :showcode:

  import pprint
- from onnx_diagnostic.torch_models.test_helper import version_summary
+ from onnx_diagnostic.torch_models.validate import version_summary

  pprint.pprint(version_summary())
  """
@@ -259,7 +259,8 @@ def validate_model(
  verbose: int = 0,
  dtype: Optional[Union[str, torch.dtype]] = None,
  device: Optional[Union[str, torch.device]] = None,
- trained: bool = False,
+ same_as_pretrained: bool = False,
+ use_pretrained: bool = False,
  optimization: Optional[str] = None,
  quiet: bool = False,
  patch: bool = False,
@@ -275,6 +276,7 @@ def validate_model(
  runtime: str = "onnxruntime",
  repeat: int = 1,
  warmup: int = 0,
+ inputs2: bool = True,
  ) -> Tuple[Dict[str, Union[int, float, str]], Dict[str, Any]]:
  """
  Validates a model.
@@ -293,7 +295,9 @@ def validate_model(
  :param verbose: verbosity level
  :param dtype: uses this dtype to check the model
  :param device: do the verification on this device
- :param trained: use the trained model, not the untrained one
+ :param same_as_pretrained: use a model equivalent to the trained one,
+ this is not always possible
+ :param use_pretrained: use the trained model, not the untrained one
  :param optimization: optimization to apply to the exported model,
  depends on the exporter
  :param quiet: if quiet, catches exception if any issue
@@ -307,7 +311,7 @@ def validate_model(
  :param drop_inputs: drops this list of inputs (given their names)
  :param ortfusiontype: runs ort fusion, the parameter defines the fusion type,
  it accepts multiple values separated by ``|``,
- see :func:`onnx_diagnostic.torch_models.test_helper.run_ort_fusion`
+ see :func:`onnx_diagnostic.torch_models.validate.run_ort_fusion`
  :param input_options: additional options to define the dummy inputs
  used to export
  :param model_options: additional options when creating the model such as
@@ -318,6 +322,8 @@ def validate_model(
  only if `do_run` is true
  :param repeat: number of times to measure the model
  :param warmup: warmup the model first
+ :param inputs2: checks that the second set of inputs is running as well,
+ this ensures that the model does support dynamism
  :return: two dictionaries, one with some metrics,
  another one with whatever the function produces

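Note: taken together, the new keyword arguments replace the former trained flag and add a dynamism check. A hedged sketch of a call exercising them; the model id and exporter value are illustrative, the keyword name model_id is assumed from the function body, and the remaining arguments keep their 0.6.3 meaning:

    from onnx_diagnostic.torch_models.validate import validate_model

    summary, data = validate_model(
        model_id="arnir0/Tiny-LLM",   # illustrative model id (assumed parameter name)
        do_run=True,                  # run the eager model to collect expected outputs
        exporter="onnx-dynamo",       # one of the exporter names accepted by call_exporter
        same_as_pretrained=False,     # replaces the former `trained` argument
        use_pretrained=False,         # new in 0.7.1: use the trained model, not an untrained one
        inputs2=True,                 # also run a second input set to check dynamic shapes
    )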
@@ -350,7 +356,8 @@ def validate_model(
  version_do_run=str(do_run),
  version_dtype=str(dtype or ""),
  version_device=str(device or ""),
- version_trained=str(trained),
+ version_same_as_pretrained=str(same_as_pretrained),
+ version_use_pretrained=str(use_pretrained),
  version_optimization=optimization or "",
  version_quiet=str(quiet),
  version_patch=str(patch),
@@ -361,6 +368,7 @@ def validate_model(
  version_stop_if_static=str(stop_if_static),
  version_exporter=exporter or "",
  version_runtime=runtime,
+ version_inputs2=inputs2,
  )
  )
  if opset:
@@ -404,19 +412,25 @@ def validate_model(
  summary,
  None,
  (
- lambda mid=model_id, v=verbose, task=task, tr=trained, iop=iop, sub=subfolder: (
+ lambda mid=model_id, v=verbose, task=task, uptr=use_pretrained, tr=same_as_pretrained, iop=iop, sub=subfolder, i2=inputs2: ( # noqa: E501
  get_untrained_model_with_inputs(
  mid,
  verbose=v,
  task=task,
+ use_pretrained=uptr,
  same_as_pretrained=tr,
  inputs_kwargs=iop,
  model_kwargs=mop,
  subfolder=sub,
+ add_second_input=i2,
  )
  )
  ),
  )
+ assert not inputs2 or "inputs2" in data, (
+ f"inputs2 is True but second set is missing in data for "
+ f"model id {model_id!r}: {sorted(data)}"
+ )

  if exporter == "modelbuilder":
  # Models used with ModelBuilder do not like batch size > 1.
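Note: the add_second_input keyword shown above is what populates data["inputs2"]. A small sketch of that flow in isolation; the import path is an assumption based on the file list (onnx_diagnostic/torch_models/hghub/model_inputs.py) and the model id is illustrative:

    from onnx_diagnostic.torch_models.hghub.model_inputs import get_untrained_model_with_inputs

    data = get_untrained_model_with_inputs(
        "arnir0/Tiny-LLM",      # illustrative model id
        add_second_input=True,  # new keyword: also build a second, differently shaped input set
    )
    # mirrors the assertion added in the hunk above
    assert "inputs" in data and "inputs2" in data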
@@ -483,6 +497,16 @@ def validate_model(
  if verbose:
  print(f"[validate_model] new inputs: {string_type(data['inputs'])}")
  print(f"[validate_model] new dynamic_hapes: {string_type(data['dynamic_shapes'])}")
+ if inputs2:
+ assert (
+ "inputs2" in data
+ ), "Cannot test a second set of inputs as it was not defined."
+ data["inputs2"], _ = filter_inputs(
+ data["inputs2"],
+ drop_names=drop_inputs,
+ model=data["model"],
+ dynamic_shapes=data["dynamic_shapes"],
+ )

  if not empty(dtype):
  if isinstance(dtype, str):
@@ -492,6 +516,8 @@ def validate_model(
  data["model"] = to_any(data["model"], dtype) # type: ignore
  data["inputs"] = to_any(data["inputs"], dtype) # type: ignore
  summary["model_dtype"] = str(dtype)
+ if "inputs2" in data:
+ data["inputs2"] = to_any(data["inputs2"], dtype) # type: ignore

  if not empty(device):
  if verbose:
@@ -499,6 +525,8 @@ def validate_model(
  data["model"] = to_any(data["model"], device) # type: ignore
  data["inputs"] = to_any(data["inputs"], device) # type: ignore
  summary["model_device"] = str(device)
+ if "inputs2" in data:
+ data["inputs2"] = to_any(data["inputs2"], device) # type: ignore

  for k in ["task", "size", "n_weights"]:
  summary[f"model_{k.replace('_','')}"] = data[k]
@@ -527,35 +555,13 @@ def validate_model(
  print("[validate_model] --")

  if do_run:
- if verbose:
- print("[validate_model] -- run the model...")
- print(f"[validate_model] inputs={string_type(data['inputs'], with_shape=True)}")
- # We make a copy of the input just in case the model modifies them inplace
- hash_inputs = string_type(data["inputs"], with_shape=True)
- inputs = torch_deepcopy(data["inputs"])
- model = data["model"]
-
- expected = _quiet_or_not_quiet(
- quiet,
- "run",
- summary,
- data,
- (lambda m=model, inp=inputs: m(**torch_deepcopy(inp))),
- repeat=repeat,
- warmup=warmup,
- )
- if "ERR_run" in summary:
- return summary, data
-
- summary["model_expected"] = string_type(expected, with_shape=True)
- if verbose:
- print("[validate_model] done (run)")
- data["expected"] = expected
- assert hash_inputs == string_type(data["inputs"], with_shape=True), (
- f"The model execution did modified the inputs:\n"
- f"before: {hash_inputs}\n"
- f" after: {string_type(data['inputs'], with_shape=True)}"
+ _validate_do_run_model(
+ data, summary, "inputs", "run", "run_expected", verbose, repeat, warmup, quiet
  )
+ if inputs2:
+ _validate_do_run_model(
+ data, summary, "inputs2", "run2", "run_expected2", verbose, 1, 0, quiet
+ )

  if exporter:
  print(
@@ -578,43 +584,7 @@ def validate_model(
  data["inputs_export"] = modificator(data["inputs"]) # type: ignore

  if do_run:
- # We run a second time the model to check the patch did not
- # introduce any discrepancies
- if verbose:
- print("[validate_model] run patched model...")
- print(
- f"[validate_model] patched inputs="
- f"{string_type(data['inputs_export'], with_shape=True)}"
- )
- hash_inputs = string_type(data["inputs_export"], with_shape=True)
-
- # We make a copy of the input just in case the model modifies them inplace
- inputs = torch_deepcopy(data["inputs_export"])
- model = data["model"]
-
- expected = _quiet_or_not_quiet(
- quiet,
- "run_patched",
- summary,
- data,
- (lambda m=model, inp=inputs: m(**inp)),
- )
- if "ERR_run_patched" in summary:
- return summary, data
-
- disc = max_diff(data["expected"], expected)
- for k, v in disc.items():
- summary[f"disc_patched_{k}"] = str(v)
- if verbose:
- print("[validate_model] done (patched run)")
- print(f"[validate_model] patched discrepancies={string_diff(disc)}")
- assert hash_inputs == string_type(
- data["inputs_export"], with_shape=True
- ), (
- f"The model execution did modified the inputs:\n"
- f"before: {hash_inputs}\n"
- f" after: {string_type(data['inputs_export'], with_shape=True)}"
- )
+ _validate_do_run_exported_program(data, summary, verbose, quiet)

  # data is modified inplace
  summary_export, data = call_exporter(
@@ -707,6 +677,7 @@ def validate_model(
  runtime=runtime,
  repeat=repeat,
  warmup=warmup,
+ inputs2=inputs2,
  )
  summary.update(summary_valid)

@@ -767,6 +738,7 @@ def validate_model(
  runtime=runtime,
  repeat=repeat,
  warmup=warmup,
+ inputs2=inputs2,
  )
  summary.update(summary_valid)

@@ -779,6 +751,79 @@ def validate_model(
  return summary, data


+ def _validate_do_run_model(
+ data, summary, key, tag, expected_tag, verbose, repeat, warmup, quiet
+ ):
+ if verbose:
+ print(f"[validate_model] -- run the model inputs={key!r}...")
+ print(f"[validate_model] {key}={string_type(data[key], with_shape=True)}")
+ # We make a copy of the input just in case the model modifies them inplace
+ hash_inputs = string_type(data[key], with_shape=True)
+ inputs = torch_deepcopy(data[key])
+ model = data["model"]
+
+ expected = _quiet_or_not_quiet(
+ quiet,
+ tag,
+ summary,
+ data,
+ (lambda m=model, inp=inputs: m(**torch_deepcopy(inp))),
+ repeat=repeat,
+ warmup=warmup,
+ )
+ if f"ERR_{tag}" in summary:
+ return summary, data
+
+ summary[expected_tag] = string_type(expected, with_shape=True)
+ if verbose:
+ print(f"[validate_model] done ([{tag}])")
+ data[expected_tag] = expected
+ assert hash_inputs == string_type(data[key], with_shape=True), (
+ f"The model execution did modified the inputs:\n"
+ f"before: {hash_inputs}\n"
+ f" after: {string_type(data[key], with_shape=True)}"
+ )
+
+
+ def _validate_do_run_exported_program(data, summary, verbose, quiet):
+
+ # We run a second time the model to check the patch did not
+ # introduce any discrepancies
+ if verbose:
+ print("[validate_model] run patched model...")
+ print(
+ f"[validate_model] patched inputs="
+ f"{string_type(data['inputs_export'], with_shape=True)}"
+ )
+ hash_inputs = string_type(data["inputs_export"], with_shape=True)
+
+ # We make a copy of the input just in case the model modifies them inplace
+ inputs = torch_deepcopy(data["inputs_export"])
+ model = data["model"]
+
+ expected = _quiet_or_not_quiet(
+ quiet,
+ "run_patched",
+ summary,
+ data,
+ (lambda m=model, inp=inputs: m(**inp)),
+ )
+ if "ERR_run_patched" in summary:
+ return summary, data
+
+ disc = max_diff(data["run_expected"], expected)
+ for k, v in disc.items():
+ summary[f"disc_patched_{k}"] = str(v)
+ if verbose:
+ print("[validate_model] done (patched run)")
+ print(f"[validate_model] patched discrepancies={string_diff(disc)}")
+ assert hash_inputs == string_type(data["inputs_export"], with_shape=True), (
+ f"The model execution did modified the inputs:\n"
+ f"before: {hash_inputs}\n"
+ f" after: {string_type(data['inputs_export'], with_shape=True)}"
+ )
+
+
  def call_exporter(
  data: Dict[str, Any],
  exporter: str,
@@ -845,7 +890,11 @@ def call_exporter(
  )
  return summary, data
  raise NotImplementedError(
- f"export with {exporter!r} and optimization={optimization!r} not implemented yet"
+ f"export with {exporter!r} and optimization={optimization!r} not implemented yet, "
+ f"exporter must startswith 'onnx-', 'custom', 'export', 'modelbuilder' "
+ f"(onnx-dynamo, custom, export), optimization can 'ir', "
+ f"'default', 'default+onnxruntime', "
+ f"'default+onnxruntime+os_ort', 'ir', 'os_ort'"
  )


@@ -959,7 +1008,7 @@ def call_torch_export_export(
  if "ERR_export_export" in summary:
  return summary, data

- disc = max_diff(data["expected"], expected)
+ disc = max_diff(data["run_expected"], expected)
  for k, v in disc.items():
  summary[f"disc_exported_{k}"] = str(v)
  if verbose:
@@ -981,6 +1030,7 @@ def validate_onnx_model(
  runtime: str = "onnxruntime",
  repeat: int = 1,
  warmup: int = 0,
+ inputs2: bool = True,
  ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
  """
  Verifies that an onnx model produces the same
@@ -996,6 +1046,8 @@ def validate_onnx_model(
  :param runtime: onnx runtime to use, onnxruntime or torch
  :param repeat: run that number of times the model
  :param warmup: warmup the model
+ :param inputs2: to validate the model on the second input set
+ to make sure the exported model supports dynamism
  :return: two dictionaries, one with some metrics,
  another one with whatever the function produces
  """
@@ -1065,43 +1117,51 @@ def validate_onnx_model(
  if verbose:
  print(f"[validate_onnx_model] done (ort_session) flavour={flavour!r}")

- # make_feeds
- if verbose:
- print("[validate_onnx_model] -- make_feeds...")
- print(f"[validate_onnx_model] inputs={string_type(data['inputs'], with_shape=True)}")
- feeds = make_feeds(sess, data["inputs"], use_numpy=True, check_flatten=False)
- if verbose:
- print(f"[validate_onnx_model] ort inputs={string_type(feeds, with_shape=True)}")
- summary[_mk("onnx_ort_inputs")] = string_type(feeds, with_shape=True)
- if verbose:
- print("[validate_onnx_model] done (make_feeds)")
+ keys = [("inputs", "run_expected", "")]
+ if inputs2:
+ keys.append(("inputs2", "run_expected2", "2"))
+ for k_input, k_expected, suffix in keys:
+ # make_feeds
+ if verbose:
+ print(f"[validate_onnx_model] -- make_feeds for {k_input!r}...")
+ print(
+ f"[validate_onnx_model] inputs={string_type(data[k_input], with_shape=True)}"
+ )
+ feeds = make_feeds(sess, data[k_input], use_numpy=True, check_flatten=False)
+ if verbose:
+ print(f"[validate_onnx_model] ort inputs={string_type(feeds, with_shape=True)}")
+ summary[_mk(f"onnx_ort_inputs{suffix}")] = string_type(feeds, with_shape=True)
+ if verbose:
+ print("[validate_onnx_model] done (make_feeds)")

- # run ort
- if verbose:
- print("[validate_onnx_model] run session...")
+ # run ort
+ if verbose:
+ print("[validate_onnx_model] run session...")

- got = _quiet_or_not_quiet(
- quiet,
- _mk("time_onnx_ort_run"),
- summary,
- data,
- (lambda sess=sess, feeds=feeds: sess.run(None, feeds)),
- repeat=repeat,
- warmup=warmup,
- )
- if f"ERR_{_mk('time_onnx_ort_run')}" in summary:
- return summary, data
+ got = _quiet_or_not_quiet(
+ quiet,
+ _mk(f"time_onnx_ort_run{suffix}"),
+ summary,
+ data,
+ (lambda sess=sess, feeds=feeds: sess.run(None, feeds)),
+ repeat=repeat,
+ warmup=warmup,
+ )
+ if f"ERR_{_mk(f'time_onnx_ort_run{suffix}')}" in summary:
+ return summary, data

- if verbose:
- print("[validate_onnx_model] done (run)")
- print(f"[validate_onnx_model] got={string_type(got, with_shape=True)}")
+ summary[f"run_feeds_{k_input}"] = string_type(feeds, with_shape=True, with_device=True)
+ summary[f"run_output_{k_input}"] = string_type(got, with_shape=True, with_device=True)
+ if verbose:
+ print("[validate_onnx_model] done (run)")
+ print(f"[validate_onnx_model] got={string_type(got, with_shape=True)}")

- # compute discrepancies
- disc = max_diff(data["expected"], got, flatten=True)
- if verbose:
- print(f"[validate_onnx_model] discrepancies={string_diff(disc)}")
- for k, v in disc.items():
- summary[_mk(f"disc_onnx_ort_run_{k}")] = v
+ # compute discrepancies
+ disc = max_diff(data[k_expected], got, flatten=True)
+ if verbose:
+ print(f"[validate_onnx_model] discrepancies={string_diff(disc)}")
+ for k, v in disc.items():
+ summary[_mk(f"disc_onnx_ort_run{suffix}_{k}")] = v
  return summary, data


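Note: with the loop above, validate_onnx_model now writes one group of summary entries per input set. A hedged illustration of the key names; the _mk() prefix and the metric names appended by max_diff are not visible in this diff, so treat the exact spellings as assumptions:

    expected_summary_keys = [
        "onnx_ort_inputs",       "onnx_ort_inputs2",        # serialized feeds
        "time_onnx_ort_run",     "time_onnx_ort_run2",      # run timings
        "run_feeds_inputs",      "run_feeds_inputs2",       # feeds, with shapes and devices
        "run_output_inputs",     "run_output_inputs2",      # outputs, with shapes and devices
        "disc_onnx_ort_run_...", "disc_onnx_ort_run2_...",  # one entry per max_diff metric
    ]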
{onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx-diagnostic
- Version: 0.6.3
+ Version: 0.7.1
  Summary: Investigate ONNX models
  Home-page: https://github.com/sdpython/onnx-diagnostic
  Author: Xavier Dupré
@@ -59,7 +59,7 @@ onnx-diagnostic: investigate onnx models
  .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
  :target: https://github.com/psf/black

- .. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/branch/main/graph/badge.svg?token=Wb9ZGDta8J
+ .. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/graph/badge.svg?token=91T5ZVIP96
  :target: https://codecov.io/gh/sdpython/onnx-diagnostic

  The main feature is about `patches <https://github.com/sdpython/onnx-diagnostic/tree/main/onnx_diagnostic/torch_export_patches>`_:
{onnx_diagnostic-0.6.3.dist-info → onnx_diagnostic-0.7.1.dist-info}/RECORD
@@ -1,32 +1,35 @@
- onnx_diagnostic/__init__.py,sha256=mRkq5dlSo05GQMct7d6mMZLb6s5T24eG_3mD5O3wBo0,173
+ onnx_diagnostic/__init__.py,sha256=YwjIZRhfTzRgTOBvmUSNNYX0SBBdmLsWfkMVwHkJloQ,173
  onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
- onnx_diagnostic/_command_lines_parser.py,sha256=pxG3dYpTTpRCjBRzFGEZm4ewb7xprZihP7fG08kWL04,19989
+ onnx_diagnostic/_command_lines_parser.py,sha256=WpCri2dqc1a1KYthQcb4-eN0htfiWeLrAkncNK3cZaY,27466
  onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
- onnx_diagnostic/doc.py,sha256=O_ncetL0G4-oHIxLv8ofTIIxCT_5ESSkKxfYvgccJEc,2038
- onnx_diagnostic/ext_test_case.py,sha256=nhWz75caudvKn-svH1ppUY8uw8MoTD4cEdqMdj6PiPc,42399
+ onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
+ onnx_diagnostic/ext_test_case.py,sha256=IX-DNabvsPw8UkUeXC1amw3nnzdmJ3DeERn4E1Y_omo,42416
  onnx_diagnostic/export/__init__.py,sha256=yEIoWiOeTwBsDhyYt2fTKuhtA0Ya1J9u9ZzMTOTWaWs,101
- onnx_diagnostic/export/dynamic_shapes.py,sha256=EHB7VoWNx8sVetvOgE1vgC7wHtIjWDLjanhbEJNpK88,39892
+ onnx_diagnostic/export/dynamic_shapes.py,sha256=HYf2OEi7PmRSj8uxMD-wbdVxxejkWXTPBAkxoFeM27A,40811
+ onnx_diagnostic/export/shape_helper.py,sha256=C9cEq_x8I40RKuD89qWIholN1XZoWhaKPfbZQhiPD3g,4725
  onnx_diagnostic/export/validate.py,sha256=_PGUql2DJhIgGKo0WjTGUc5AgsZUx8fEs00MePy-w98,6043
  onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIOpZDsJg,83
  onnx_diagnostic/helpers/args_helper.py,sha256=SRWnqC7EENg09RZlA50B_PcdiIhdbgA4C3ACfzl5nMs,4419
  onnx_diagnostic/helpers/bench_run.py,sha256=CGA6VMJZMH2gDhVueT9ypNm4PMcjGrrGFYp08nhWj9k,16539
- onnx_diagnostic/helpers/cache_helper.py,sha256=soKjyIXa7EQgALd9PAUGIKYzXlJGoLevYiQDsxoqkQ4,8349
- onnx_diagnostic/helpers/config_helper.py,sha256=aZATKVbZuw8L56KQpwMNcqJ3Qi5OplzS_N3ETR3hmj0,3351
+ onnx_diagnostic/helpers/cache_helper.py,sha256=E_D0z5ks4zUJb9H6K19MKhUZ-nZTC_dgeDO5zXNFE9g,10824
+ onnx_diagnostic/helpers/config_helper.py,sha256=CdMeUhmDe0LfKcdPv9-Lzt73RRs29NmUHg9uVrdFwTQ,3479
  onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
  onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
- onnx_diagnostic/helpers/helper.py,sha256=oPybQruFcVLqvqLDhjphOZ8zZU1HHJWAlABMuTkAO8A,57090
+ onnx_diagnostic/helpers/helper.py,sha256=_6K0IvfK7ymBE8uWFAOA1ksU_fMvl2BRtlxj5SA9R2I,58203
+ onnx_diagnostic/helpers/log_helper.py,sha256=qZdvHHQqkYdZOf8UsIrByswMYSF_axca27JXRyQk52Y,69163
  onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
- onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=R1Vu4zHzN7GIUnbMVQzpkaXj8cCyyOweWOI9-TSgAHM,20966
- onnx_diagnostic/helpers/model_builder_helper.py,sha256=xIZmsVMFHfdtYeZHVEffBtxYObAaRPiaSmwwSKkmLwY,13502
+ onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=p0Xh2Br38xAqUjB2214GiNOIbCgiVZKeyVEnjdyqyFI,21091
+ onnx_diagnostic/helpers/model_builder_helper.py,sha256=RvDyPFqRboEU3HsQV_xi9oy-o3_4KuGFVzs5MhksduY,12552
  onnx_diagnostic/helpers/onnx_helper.py,sha256=pXXQjfyNTSUF-Kt72U4fnBDkYAnWYMxdSw8m0qk3xmE,39670
  onnx_diagnostic/helpers/ort_session.py,sha256=UgUUeUslDxEFBc6w6f3HMq_a7bn4TBlItmojqWquSj4,29281
  onnx_diagnostic/helpers/rt_helper.py,sha256=BXU_u1syk2RyM0HTFHKEiO6rHHhZW2UFPyUTVdeq8BU,4251
- onnx_diagnostic/helpers/torch_helper.py,sha256=mrmn4mBeRvMRJ9cEu7BbNG-AHq2OJfSm8dxgtzh-yQQ,31631
- onnx_diagnostic/reference/__init__.py,sha256=nrd09rRuwMDBCPTSZ6kSKZXp1W9W_ExO1t9duDlBnh8,146
+ onnx_diagnostic/helpers/torch_helper.py,sha256=L7qv14q4r1LcDKpEVobhySK6VE_X3h88Acvr6Kt4qEk,32244
+ onnx_diagnostic/reference/__init__.py,sha256=rLZsxOlnb7-81F2CzepGnZLejaROg4JvgFaGR9FwVQA,208
  onnx_diagnostic/reference/evaluator.py,sha256=RzNzjFDeMe-4X51Tb22N6aagazY5ktNq-mRmPcfY5EU,8848
- onnx_diagnostic/reference/ort_evaluator.py,sha256=OaWMREF8fuJwimmONpIjQ6WxQT1X2roDsdJsgR8H_Cg,24853
+ onnx_diagnostic/reference/ort_evaluator.py,sha256=1O7dHj8Aspolidg6rB2Nm7hT3HaGb4TxAgjCCD0XVcQ,26159
  onnx_diagnostic/reference/quantized_tensor.py,sha256=5u67uS2uGacdMD5VYCbpojNjiesDlV_kO0fAJ0vUWGE,1098
- onnx_diagnostic/reference/torch_evaluator.py,sha256=qAeYvSFwOCMDctc39evBEle_2bX8kuJW2QSLksofzn8,26600
+ onnx_diagnostic/reference/report_results_comparison.py,sha256=OsyQN8EHZZoj97u74RQP-7WFpebPOso5GEDpdkLWu6M,3645
+ onnx_diagnostic/reference/torch_evaluator.py,sha256=gf8EPoX4C4yGgQ-DqxXxaGU26WdEhn8Gd6iesDLqAV0,27692
  onnx_diagnostic/reference/ops/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  onnx_diagnostic/reference/ops/op_add_add_mul_mul.py,sha256=CXQVtgVrT066gDJFwxL4nDSY4G8r08XNu3EwhWqMapU,1521
  onnx_diagnostic/reference/ops/op_attention.py,sha256=ThALMDF53v3QeG1bohi0bvX2o90HZhGJbbAFOtwEHPE,2027
@@ -69,22 +72,22 @@ onnx_diagnostic/reference/torch_ops/sequence_ops.py,sha256=3EiVKpGfN4d1Iry4hgnr3
  onnx_diagnostic/reference/torch_ops/shape_ops.py,sha256=pJrNR2UB4PlWl6cv4EDl1uGl8YTBUUMQkhJcsh5K4sA,4291
  onnx_diagnostic/reference/torch_ops/unary_ops.py,sha256=E8Ys1eZsOTsucBKoXb1_Kl5LbBDygniDvW2BvN4IPMo,1708
  onnx_diagnostic/tasks/__init__.py,sha256=5XXM-rv-Hk2gSHvqsww9DzVd9mcRifacgcPgvPCjnDM,2412
- onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=oRoYy56M0Yv_WOcn1hJXv-R9wgHkJ8rbym7j7y8oslw,6851
- onnx_diagnostic/tasks/feature_extraction.py,sha256=V-T5NpZ6EimOz00weWWxGfksZ9jQ5ZQyaP-mxuCEuJo,2223
- onnx_diagnostic/tasks/fill_mask.py,sha256=POUtgvOWv8wTOVLqxPNsj_C2WBiBWkmM72Z9mNlNqxI,2341
- onnx_diagnostic/tasks/image_classification.py,sha256=wtSkZB6Wqh2Y1O_zjfnYRZxUQPVVUlLQ_4D0H-SG-xU,4140
- onnx_diagnostic/tasks/image_text_to_text.py,sha256=6rKbts_p05VZL8wufJa6NP-MhxUOU-fuTAks5QfUVVQ,6037
- onnx_diagnostic/tasks/mixture_of_expert.py,sha256=orMx8Ly4DO0Po0tEmme4gi2flPIGip4TaAyxVik4Zgg,2685
- onnx_diagnostic/tasks/object_detection.py,sha256=o1T8NMztjdFAFA-Z5efx-8nd9W7YZZcbE8Ag5wKVxZA,3930
- onnx_diagnostic/tasks/sentence_similarity.py,sha256=okQ-TQR8j1a92_N-eT6xN56rjtu26CdlU_pk88gdbGs,2356
- onnx_diagnostic/tasks/summarization.py,sha256=qK9E7TdaB9g_Mu-f4Vdr_X8mMAUSlIapLlc8FlWSGFU,7989
- onnx_diagnostic/tasks/text2text_generation.py,sha256=x5p_5n72scF9hFNOVP8BjOr4Lga1nnqtjfStZ_n9EwQ,8406
- onnx_diagnostic/tasks/text_classification.py,sha256=OgC_G9iumzTjTNUEvMoFFNTHCD8_BkdvdYC4jUsfpHM,2412
- onnx_diagnostic/tasks/text_generation.py,sha256=jWKBFt7M-Neyjw6YSOrYY28xOkRkIerDF5YlugKK3qk,10386
- onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=N3cEG1Lq95wS1N_CWUUUCU5j-4Tp5eR8Ce68U8THYAk,4380
+ onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=7OspFypNHLSL6huvP9ms_euhHqYXyTZjAJ8Z7EXGimk,6967
+ onnx_diagnostic/tasks/feature_extraction.py,sha256=CbxbGsv3JvEQ2J9tO2DOpMHcJj5ZlCwY81ZB3hPB4D4,2339
+ onnx_diagnostic/tasks/fill_mask.py,sha256=ZWz8swzEeRbkmbY9oZ4CM1LYCWWUxnS5CqrKmUVw-u0,2457
+ onnx_diagnostic/tasks/image_classification.py,sha256=UjUAFYnwXIdPMXJdHR5MDzpsfMeIvyuKR4RqJVpGV_Q,4449
+ onnx_diagnostic/tasks/image_text_to_text.py,sha256=LmpMdH6oF_EN3WIACzSip4fPZOjZWFOoXg4k8qAio6Q,7639
+ onnx_diagnostic/tasks/mixture_of_expert.py,sha256=C0ugEc8OWmVyEZpsh8MJq_te1zgOHhpITtnSmGC16Ls,2801
+ onnx_diagnostic/tasks/object_detection.py,sha256=1lF5e2f2Coz1urSptEKgvUGCOSFBf0Anuq_QYOC00dA,4046
+ onnx_diagnostic/tasks/sentence_similarity.py,sha256=3MvNxjC1iEMtQL_jH1c8bmrVc5IG1lfUygrCZ0SORJk,2472
+ onnx_diagnostic/tasks/summarization.py,sha256=NLwqhpiQrU8UWd3u30VsNA3FsL315S3nlQ7ycUzJueo,8105
+ onnx_diagnostic/tasks/text2text_generation.py,sha256=mYvsq-O69fr5pitX0mWugT76QuK4xUs40Vsz9ru_XK8,8522
+ onnx_diagnostic/tasks/text_classification.py,sha256=NCCKobBQyCc7dSVj7_5N6S_RuvBlRMAdWkS2rVvrzck,2528
+ onnx_diagnostic/tasks/text_generation.py,sha256=PDh870BB-llzlu8h_aZX4Z-9QLzcGmDwX5aKJPy_K90,12504
+ onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=GKaXm8g7cK23h3wJEUc6Q-6mpmLAzQ4YkJbd-eGP7Y4,4496
  onnx_diagnostic/torch_export_patches/__init__.py,sha256=0SaZedwznm1hQUCvXZsGZORV5vby954wEExr5faepGg,720
- onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=MgOQwgLf6-uCGQaiUrhVNfZQ43dCp1iWGbzLbKEVyc8,18810
- onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=l5HvE_FfdCtgLJBmJczH6nA8jZY1725AggiHwoAa-o0,15763
+ onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=op8jgnTa_1T_bGN172A6YFTtkQv_ALMNu1oukrsFt9U,20634
+ onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=Nsf7HUJqu3ZRd0o9vUVCF6ifmS5UaQM6hB_Gmn19dNI,17095
  onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
  onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=9b4pmyT00BwLqi7WG-gliep1RUy3gXEgW6BDnlSSA-M,7689
  onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
@@ -93,23 +96,23 @@ onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=VtkQB1o3Q2Fh99OOF6v
  onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=DTvdHPtNQh25Akv5o3D4Jxf1L1-SJ7w14tgvj8AAns8,26577
  onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=KaZ8TjDa9ATgT4HllYzzoNf_51q_yOj_GuF5NYjPCrU,18913
- onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=Hf-U50vzgzJ4iUjS2LAYkbfmzCEwX80Dzvdrr-Rhlp0,26456
+ onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=GwcPUaSm-Zys2pWHac8Wcvpmy2h4oiFQDmx_D3GZNBA,41007
  onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
- onnx_diagnostic/torch_models/test_helper.py,sha256=tt6bgLjGRayzvkXrTelKHTjr7XU9BvhX7uE4XJq6H6o,59927
+ onnx_diagnostic/torch_models/validate.py,sha256=NXFmJKmoO4reaeiu2ibuVgMRLS-l0WSdLhjn40_YsbU,62177
  onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
- onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=BgM_p57Q0gT9GOhdrmOYcnbuTTzCWp80jS4OQqWwFhs,9990
- onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=885wKyZkdM-Qp5Sg6C9Ol1dxigmA8FYAko-Ys08sppo,8096
- onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=V4Wv_73QsiWMFTw-xqj-5yZOri1NKuAub6VQa2UVIpw,259582
- onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=1h1jHJknqDHAa308_bjOzXVPgy3GIF-ikpce8CHNTmE,7804
+ onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=q4jUgSJ8AD28mpX7yDAUp0z7EQgb8euuD-P9Hayehds,12672
+ onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=lM9IDb5-3X8NSHcSPJFLS3tAvu_FqvcetyoHn-P2FIM,8272
+ onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=dE8tHksGOsTk77jpa7mldLYzaQ5joKxDxDB0ZnwQBV4,267246
+ onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=XCllU8_fJBjkCGSV8cdqlpF1QH6AN_OAErK0aAXNQts,10261
  onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=ynBTDHJHCk44NjLT_t6OiFDBdPP0rFGPteiONDxvztw,3708
- onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=7N3fGvT_4Mn4NbIo0Qk57c6DMc3OXGWyvj_P41rjwSY,3513
+ onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tmtVl83SmVOL4-Um7Qy-f0E48QI,2507
  onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
  onnx_diagnostic/torch_onnx/sbs.py,sha256=1EL25DeYFzlBSiFG_XjePBLvsiItRXbdDrr5-QZW2mA,16878
- onnx_diagnostic-0.6.3.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
- onnx_diagnostic-0.6.3.dist-info/METADATA,sha256=eJxj0KTPv1rXf-3T9KImWIF-u8g7wHXBGZm5zvXM7V8,6643
- onnx_diagnostic-0.6.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx_diagnostic-0.6.3.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
- onnx_diagnostic-0.6.3.dist-info/RECORD,,
+ onnx_diagnostic-0.7.1.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+ onnx_diagnostic-0.7.1.dist-info/METADATA,sha256=7YUf_2f3gFaGkWUbOrZNqREtnQSpRXx5q65HJEDcDI8,6631
+ onnx_diagnostic-0.7.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx_diagnostic-0.7.1.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+ onnx_diagnostic-0.7.1.dist-info/RECORD,,