onnx-diagnostic 0.7.12__py3-none-any.whl → 0.7.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx_diagnostic/__init__.py +1 -1
- onnx_diagnostic/_command_lines_parser.py +7 -2
- onnx_diagnostic/export/dynamic_shapes.py +11 -2
- onnx_diagnostic/helpers/helper.py +11 -5
- onnx_diagnostic/helpers/log_helper.py +53 -17
- onnx_diagnostic/helpers/mini_onnx_builder.py +17 -0
- onnx_diagnostic/helpers/model_builder_helper.py +1 -0
- onnx_diagnostic/helpers/rt_helper.py +2 -1
- onnx_diagnostic/helpers/torch_helper.py +31 -7
- onnx_diagnostic/reference/torch_evaluator.py +2 -2
- onnx_diagnostic/tasks/data/__init__.py +13 -0
- onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx +0 -0
- onnx_diagnostic/tasks/image_text_to_text.py +256 -141
- onnx_diagnostic/tasks/text_generation.py +30 -0
- onnx_diagnostic/torch_export_patches/eval/__init__.py +184 -151
- onnx_diagnostic/torch_export_patches/eval/model_cases.py +20 -5
- onnx_diagnostic/torch_export_patches/onnx_export_errors.py +52 -20
- onnx_diagnostic/torch_export_patches/patch_inputs.py +10 -6
- onnx_diagnostic/torch_export_patches/patches/patch_torch.py +540 -10
- onnx_diagnostic/torch_export_patches/patches/patch_transformers.py +269 -4
- onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py +36 -0
- onnx_diagnostic/torch_models/hghub/model_inputs.py +55 -5
- onnx_diagnostic/torch_models/validate.py +116 -50
- onnx_diagnostic/torch_onnx/sbs.py +2 -1
- {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/METADATA +11 -31
- {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/RECORD +29 -27
- {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/WHEEL +0 -0
- {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/licenses/LICENSE.txt +0 -0
- {onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/top_level.txt +0 -0
onnx_diagnostic/torch_models/validate.py

@@ -1,3 +1,4 @@
+import gc
 import datetime
 import inspect
 import os
@@ -18,6 +19,7 @@ from ..tasks import random_input_kwargs
 from ..torch_export_patches import torch_export_patches
 from ..torch_export_patches.patch_inputs import use_dyn_not_str
 from .hghub import get_untrained_model_with_inputs
+from .hghub.model_inputs import _preprocess_model_id


 def empty(value: Any) -> bool:
@@ -113,6 +115,8 @@ def _make_folder_name(
     subfolder: Optional[str] = None,
     opset: Optional[int] = None,
     drop_inputs: Optional[List[str]] = None,
+    same_as_pretrained: bool = False,
+    use_pretrained: bool = False,
 ) -> str:
     "Creates a filename unique based on the given options."
     els = [model_id.replace("/", "_")]
@@ -141,6 +145,10 @@ def _make_folder_name(
     if drop_inputs:
         ii = "-".join(f"{s[0]}{s[-1]}" for s in drop_inputs)
         els.append(f"I-{ii.upper()}")
+    if use_pretrained:
+        els.append("TRAINED")
+    elif same_as_pretrained:
+        els.append("SAMESIZE")
     return "-".join(els)


@@ -237,21 +245,35 @@ def _quiet_or_not_quiet(
     summary[f"{suffix}_output"] = string_type(res, with_shape=True, with_min_max=True)
     summary[f"{suffix}_warmup"] = warmup
     summary[f"{suffix}_repeat"] = repeat
-
+    last_ = None
+    end_w = max(0, warmup - 1)
+    for _w in range(end_w):
         t = fct()
-
+        _ = string_type(t, with_shape=True, with_min_max=True)
+        if _ != last_ or _w == end_w - 1:
+            summary[f"io_{suffix}_{_w+1}"] = _
+            last_ = _
     summary[f"time_{suffix}_warmup"] = time.perf_counter() - begin
     times = []
     for _r in range(repeat):
         begin = time.perf_counter()
         t = fct()
         times.append(time.perf_counter() - begin)
-    a = np.array(times)
+    a = np.array(times, dtype=np.float64)
+    a.sort()
+    i5 = max(1, a.shape[0] * 5 // 100)
+    i2 = max(1, a.shape[0] * 2 // 100)
     summary[f"time_{suffix}_latency"] = a.mean()
     summary[f"time_{suffix}_latency_std"] = a.std()
     summary[f"time_{suffix}_latency_min"] = a.min()
-    summary[f"time_{suffix}
+    summary[f"time_{suffix}_latency_max"] = a.max()
+    summary[f"time_{suffix}_latency_098"] = a[-i2]
+    summary[f"time_{suffix}_latency_095"] = a[-i5]
+    summary[f"time_{suffix}_latency_005"] = a[i5]
+    summary[f"time_{suffix}_latency_002"] = a[i2]
     summary[f"time_{suffix}_n"] = len(a)
+    summary[f"time_{suffix}_latency_m98"] = a[i2:-i2].mean()
+
     return res


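The new timing block above replaces a single latency figure with order statistics on the sorted measurements: ``i2`` and ``i5`` index roughly the 2% and 5% tails, and ``_latency_m98`` is the mean with both 2% tails trimmed. A minimal sketch of the same arithmetic (illustrative only, the function name is made up):

    import numpy as np

    def latency_summary(times, suffix="run"):
        # Sort the measurements, then pick tail indices as in the hunk above:
        # at least one sample, otherwise 2% / 5% of the sample count.
        a = np.array(times, dtype=np.float64)
        a.sort()
        i5 = max(1, a.shape[0] * 5 // 100)
        i2 = max(1, a.shape[0] * 2 // 100)
        return {
            f"time_{suffix}_latency": a.mean(),
            f"time_{suffix}_latency_098": a[-i2],  # ~98th percentile
            f"time_{suffix}_latency_095": a[-i5],  # ~95th percentile
            f"time_{suffix}_latency_005": a[i5],   # ~5th percentile
            f"time_{suffix}_latency_002": a[i2],   # ~2nd percentile
            f"time_{suffix}_latency_m98": a[i2:-i2].mean(),  # trimmed mean
        }

    print(latency_summary([0.012, 0.010, 0.011, 0.009, 0.013] * 20))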
@@ -268,20 +290,6 @@ def shrink_config(cfg: Dict[str, Any]) -> Dict[str, Any]:
     return new_cfg


-def _preprocess_model_id(
-    model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
-) -> Tuple[str, Optional[str], bool, bool]:
-    if subfolder or "//" not in model_id:
-        return model_id, subfolder, same_as_pretrained, use_pretrained
-    spl = model_id.split("//")
-    if spl[-1] == "pretrained":
-        return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
-    if spl[-1] in {"transformer", "vae"}:
-        # known subfolder
-        return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
-    return model_id, subfolder, same_as_pretrained, use_pretrained
-
-
 def validate_model(
     model_id: str,
     task: Optional[str] = None,
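``_preprocess_model_id`` is removed here but not dropped: the import hunk at the top of the file now pulls it from ``onnx_diagnostic.torch_models.hghub.model_inputs``. Based on the removed body, the ``//`` suffix convention it implements can be sketched as follows (a reproduction of the removed logic, not necessarily the relocated file's exact code):

    from typing import Optional, Tuple

    def preprocess_model_id(
        model_id: str,
        subfolder: Optional[str] = None,
        same_as_pretrained: bool = False,
        use_pretrained: bool = False,
    ) -> Tuple[str, Optional[str], bool, bool]:
        # "model//pretrained" requests the real pretrained weights,
        # "model//transformer" or "model//vae" select known subfolders.
        if subfolder or "//" not in model_id:
            return model_id, subfolder, same_as_pretrained, use_pretrained
        spl = model_id.split("//")
        if spl[-1] == "pretrained":
            return preprocess_model_id("//".join(spl[:-1]), "", True, True)
        if spl[-1] in {"transformer", "vae"}:
            return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
        return model_id, subfolder, same_as_pretrained, use_pretrained

    print(preprocess_model_id("some/model//transformer"))
    # ('some/model', 'transformer', False, False)
    print(preprocess_model_id("some/model//pretrained"))
    # ('some/model', '', True, True)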
@@ -359,9 +367,10 @@ def validate_model(
         ``orteval10``, ``ref`` only if `do_run` is true
     :param repeat: number of time to measure the model
     :param warmup: warmup the model first
-    :param inputs2: checks that
+    :param inputs2: checks that other sets of inputs are running as well,
         this ensures that the model does support dynamism, the value is used
-        as an increment to the first set of values (added to dimensions)
+        as an increment to the first set of values (added to dimensions),
+        or an empty cache for example
     :param output_names: output names the onnx exporter should use
     :param ort_logs: increases onnxruntime verbosity when creating the session
     :return: two dictionaries, one with some metrics,
@@ -391,13 +400,20 @@ def validate_model(
         :class:`onnx_diagnostic.reference.ExtendedReferenceEvaluator`
         if ``runtime == 'ref'``,
         ``orteval10`` increases the verbosity.
+
+    .. versionchanged:: 0.7.13
+        *inputs2* not only means a second set of inputs but many
+        such as ``input_empty_cache``
+        which refers to a set of inputs using an empty cache.
     """
+    main_validation_begin = time.perf_counter()
     model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
         model_id,
         subfolder,
         same_as_pretrained=same_as_pretrained,
         use_pretrained=use_pretrained,
     )
+    time_preprocess_model_id = time.perf_counter() - main_validation_begin
     default_patch = dict(patch_transformers=True, patch_diffusers=True, patch=True)
     if isinstance(patch, bool):
         patch_kwargs = default_patch if patch else dict(patch=False)
@@ -438,6 +454,7 @@ def validate_model(
             version_exporter=exporter or "",
             version_runtime=runtime,
             version_inputs2=inputs2,
+            time_preprocess_model_id=time_preprocess_model_id,
         )
     )
     if opset:
@@ -454,6 +471,8 @@ def validate_model(
         subfolder=subfolder,
         opset=opset,
         drop_inputs=drop_inputs,
+        use_pretrained=use_pretrained,
+        same_as_pretrained=same_as_pretrained,
     )
     dump_folder = os.path.join(dump_folder, folder_name)
     if not os.path.exists(dump_folder):
@@ -486,7 +505,7 @@ def validate_model(
     mop = model_options or {}
     data = _quiet_or_not_quiet(
         quiet,
-        "
+        "create_torch_model",
         summary,
         None,
         (
@@ -505,10 +524,9 @@ def validate_model(
             )
         ),
     )
-
-
-
-    )
+
+    second_input_keys = [k for k in data if k.startswith("inputs") and k != "inputs"]
+
     if dump_folder:
         with open(os.path.join(dump_folder, "model_config.txt"), "w") as f:
             f.write(f"model_id: {model_id}\n------\n")
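The ``second_input_keys`` introduced here are simply every additional input set produced for the model: any key of ``data`` that starts with ``inputs`` other than ``inputs`` itself. A small illustration with made-up keys:

    data = {
        "inputs": {"input_ids": "..."},              # main input set
        "inputs2": {"input_ids": "..."},             # same inputs with larger dimensions
        "inputs_empty_cache": {"input_ids": "..."},  # inputs with an empty cache
        "model": object(),
    }
    second_input_keys = [k for k in data if k.startswith("inputs") and k != "inputs"]
    print(second_input_keys)  # ['inputs2', 'inputs_empty_cache']

The later steps in this file (``filter_inputs``, ``to_any`` for dtype and device, the torch and onnx validation runs) now loop over this list so every input set is transformed consistently with the main one.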
@@ -601,16 +619,14 @@ def validate_model(
     if verbose:
         print(f"[validate_model] new inputs: {string_type(data['inputs'])}")
         print(f"[validate_model] new dynamic_hapes: {string_type(data['dynamic_shapes'])}")
-    if
-
-
-
-
-
-
-            dynamic_shapes=data["dynamic_shapes"],
-        )
+    if second_input_keys:
+        for k in second_input_keys:
+            data[k], _ = filter_inputs(
+                data[k],
+                drop_names=drop_inputs,
+                model=data["model"],
+                dynamic_shapes=data["dynamic_shapes"],
+            )

     if not empty(dtype):
         if isinstance(dtype, str):
@@ -620,8 +636,9 @@ def validate_model(
         data["model"] = to_any(data["model"], dtype)  # type: ignore
         data["inputs"] = to_any(data["inputs"], dtype)  # type: ignore
         summary["model_dtype"] = str(dtype)
-        if
-
+        if second_input_keys:
+            for k in second_input_keys:
+                data[k] = to_any(data[k], dtype)  # type: ignore

     if not empty(device):
         if verbose:
@@ -629,11 +646,13 @@ def validate_model(
         data["model"] = to_any(data["model"], device)  # type: ignore
         data["inputs"] = to_any(data["inputs"], device)  # type: ignore
         summary["model_device"] = str(device)
-        if
-
+        if second_input_keys:
+            for k in second_input_keys:
+                data[k] = to_any(data[k], device)  # type: ignore

     for k in ["task", "size", "n_weights"]:
         summary[f"model_{k.replace('_','')}"] = data[k]
+    summary["second_input_keys"] = ",".join(second_input_keys)
     summary["model_inputs_options"] = str(input_options or "")
     summary["model_inputs"] = string_type(data["inputs"], with_shape=True)
     summary["model_shapes"] = string_type(data["dynamic_shapes"])
@@ -660,22 +679,37 @@ def validate_model(
             print(f"[validate_model] +INPUT {k}={string_type(v, with_shape=True)}")
         for k, v in data["dynamic_shapes"].items():
             print(f"[validate_model] +SHAPE {k}={string_type(v)}")
+        print(f"[validate_model] second_input_keys={second_input_keys}")
         print("[validate_model] --")

     if do_run:
+        validation_begin = time.perf_counter()
+
         _validate_do_run_model(
             data, summary, "inputs", "run", "run_expected", verbose, repeat, warmup, quiet
         )
-        if
-
-
-
+        if second_input_keys:
+            for k in second_input_keys:
+                _validate_do_run_model(
+                    data,
+                    summary,
+                    k,
+                    f"run2{k[6:]}",
+                    f"run_expected2{k[6:]}",
+                    verbose,
+                    1,
+                    0,
+                    quiet,
+                )
+
+        summary["time_total_validation_torch"] = time.perf_counter() - validation_begin

     if exporter:
         print(
             f"[validate_model] -- export the model with {exporter!r}, "
             f"optimization={optimization!r}"
         )
+        exporter_begin = time.perf_counter()
     if patch_kwargs:
         if verbose:
             print(
@@ -718,7 +752,9 @@ def validate_model(
             dump_folder=dump_folder,
             output_names=output_names,
         )
+
         summary.update(summary_export)
+        summary["time_total_exporter"] = time.perf_counter() - exporter_begin

     dump_stats = None
     if dump_folder:
@@ -759,6 +795,8 @@ def validate_model(
         data["onnx_filename"] = onnx_filename
         summary["time_onnx_save"] = duration
         summary.update(compute_statistics(onnx_filename))
+        del epo
+
         if verbose:
             print(f"[validate_model] dumps statistics in {dump_folder!r}...")
         dump_stats = os.path.join(dump_folder, f"{folder_name}.stats")
@@ -781,6 +819,20 @@ def validate_model(
         return summary, data

     if do_run:
+        # Let's move the model to CPU to make sure it frees GPU memory.
+        if verbose:
+            # It does not really work for the time being and the model
+            # gets loaded twice, one by torch, one by onnxruntime
+            print("[validation_model] -- delete the model")
+        for key in ["model", "onnx_program", "config"]:
+            if key in data:
+                del data[key]
+        if device is not None and "cuda" in str(device).lower():
+            torch.cuda.empty_cache()
+        gc.collect()
+        print("[validation_model] -- done")
+
+        validation_begin = time.perf_counter()
         summary_valid, data = validate_onnx_model(
             data=data,
             quiet=quiet,
@@ -788,10 +840,11 @@ def validate_model(
             runtime=runtime,
             repeat=repeat,
             warmup=warmup,
-
+            second_input_keys=second_input_keys,
             ort_logs=ort_logs,
         )
         summary.update(summary_valid)
+        summary["time_total_validation_onnx"] = time.perf_counter() - validation_begin

     if ortfusiontype and "onnx_filename" in data:
         assert (
@@ -850,15 +903,17 @@ def validate_model(
                 runtime=runtime,
                 repeat=repeat,
                 warmup=warmup,
-
+                second_input_keys=second_input_keys,
             )
             summary.update(summary_valid)

     _compute_final_statistics(summary)
+    summary["time_total"] = time.perf_counter() - main_validation_begin

     if verbose:
         print("[validate_model] -- done (final)")
     if dump_stats:
+        # Dumps again the statistics.
         with open(dump_stats, "w") as f:
             for k, v in sorted(summary.items()):
                 f.write(f":{k}:{v};\n")
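Before the ONNX validation starts, the torch model is now dropped to release GPU memory (the comment in the hunk notes this is only partially effective since the weights end up loaded once by torch and once by onnxruntime). A distilled sketch of the same steps, with a made-up helper name:

    import gc
    from typing import Optional

    import torch

    def release_torch_model(data: dict, device: Optional[str]) -> None:
        # Drop the references that keep the weights alive, then ask the CUDA
        # caching allocator and the Python garbage collector to clean up.
        for key in ["model", "onnx_program", "config"]:
            data.pop(key, None)
        if device is not None and "cuda" in str(device).lower():
            torch.cuda.empty_cache()
        gc.collect()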
@@ -1232,7 +1287,7 @@ def validate_onnx_model(
     runtime: str = "onnxruntime",
     repeat: int = 1,
     warmup: int = 0,
-
+    second_input_keys: Optional[List[str]] = None,
     ort_logs: bool = False,
 ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
     """
@@ -1249,7 +1304,7 @@ def validate_onnx_model(
     :param runtime: onnx runtime to use, onnxruntime, torch, orteval, ref
     :param repeat: run that number of times the model
     :param warmup: warmup the model
-    :param
+    :param second_input_keys: to validate the model on other input sets
         to make sure the exported model supports dynamism, the value is
         used as an increment added to the first set of inputs (added to dimensions)
     :param ort_logs: triggers the logs for onnxruntime
@@ -1374,10 +1429,12 @@ def validate_onnx_model(
         print(f"[validate_onnx_model] done (ort_session) flavour={flavour!r}")

     keys = [("inputs", "run_expected", "")]
-    if
-    keys.
+    if second_input_keys:
+        keys.extend([(k, f"run_expected2{k[6:]}", f"2{k[6:]}") for k in second_input_keys])
     for k_input, k_expected, suffix in keys:
         # make_feeds
+        assert k_input in data, f"Unable to find {k_input!r} in {sorted(data)}"
+        assert k_expected in data, f"Unable to find {k_expected!r} in {sorted(data)}"
         if verbose:
             print(f"[validate_onnx_model] -- make_feeds for {k_input!r}...")
             print(
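Each extra input set is compared against expected outputs stored under a suffixed key, the suffix being whatever follows the ``inputs`` prefix. A small sketch of the mapping (the key name is only an example):

    second_input_keys = ["inputs_empty_cache"]
    keys = [("inputs", "run_expected", "")]
    if second_input_keys:
        keys.extend(
            [(k, f"run_expected2{k[6:]}", f"2{k[6:]}") for k in second_input_keys]
        )
    print(keys)
    # [('inputs', 'run_expected', ''),
    #  ('inputs_empty_cache', 'run_expected2_empty_cache', '2_empty_cache')]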
@@ -1674,11 +1731,13 @@ def process_statistics(data: Sequence[Dict[str, float]]) -> Dict[str, Any]:
         "constant_folding",
         "remove_identity",
         "remove_duplicated_initializer",
+        "remove_duplicated_shape",
         "dynamic_dimension_naming",
         "inline",
         "check",
         "build_graph_for_pattern",
         "pattern_optimization",
+        "topological_sort",
     ]:
         if s in p or s.replace("_", "-") in p:
             return s
@@ -1804,6 +1863,8 @@ def call_torch_export_custom(
         "custom-nostrict-noinline",
         "custom-nostrict-default-noinline",
         "custom-nostrict-all-noinline",
+        "custom-dec",
+        "custom-decall",
     }
     assert exporter in available, f"Unexpected value for exporter={exporter!r} in {available}"
     assert "model" in data, f"model is missing from data: {sorted(data)}"
@@ -1840,7 +1901,9 @@ def call_torch_export_custom(
     export_options = ExportOptions(
         strict=strict,
         decomposition_table=(
-            "default"
+            "default"
+            if ("-default" in exporter or "-dec" in exporter)
+            else ("all" if ("-all" in exporter or "-decall" in exporter) else None)
         ),
         save_ep=(os.path.join(dump_folder, f"{exporter}.ep") if dump_folder else None),
     )
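The two new exporter names feed into the ``decomposition_table`` selection shown above. A sketch of what that expression evaluates to for a few names, mirroring the condition exactly as written (note that ``-dec`` is also a substring of ``-decall``):

    def decomposition_table(exporter: str):
        # Mirrors the conditional added in the hunk above.
        if "-default" in exporter or "-dec" in exporter:
            return "default"
        if "-all" in exporter or "-decall" in exporter:
            return "all"
        return None

    for name in ["custom", "custom-dec", "custom-nostrict-all-noinline", "custom-decall"]:
        print(name, "->", decomposition_table(name))
    # custom -> None
    # custom-dec -> default
    # custom-nostrict-all-noinline -> all
    # custom-decall -> default  (the "-dec" test matches first)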
@@ -2020,4 +2083,7 @@ def _compute_final_statistics(summary: Dict[str, Any]):
         stats["stat_estimated_speedup_ort"] = (
             summary["time_run_latency"] / summary["time_run_onnx_ort_latency"]
         )
+        stats["stat_estimated_speedup_ort_m98"] = (
+            summary["time_run_latency_m98"] / summary["time_run_onnx_ort_latency_m98"]
+        )
     summary.update(stats)
onnx_diagnostic/torch_onnx/sbs.py

@@ -3,6 +3,7 @@ import onnx
 import torch
 from ..helpers import string_type, string_diff, max_diff
 from ..helpers.onnx_helper import to_array_extended
+from ..helpers.torch_helper import to_numpy


 def validate_fx_tensor(
@@ -296,7 +297,7 @@ def run_aligned(
     )

     for inp, v in zip(onx.graph.input, args):
-        onnx_results[inp.name] = v
+        onnx_results[inp.name] = to_numpy(v)
     if verbose:
         print(
             f"[run_aligned] +onnx-input: {inp.name}: "
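In ``run_aligned``, the ONNX graph inputs are now stored as numpy arrays instead of raw torch tensors. The diff imports ``to_numpy`` from ``onnx_diagnostic.helpers.torch_helper``; a plausible stand-in for what such a conversion does (a sketch, not the library's implementation):

    import numpy as np
    import torch

    def to_numpy_sketch(v):
        # Detach a torch tensor, move it to CPU and return a numpy array;
        # leave anything that is already array-like untouched.
        if isinstance(v, torch.Tensor):
            return v.detach().cpu().numpy()
        return np.asarray(v)

    print(to_numpy_sketch(torch.ones((2, 3))).shape)  # (2, 3)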
{onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/METADATA

@@ -1,43 +1,23 @@
 Metadata-Version: 2.4
 Name: onnx-diagnostic
-Version: 0.7.
-Summary:
+Version: 0.7.14
+Summary: Tools to help converting pytorch models into ONNX.
 Home-page: https://github.com/sdpython/onnx-diagnostic
 Author: Xavier Dupré
-Author-email: xavier.dupre@gmail.com
-
-
-
-
-Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development
-Classifier: Topic :: Scientific/Engineering
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Unix
-Classifier: Operating System :: MacOS
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
+Author-email: Xavier Dupré <xavier.dupre@gmail.com>
+License: MIT
+Project-URL: Homepage, https://sdpython.github.io/doc/onnx-diagnostic/dev/
+Project-URL: Repository, https://github.com/sdpython/onnx-diagnostic/
+Requires-Python: >=3.9
 Description-Content-Type: text/x-rst
 License-File: LICENSE.txt
-Requires-Dist: numpy
-Requires-Dist: onnx>=1.16.0
-Requires-Dist: onnxruntime>=1.23
-Requires-Dist: optree
-Requires-Dist: torch>=2.8
-Requires-Dist: torch_geometric
 Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
 Dynamic: home-page
 Dynamic: license-file
-
-
+
+
+.. image:: https://github.com/sdpython/onnx-diagnostic/raw/main/_doc/_static/logo.png
+    :width: 120

 onnx-diagnostic: investigate onnx models
 ========================================
{onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/RECORD

@@ -1,11 +1,11 @@
-onnx_diagnostic/__init__.py,sha256=
+onnx_diagnostic/__init__.py,sha256=fxgnYe-ZeX2ZhqiqehQfAUIDhdiy2BjpbzcaUtrI5g8,174
 onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
-onnx_diagnostic/_command_lines_parser.py,sha256=
+onnx_diagnostic/_command_lines_parser.py,sha256=qCPdI1_Za7OM1MuR1utyhTcSZQlM4UVmN8Su4HoRjvI,33670
 onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
 onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
 onnx_diagnostic/ext_test_case.py,sha256=emfQGiQSz5FVDhyJ1Acsv_Tast7tWl426TjtpNqxDBU,43558
 onnx_diagnostic/export/__init__.py,sha256=yEIoWiOeTwBsDhyYt2fTKuhtA0Ya1J9u9ZzMTOTWaWs,101
-onnx_diagnostic/export/dynamic_shapes.py,sha256=
+onnx_diagnostic/export/dynamic_shapes.py,sha256=80yvtxYNsRCplUNiyL7acb9qhl4-HXIes8C62yuQ8JE,41976
 onnx_diagnostic/export/shape_helper.py,sha256=PI_SgE1MNRKSrQ414eYoBZ54QGZbYisHSvqi9tstL2s,7795
 onnx_diagnostic/export/validate.py,sha256=_PGUql2DJhIgGKo0WjTGUc5AgsZUx8fEs00MePy-w98,6043
 onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIOpZDsJg,83
@@ -16,21 +16,21 @@ onnx_diagnostic/helpers/cache_helper.py,sha256=zxjm0-3lHs0A7wLEejz2r2KPMPjkkva--
 onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDfvm9ToI1vNH0,5618
 onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
 onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
-onnx_diagnostic/helpers/helper.py,sha256=
-onnx_diagnostic/helpers/log_helper.py,sha256=
+onnx_diagnostic/helpers/helper.py,sha256=zl7vG6G4ueq931Z9iT8OlKfmtFxvRJD2WJQh_qsMiBs,63401
+onnx_diagnostic/helpers/log_helper.py,sha256=xBKz5rj2-jEtN_tFKsOV4RpBGermrv7CWqG3KUm2psI,87335
 onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
-onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=
-onnx_diagnostic/helpers/model_builder_helper.py,sha256=
+onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=Cgx1ojmV0S_JpZ_UqwsNxeULMMDvMInXslhkE34fwec,22051
+onnx_diagnostic/helpers/model_builder_helper.py,sha256=sK40KRN9GWK1vbNJHIXkYAojblbKD0bdom7BFmoNSv4,12860
 onnx_diagnostic/helpers/onnx_helper.py,sha256=oxl3x0EQowGP9kfz8aKDqnJZcvYY8FeZLsfoLJDiSUg,39826
 onnx_diagnostic/helpers/ort_session.py,sha256=UgUUeUslDxEFBc6w6f3HMq_a7bn4TBlItmojqWquSj4,29281
-onnx_diagnostic/helpers/rt_helper.py,sha256=
-onnx_diagnostic/helpers/torch_helper.py,sha256=
+onnx_diagnostic/helpers/rt_helper.py,sha256=JnqsidpmX47ux5jaA_7Of_eS7KIRlOTqqDKo7ZUD-bI,5251
+onnx_diagnostic/helpers/torch_helper.py,sha256=SY01uEx5tKtPcix91AifhgmsvNkDMGpTigT7w_0Nj98,34442
 onnx_diagnostic/reference/__init__.py,sha256=rLZsxOlnb7-81F2CzepGnZLejaROg4JvgFaGR9FwVQA,208
 onnx_diagnostic/reference/evaluator.py,sha256=RzNzjFDeMe-4X51Tb22N6aagazY5ktNq-mRmPcfY5EU,8848
 onnx_diagnostic/reference/ort_evaluator.py,sha256=nituItsP3IKDDWF9z-iGX_iAubrTcdk8pb1GVBp9sCU,26161
 onnx_diagnostic/reference/quantized_tensor.py,sha256=5u67uS2uGacdMD5VYCbpojNjiesDlV_kO0fAJ0vUWGE,1098
 onnx_diagnostic/reference/report_results_comparison.py,sha256=OsyQN8EHZZoj97u74RQP-7WFpebPOso5GEDpdkLWu6M,3645
-onnx_diagnostic/reference/torch_evaluator.py,sha256=
+onnx_diagnostic/reference/torch_evaluator.py,sha256=Tx1teWvfGEX5RmkDnI83UiOlo5eBOC72vPhgTWdFUF0,27689
 onnx_diagnostic/reference/ops/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 onnx_diagnostic/reference/ops/op_add_add_mul_mul.py,sha256=CXQVtgVrT066gDJFwxL4nDSY4G8r08XNu3EwhWqMapU,1521
 onnx_diagnostic/reference/ops/op_attention.py,sha256=ThALMDF53v3QeG1bohi0bvX2o90HZhGJbbAFOtwEHPE,2027
@@ -77,7 +77,7 @@ onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=umZmjGW1gDUFkqvBJnQ
 onnx_diagnostic/tasks/feature_extraction.py,sha256=Zh9p_Q8FqEO2_aqI0cCiq8OXuM3WUZbwItlLOmLnNl8,5537
 onnx_diagnostic/tasks/fill_mask.py,sha256=5Gt6zlj0p6vuifox7Wmj-TpHXJvPS0CEH8evgdBHDNA,2640
 onnx_diagnostic/tasks/image_classification.py,sha256=nLpBBB1Gkog3Fk6pu2waiHcuQr4ILPptc9FhQ-pn460,4682
-onnx_diagnostic/tasks/image_text_to_text.py,sha256=
+onnx_diagnostic/tasks/image_text_to_text.py,sha256=EcaIdSYfaGLomSuO6G39lNd70tqFb19Xx3CjpQxQp9o,21538
 onnx_diagnostic/tasks/image_to_video.py,sha256=SoF2cVIJr6P30Abp-FCuixFDh5RvTuNEOL36QthGY6U,3860
 onnx_diagnostic/tasks/mask_generation.py,sha256=fjdD3rd-O-mFL0hQy3la3JXKth_0bH2HL7Eelq-3Dbs,5057
 onnx_diagnostic/tasks/mixture_of_expert.py,sha256=al4tk1BrHidtRiHlAaiflWiJaAte0d5M8WcBioANG9k,2808
@@ -86,41 +86,43 @@ onnx_diagnostic/tasks/sentence_similarity.py,sha256=vPqNZgAnIvY0rKWPUTs0IlU3RFQD
 onnx_diagnostic/tasks/summarization.py,sha256=8vB_JiRzDEacIvr8CYTuVQTH73xG_jNkndoS9RHJTSs,8292
 onnx_diagnostic/tasks/text2text_generation.py,sha256=35eF_RlSeMdLTZPooLMAnszs-z0bkKZ34Iej3JgA96A,8602
 onnx_diagnostic/tasks/text_classification.py,sha256=CGc72SpXFzTUyzAHEMPgyy_s187DaYGsRdrosxG80_Q,2711
-onnx_diagnostic/tasks/text_generation.py,sha256
+onnx_diagnostic/tasks/text_generation.py,sha256=-oWq_I1lAUm9wxJnvFM1kXDJAmHbCiM6lUG3waR3o2k,13909
 onnx_diagnostic/tasks/text_to_image.py,sha256=mOS3Ruosi3hzRMxXLDN7ZkAbi7NnQb7MWwQP_okGVHs,2962
 onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=jJCMWuOqGv5ahCfjrcqxuYCJFhTgHV5KUf2yyv2yxYA,4624
+onnx_diagnostic/tasks/data/__init__.py,sha256=uJoemrWgEjI6oA-tMX7r3__x-b3siPmkgqaY7bgIles,401
+onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx,sha256=UbtvmWMqcZOKJ-I-HXWI1A6YR6QDaFS5u_yXm5C3ZBw,10299
 onnx_diagnostic/torch_export_patches/__init__.py,sha256=0SaZedwznm1hQUCvXZsGZORV5vby954wEExr5faepGg,720
-onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=
+onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=ZMsUeU3Hx5YD8xNgQTaW8Br88HvPSiCmqmKLhMz5jw0,30459
 onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=klvqiMjccwGhiRnLRVbwTi5WWkMfvtnOV5ycirPcAdA,11354
 onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
-onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=
+onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=2HQZKQV6TM5430RIvKiMPe4cfGvFdx1UnP1w76CeGE4,8110
 onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
 onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=2U0AdyZuU0W54QTdE7tY7imVzMnpQ5091ADNtTCkT8Y,6967
-onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=
-onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=
+onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=YQoOGt9XQLWqnJ15NnT7ri_jDevfvpuQwEJo38E-VRU,25056
+onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=joDJV1YfrhYBR_6eXYvNO1jbiJM8Whb47NWZxo8SBwg,27172
 onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=
-onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=
+onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=muA2i6Krd6iB2-nIteplxo_pvQEx4LQMZTxDmLe1n44,40825
+onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=hHO7XOaUw3XrhPSrG2hTpMNzGVm_zigLg8d7hMOK7Gs,79188
 onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
 onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
 onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=mcmZGekzQlLgE_o3SdKlRgCx4ewwyyAuNWZ9CaN_zrI,9317
 onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
-onnx_diagnostic/torch_models/validate.py,sha256=
+onnx_diagnostic/torch_models/validate.py,sha256=XNGZi7qSSytUczDfJ-X2ff5xvGdWdWkwjyz8ejxUSCE,79107
 onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
 onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=rFbiPNLET-KdBpnv-p0nKgwHX6d7C_Z0s9zZ86_92kQ,14307
 onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=8V_pAgACPLPsLRYUododg7MSL6str-T3tBEGY4OaeYQ,8724
-onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=
-onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=
+onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=aSa_7Rjider6ruqQ2-fXQMyyDS8VhB1xKxcPNk8qUeU,288776
+onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=FaNFmWmzAqQQ7nM-L0eypeHG-YmCReXxwOOAb3UN7D0,15493
 onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=j50Nu7wddJMoqmD4QzMbNdFDUUgUmSBKRzPDH55TlUQ,2498
 onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=JbGZmW41MPJcQgqaJc9R2G00nI79nI-lABN-ffA1lmY,4037
 onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tmtVl83SmVOL4-Um7Qy-f0E48QI,2507
 onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
-onnx_diagnostic/torch_onnx/sbs.py,sha256=
-onnx_diagnostic-0.7.
-onnx_diagnostic-0.7.
-onnx_diagnostic-0.7.
-onnx_diagnostic-0.7.
-onnx_diagnostic-0.7.
+onnx_diagnostic/torch_onnx/sbs.py,sha256=IoKLA5UwS6kY8g4OOf_bdQwCziIsQfBczZ3w8wo4wZM,16905
+onnx_diagnostic-0.7.14.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+onnx_diagnostic-0.7.14.dist-info/METADATA,sha256=id7f09epUAspAc4BxIlxRp0HhfGpR4SXI3BnYx0bjts,6730
+onnx_diagnostic-0.7.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+onnx_diagnostic-0.7.14.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+onnx_diagnostic-0.7.14.dist-info/RECORD,,
{onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/WHEEL: file without changes
{onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/licenses/LICENSE.txt: file without changes
{onnx_diagnostic-0.7.12.dist-info → onnx_diagnostic-0.7.14.dist-info}/top_level.txt: file without changes