onnx-diagnostic 0.7.13__py3-none-any.whl → 0.7.15__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1019,6 +1019,26 @@ def patched__compute_dynamic_ntk_parameters(
     return inv_freq, attention_factor


+def _get_rope_init_fn(self, layer_type=None) -> Callable:
+    if hasattr(self, "rope_init_fn"):
+        # transformers<=5.0
+        rope_init_fn = (
+            patched__compute_dynamic_ntk_parameters
+            if self.rope_init_fn
+            is transformers.modeling_rope_utils._compute_dynamic_ntk_parameters
+            else self.rope_init_fn
+        )
+        return rope_init_fn
+
+    rope_type = self.rope_type if layer_type is None else self.rope_type[layer_type]
+    rope_init_fn = self.compute_default_rope_parameters
+    if rope_type != "default":
+        rope_init_fn = transformers.modeling_rope_utils.ROPE_INIT_FUNCTIONS[self.rope_type]
+    if rope_init_fn is transformers.modeling_rope_utils._compute_dynamic_ntk_parameters:
+        return patched__compute_dynamic_ntk_parameters
+    return rope_init_fn
+
+
 def patched_dynamic_rope_update(rope_forward):
     """manual patch: ``[patch:transformers.modeling_rope_utils.dynamic_rope_update]``

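The hunks in this first section appear to come from onnx_diagnostic/torch_export_patches/patches/patch_transformers.py (its hash changes in the RECORD at the end of this diff). The new _get_rope_init_fn helper above dispatches on which RoPE API the installed transformers exposes: when the module still carries a rope_init_fn attribute it swaps in the patched NTK function, otherwise it resolves the function from ROPE_INIT_FUNCTIONS by rope type, possibly per layer type. The following is a minimal, self-contained sketch of that dispatch pattern; every name in it is illustrative and not taken from the package.

# --- illustrative sketch (not part of the diff) ----------------------------
def _original_init(config, device=None, seq_len=None):
    return "original", seq_len


def _patched_init(config, device=None, seq_len=None):
    return "patched", seq_len


# registry keyed by rope type, in the spirit of ROPE_INIT_FUNCTIONS
REGISTRY = {"default": _original_init, "dynamic": _original_init}


def pick_init_fn(module, layer_type=None):
    if hasattr(module, "rope_init_fn"):
        # older API: the function was stored on the module at construction
        # time, so it is replaced only when it is the one being patched
        return _patched_init if module.rope_init_fn is _original_init else module.rope_init_fn
    # newer API: look the function up by rope type (possibly per layer type)
    rope_type = module.rope_type if layer_type is None else module.rope_type[layer_type]
    fn = REGISTRY[rope_type]
    return _patched_init if fn is _original_init else fn


class OldStyleRope:
    rope_init_fn = staticmethod(_original_init)


class NewStyleRope:
    rope_type = {"full_attention": "dynamic"}


print(pick_init_fn(OldStyleRope()) is _patched_init)                    # True
print(pick_init_fn(NewStyleRope(), "full_attention") is _patched_init)  # True
# --- end of sketch ----------------------------------------------------------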
@@ -1082,22 +1102,27 @@ def patched_dynamic_rope_update(rope_forward):
 
     """

-    def longrope_frequency_update(self, position_ids, device):
+    def longrope_frequency_update(self, position_ids, device, layer_type=None):
         # It is no use to patch the function after the model is created
         # as rope_init_fn is an attribute set to one function when the model
         # is created and when no patch is applied yet.
         # So we select the patched version here.
-        rope_init_fn = (
-            patched__compute_dynamic_ntk_parameters
-            if self.rope_init_fn
-            is transformers.modeling_rope_utils._compute_dynamic_ntk_parameters
-            else self.rope_init_fn
-        )
+        rope_init_fn = _get_rope_init_fn(self, layer_type=layer_type)
         seq_len = torch.max(position_ids) + 1
         if hasattr(self.config, "original_max_position_embeddings"):
             original_max_position_embeddings = self.config.original_max_position_embeddings
         else:
             original_max_position_embeddings = self.config.max_position_embeddings
+
+        if layer_type is None:
+            # rope_type = self.rope_type
+            original_inv_freq = self.original_inv_freq
+            prefix = ""
+        else:
+            # rope_type = self.rope_type[layer_type]
+            original_inv_freq = getattr(self, f"{layer_type}_original_inv_freq")
+            prefix = f"{layer_type}_"
+
         # At export time, seq_len is unknown.
         long_inv_freq, _ = rope_init_fn(
             self.config, device, seq_len=original_max_position_embeddings + 1
@@ -1112,13 +1137,13 @@ def patched_dynamic_rope_update(rope_forward):
             (lambda x, y: y.clone()),
             [long_inv_freq, original_inv_freq],
         )
-        self.inv_freq = inv_freq
+        setattr(self, f"{prefix}inv_freq", inv_freq)
         # if seq_len > original_max_position_embeddings:
         # self.inv_freq = self.long_inv_freq
         # else:
         # self.inv_freq = self.original_inv_freq

-    def dynamic_frequency_update(self, position_ids, device):
+    def dynamic_frequency_update(self, position_ids, device, layer_type=None):
         # constructor:
         # - self.max_seq_len_cached = config.max_position_embeddings
         # - self.original_max_seq_len = config.max_position_embeddings
@@ -1128,12 +1153,7 @@ def patched_dynamic_rope_update(rope_forward):
         # as rope_init_fn is an attribute set to one function when the model
         # is created and when no patch is applied yet.
         # So we select the patched version here.
-        rope_init_fn = (
-            patched__compute_dynamic_ntk_parameters
-            if self.rope_init_fn
-            is transformers.modeling_rope_utils._compute_dynamic_ntk_parameters
-            else self.rope_init_fn
-        )
+        rope_init_fn = _get_rope_init_fn(self, layer_type=layer_type)

         # This behaviour is difficult to translate.
         # The sequence always grows.
@@ -1162,6 +1182,19 @@ def patched_dynamic_rope_update(rope_forward):
             self.config, device, seq_len=seq_len
         )

+        if layer_type is None:
+            # rope_type = self.rope_type
+            # max_seq_len_cached = self.max_seq_len_cached
+            original_inv_freq = self.original_inv_freq
+            prefix = ""
+        else:
+            # rope_type = self.rope_type[layer_type]
+            # max_seq_len_cached = getattr(
+            # self, f"{layer_type}_max_seq_len_cached", self.max_seq_len_cached
+            # )
+            original_inv_freq = getattr(self, f"{layer_type}_original_inv_freq")
+            prefix = f"{layer_type}_"
+
         # Second test to translate.
         # Let's keep in mind, self.max_seq_len_cached = seq_len is likely to be True.
         # But in that case the following condition is a way to restore the original cache.
@@ -1183,15 +1216,26 @@ def patched_dynamic_rope_update(rope_forward):
             (lambda x, y: y.clone()),
             [long_inv_freq, original_inv_freq],
         )
-        self.inv_freq = inv_freq
+        setattr(self, f"{prefix}inv_freq", inv_freq)

     @wraps(rope_forward)
-    def wrapper(self, x, position_ids):
+    def wrapper(self, x, position_ids, layer_type=None):
+        if layer_type is None:
+            if "dynamic" in self.rope_type:
+                dynamic_frequency_update(self, position_ids, device=x.device)
+            elif self.rope_type == "longrope":
+                longrope_frequency_update(self, position_ids, device=x.device)
+            return rope_forward(self, x, position_ids)
+
         if "dynamic" in self.rope_type:
-            dynamic_frequency_update(self, position_ids, device=x.device)
+            dynamic_frequency_update(
+                self, position_ids, device=x.device, layer_type=layer_type
+            )
         elif self.rope_type == "longrope":
-            longrope_frequency_update(self, position_ids, device=x.device)
-        return rope_forward(self, x, position_ids)
+            longrope_frequency_update(
+                self, position_ids, device=x.device, layer_type=layer_type
+            )
+        return rope_forward(self, x, position_ids, layer_type=layer_type)

     return wrapper

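The common thread of the hunks above is that the per-layer-type RoPE buffers introduced by newer transformers are addressed through a name prefix: when layer_type is set, the caches live in attributes such as <layer_type>_inv_freq and <layer_type>_original_inv_freq, so the patch reads them with getattr and writes them back with setattr using an f-string prefix, and the wrapper forwards layer_type down to rope_forward. Below is a small sketch of that addressing scheme, independent of transformers; the class and attribute names are made up.

# --- illustrative sketch (not part of the diff) ----------------------------
import torch


class FakeRope:
    def __init__(self):
        # single-rope layout (older transformers)
        self.inv_freq = torch.ones(4)
        self.original_inv_freq = torch.ones(4)
        # per-layer-type layout (newer transformers)
        self.sliding_attention_inv_freq = torch.full((4,), 2.0)
        self.sliding_attention_original_inv_freq = torch.full((4,), 2.0)


def reset_inv_freq(module, layer_type=None):
    # same prefix trick as the patched longrope/dynamic frequency updates
    prefix = "" if layer_type is None else f"{layer_type}_"
    original_inv_freq = getattr(module, f"{prefix}original_inv_freq")
    setattr(module, f"{prefix}inv_freq", original_inv_freq.clone())


rope = FakeRope()
rope.inv_freq += 1
rope.sliding_attention_inv_freq += 1
reset_inv_freq(rope)                        # restores rope.inv_freq
reset_inv_freq(rope, "sliding_attention")   # restores rope.sliding_attention_inv_freq
print(rope.inv_freq, rope.sliding_attention_inv_freq)
# --- end of sketch ----------------------------------------------------------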
@@ -1287,12 +1331,18 @@ class common_RotaryEmbedding(torch.nn.Module):
     # @torch.no_grad()
     # PATCHED: the decorator
     @patched_dynamic_rope_update
-    def forward(self, x, position_ids):
+    def forward(self, x, position_ids, layer_type=None):
+        if layer_type is not None:
+            # transformers>=5.0
+            inv_freq = getattr(self, f"{layer_type}_inv_freq")
+            attention_scaling = getattr(self, f"{layer_type}_attention_scaling")
+        else:
+            # transformers<5.0
+            inv_freq = self.inv_freq
+            attention_scaling = self.attention_scaling
+
         inv_freq_expanded = (
-            self.inv_freq[None, :, None]
-            .float()
-            .expand(position_ids.shape[0], -1, 1)
-            .to(x.device)
+            inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
         )
         position_ids_expanded = position_ids[:, None, :].float()

@@ -1304,8 +1354,8 @@ class common_RotaryEmbedding(torch.nn.Module):
         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
             emb = torch.cat((freqs, freqs), dim=-1)
-            cos = emb.cos() * self.attention_scaling
-            sin = emb.sin() * self.attention_scaling
+            cos = emb.cos() * attention_scaling
+            sin = emb.sin() * attention_scaling

         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

@@ -1380,7 +1430,8 @@ class patched_IdeficsEmbedding(torch.nn.Module):
 
     def _set_cos_sin_cache_then(x, inv_freq, seq_len, _cos_cached, _sin_cached):
         t = torch.arange(seq_len, device=x.device, dtype=torch.int64).type_as(inv_freq)
-        freqs = torch.einsum("i,j->ij", t, inv_freq)
+        # freqs = torch.einsum("i,j->ij", t, inv_freq)
+        freqs = t.reshape((-1, 1)) * inv_freq.reshape((1, -1))
         emb = torch.cat((freqs, freqs), dim=-1)
         return emb.cos().to(x.dtype), emb.sin().to(x.dtype)

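The last hunk of this section replaces torch.einsum("i,j->ij", t, inv_freq) with an explicit broadcasted multiplication. Both expressions compute the same outer product, which the short check below illustrates (the tensor sizes are arbitrary).

# --- illustrative sketch (not part of the diff) ----------------------------
import torch

t = torch.arange(6, dtype=torch.float32)   # positions, shape (6,)
inv_freq = torch.rand(4)                   # inverse frequencies, shape (4,)

freqs_einsum = torch.einsum("i,j->ij", t, inv_freq)
freqs_broadcast = t.reshape((-1, 1)) * inv_freq.reshape((1, -1))

# both are the (6, 4) outer product t[i] * inv_freq[j]
print(torch.allclose(freqs_einsum, freqs_broadcast))  # True
# --- end of sketch ----------------------------------------------------------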
@@ -25,6 +25,20 @@ def _code_needing_rewriting(model: Any) -> Any:
     return code_needing_rewriting(model)


+def _preprocess_model_id(
+    model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
+) -> Tuple[str, Optional[str], bool, bool]:
+    if subfolder or "//" not in model_id:
+        return model_id, subfolder, same_as_pretrained, use_pretrained
+    spl = model_id.split("//")
+    if spl[-1] == "pretrained":
+        return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
+    if spl[-1] in {"transformer", "vae"}:
+        # known subfolder
+        return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
+    return model_id, subfolder, same_as_pretrained, use_pretrained
+
+
 def get_untrained_model_with_inputs(
     model_id: str,
     config: Optional[Any] = None,
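From this hunk on, the diff appears to move to onnx_diagnostic/torch_models/hghub/model_inputs.py, which gains the _preprocess_model_id helper shown above (the same function is removed from validate.py further down and re-imported from here). As written, a model id may carry a // suffix: //pretrained switches on the same_as_pretrained and use_pretrained flags, while a known subfolder such as //transformer or //vae is split off into the subfolder argument. The snippet below copies the function so the calls can be traced standalone; the example ids are purely illustrative.

# --- illustrative sketch (not part of the diff) ----------------------------
from typing import Optional, Tuple


def _preprocess_model_id(
    model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
) -> Tuple[str, Optional[str], bool, bool]:
    # copied from the hunk above so the calls below can run standalone
    if subfolder or "//" not in model_id:
        return model_id, subfolder, same_as_pretrained, use_pretrained
    spl = model_id.split("//")
    if spl[-1] == "pretrained":
        return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
    if spl[-1] in {"transformer", "vae"}:
        # known subfolder
        return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
    return model_id, subfolder, same_as_pretrained, use_pretrained


print(_preprocess_model_id("some-org/some-diffuser//transformer", None, False, False))
# ('some-org/some-diffuser', 'transformer', False, False)
print(_preprocess_model_id("some-org/some-llm//pretrained", None, False, False))
# ('some-org/some-llm', '', True, True)
print(_preprocess_model_id("some-org/some-llm", None, False, False))
# ('some-org/some-llm', None, False, False)
# --- end of sketch ----------------------------------------------------------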
@@ -81,12 +95,22 @@ def get_untrained_model_with_inputs(
         print("-- dynamic shapes:", pprint.pformat(data['dynamic_shapes']))
         print("-- configuration:", pprint.pformat(data['configuration']))
     """
+    if task == "":
+        task = None
     assert not use_preinstalled or not use_only_preinstalled, (
         f"model_id={model_id!r}, preinstalled model is only available "
         f"if use_only_preinstalled is False."
     )
+    model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
+        model_id,
+        subfolder,
+        same_as_pretrained=same_as_pretrained,
+        use_pretrained=use_pretrained,
+    )
     if verbose:
-        print(f"[get_untrained_model_with_inputs] model_id={model_id!r}")
+        print(
+            f"[get_untrained_model_with_inputs] model_id={model_id!r}, subfolder={subfolder!r}"
+        )
         if use_preinstalled:
             print(f"[get_untrained_model_with_inputs] use preinstalled {model_id!r}")
     if config is None:
@@ -98,14 +122,16 @@ def get_untrained_model_with_inputs(
         **(model_kwargs or {}),
     )

-    model, task, mkwargs, diff_config = None, None, {}, None
+    model, task_, mkwargs, diff_config = None, None, {}, None
     if use_pretrained and same_as_pretrained:
         if model_id in HANDLED_MODELS:
-            model, task, config = load_specific_model(model_id, verbose=verbose)
+            model, task_, config = load_specific_model(model_id, verbose=verbose)

+    if task is None:
+        task = task_
     if model is None:
         arch = architecture_from_config(config)
-        if arch is None:
+        if task is None and arch is None:
             task = task_from_id(model_id, subfolder=subfolder)
         assert task is not None or arch is not None, (
             f"Unable to determine the architecture for model {model_id!r}, "
@@ -178,7 +204,7 @@ def get_untrained_model_with_inputs(
 
     if verbose:
         print(
-            f"[get_untrained_model_with_inputs] package_source={package_source.__name__} é"
+            f"[get_untrained_model_with_inputs] package_source={package_source.__name__} "
             f"from {package_source.__file__}"
         )
     if use_pretrained:
@@ -4,7 +4,7 @@ import inspect
 import os
 import pprint
 import sys
-from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
 import time
 import numpy as np
 import onnx
@@ -19,6 +19,7 @@ from ..tasks import random_input_kwargs
 from ..torch_export_patches import torch_export_patches
 from ..torch_export_patches.patch_inputs import use_dyn_not_str
 from .hghub import get_untrained_model_with_inputs
+from .hghub.model_inputs import _preprocess_model_id


 def empty(value: Any) -> bool:
@@ -116,11 +117,21 @@ def _make_folder_name(
     drop_inputs: Optional[List[str]] = None,
     same_as_pretrained: bool = False,
     use_pretrained: bool = False,
+    task: Optional[str] = None,
 ) -> str:
     "Creates a filename unique based on the given options."
     els = [model_id.replace("/", "_")]
     if subfolder:
         els.append(subfolder.replace("/", "_"))
+    if not task:
+        els.append(task)  # type: ignore[arg-type]
+    if drop_inputs:
+        ii = "-".join(f"{s[0]}{s[-1]}" for s in drop_inputs)
+        els.append(f"I-{ii.upper()}")
+    if use_pretrained:
+        els.append("TRAINED")
+    elif same_as_pretrained:
+        els.append("SAMESIZE")
     if exporter:
         els.append(exporter)
     if optimization:
@@ -141,14 +152,7 @@ def _make_folder_name(
         els.append(sdev)
     if opset is not None:
         els.append(f"op{opset}")
-    if drop_inputs:
-        ii = "-".join(f"{s[0]}{s[-1]}" for s in drop_inputs)
-        els.append(f"I-{ii.upper()}")
-    if use_pretrained:
-        els.append("TRAINED")
-    elif same_as_pretrained:
-        els.append("SAMESIZE")
-    return "-".join(els)
+    return "/".join([e for e in els if e])


 def version_summary() -> Dict[str, Union[int, float, str]]:
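These two hunks reshape _make_folder_name: a new optional task component is added, the drop_inputs and TRAINED/SAMESIZE markers move earlier in the list, and the parts are now joined with "/" instead of "-" (with empty parts filtered out), so the name effectively becomes a nested directory path; the later hunks in validate_model turn it back into a flat stem with replace("/", "-") before writing the .ep and .pt2 dumps. A rough illustration with made-up parts:

# --- illustrative sketch (not part of the diff) ----------------------------
els = ["some-org_some-llm", "text-generation", "custom-export", None, "op18"]

old_style = "-".join(e for e in els if e)   # flat folder name (previous behaviour)
new_style = "/".join(e for e in els if e)   # nested folder layout (new behaviour)
file_stem = new_style.replace("/", "-")     # flattened again for dump file names

print(old_style)   # some-org_some-llm-text-generation-custom-export-op18
print(new_style)   # some-org_some-llm/text-generation/custom-export/op18
print(file_stem)   # some-org_some-llm-text-generation-custom-export-op18
# --- end of sketch ----------------------------------------------------------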
@@ -289,20 +293,6 @@ def shrink_config(cfg: Dict[str, Any]) -> Dict[str, Any]:
     return new_cfg


-def _preprocess_model_id(
-    model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
-) -> Tuple[str, Optional[str], bool, bool]:
-    if subfolder or "//" not in model_id:
-        return model_id, subfolder, same_as_pretrained, use_pretrained
-    spl = model_id.split("//")
-    if spl[-1] == "pretrained":
-        return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
-    if spl[-1] in {"transformer", "vae"}:
-        # known subfolder
-        return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
-    return model_id, subfolder, same_as_pretrained, use_pretrained
-
-
 def validate_model(
     model_id: str,
     task: Optional[str] = None,
@@ -332,6 +322,7 @@ def validate_model(
     inputs2: int = 1,
     output_names: Optional[List[str]] = None,
     ort_logs: bool = False,
+    quiet_input_sets: Optional[Set[str]] = None,
 ) -> Tuple[Dict[str, Union[int, float, str]], Dict[str, Any]]:
     """
     Validates a model.
@@ -386,6 +377,8 @@ def validate_model(
         or an empty cache for example
     :param output_names: output names the onnx exporter should use
     :param ort_logs: increases onnxruntime verbosity when creating the session
+    :param quiet_input_sets: avoid raising an exception if the inputs belongs to that set
+        even if quiet is False
     :return: two dictionaries, one with some metrics,
         another one with whatever the function produces

@@ -419,14 +412,14 @@ def validate_model(
         such as ``input_empty_cache``
         which refers to a set of inputs using an empty cache.
     """
-    validation_begin = time.perf_counter()
+    main_validation_begin = time.perf_counter()
     model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
         model_id,
         subfolder,
         same_as_pretrained=same_as_pretrained,
         use_pretrained=use_pretrained,
     )
-    time_preprocess_model_id = time.perf_counter() - validation_begin
+    time_preprocess_model_id = time.perf_counter() - main_validation_begin
     default_patch = dict(patch_transformers=True, patch_diffusers=True, patch=True)
     if isinstance(patch, bool):
         patch_kwargs = default_patch if patch else dict(patch=False)
@@ -486,6 +479,7 @@ def validate_model(
         drop_inputs=drop_inputs,
         use_pretrained=use_pretrained,
         same_as_pretrained=same_as_pretrained,
+        task=task,
     )
     dump_folder = os.path.join(dump_folder, folder_name)
     if not os.path.exists(dump_folder):
@@ -500,6 +494,8 @@ def validate_model(
             print(f"[validate_model] validate model id {model_id!r}, subfolder={subfolder!r}")
         else:
             print(f"[validate_model] validate model id {model_id!r}")
+        if task:
+            print(f"[validate_model] with task {task!r}")
         print(f"[validate_model] patch={patch!r}")
         if model_options:
             print(f"[validate_model] model_options={model_options!r}")
@@ -775,6 +771,10 @@ def validate_model(
         ep = data["exported_program"]
         if verbose:
             print(f"[validate_model] -- dumps exported program in {dump_folder!r}...")
+        assert isinstance(
+            folder_name, str
+        ), f"folder_name={folder_name!r} should be a string"
+        folder_name = folder_name.replace("/", "-")
         with open(os.path.join(dump_folder, f"{folder_name}.ep"), "w") as f:
             f.write(str(ep))
         torch.export.save(ep, os.path.join(dump_folder, f"{folder_name}.pt2"))
@@ -783,6 +783,10 @@ def validate_model(
         if verbose:
             print("[validate_model] done (dump ep)")
     if "onnx_program" in data:
+        assert isinstance(
+            folder_name, str
+        ), f"folder_name={folder_name!r} should be a string"
+        folder_name = folder_name.replace("/", "-")
         epo = data["onnx_program"]
         if verbose:
             print(f"[validate_model] dumps onnx program in {dump_folder!r}...")
@@ -855,6 +859,7 @@ def validate_model(
             warmup=warmup,
             second_input_keys=second_input_keys,
             ort_logs=ort_logs,
+            quiet_input_sets=quiet_input_sets,
         )
         summary.update(summary_valid)
         summary["time_total_validation_onnx"] = time.perf_counter() - validation_begin
@@ -917,11 +922,12 @@ def validate_model(
             repeat=repeat,
             warmup=warmup,
             second_input_keys=second_input_keys,
+            quiet_input_sets=quiet_input_sets,
         )
         summary.update(summary_valid)

     _compute_final_statistics(summary)
-    summary["time_total"] = time.perf_counter() - validation_begin
+    summary["time_total"] = time.perf_counter() - main_validation_begin

     if verbose:
         print("[validate_model] -- done (final)")
@@ -1302,6 +1308,7 @@ def validate_onnx_model(
     warmup: int = 0,
     second_input_keys: Optional[List[str]] = None,
     ort_logs: bool = False,
+    quiet_input_sets: Optional[Set[str]] = None,
 ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
     """
     Verifies that an onnx model produces the same
@@ -1321,6 +1328,7 @@ def validate_onnx_model(
         to make sure the exported model supports dynamism, the value is
         used as an increment added to the first set of inputs (added to dimensions)
     :param ort_logs: triggers the logs for onnxruntime
+    :param quiet_input_sets: avoid raising an exception for these sets of inputs
     :return: two dictionaries, one with some metrics,
         another one with whatever the function produces
     """
@@ -1444,6 +1452,8 @@ def validate_onnx_model(
     keys = [("inputs", "run_expected", "")]
     if second_input_keys:
         keys.extend([(k, f"run_expected2{k[6:]}", f"2{k[6:]}") for k in second_input_keys])
+    if verbose:
+        print(f"[validate_onnx_model] -- keys={keys}")
     for k_input, k_expected, suffix in keys:
         # make_feeds
         assert k_input in data, f"Unable to find {k_input!r} in {sorted(data)}"
@@ -1468,10 +1478,12 @@ def validate_onnx_model(
 
         # run ort
         if verbose:
-            print("[validate_onnx_model] run session...")
+            print(f"[validate_onnx_model] run session on inputs 'inputs{suffix}'...")
+            if quiet_input_sets and f"inputs{suffix}" in quiet_input_sets:
+                print(f"[validate_onnx_model] quiet_input_sets={quiet_input_sets}")

         got = _quiet_or_not_quiet(
-            quiet,
+            quiet or (quiet_input_sets is not None and f"inputs{suffix}" in quiet_input_sets),
             _mk(f"run_onnx_ort{suffix}"),
             summary,
             data,
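The quiet_input_sets changes above, together with the new validate_model and validate_onnx_model parameters earlier in this diff, make the error handling selective per input set: the ONNX session run is executed quietly when quiet is True or when the current input set name (for instance inputs or inputs2) belongs to quiet_input_sets. Below is a generic sketch of that per-case quiet-or-raise pattern, independent of the package's _quiet_or_not_quiet helper; every name in it is made up.

# --- illustrative sketch (not part of the diff) ----------------------------
from typing import Callable, Optional, Set


def run_case(
    fn: Callable[[], object],
    case: str,
    quiet: bool,
    quiet_cases: Optional[Set[str]] = None,
):
    # be quiet globally, or only for the listed input sets
    be_quiet = quiet or (quiet_cases is not None and case in quiet_cases)
    if not be_quiet:
        return fn()
    try:
        return fn()
    except Exception as exc:  # recorded instead of raised
        return f"FAIL:{case}:{exc}"


def failing_run():
    raise RuntimeError("discrepancy")


print(run_case(lambda: 42, "inputs", quiet=False))                             # 42
print(run_case(failing_run, "inputs2", quiet=False, quiet_cases={"inputs2"}))  # FAIL:inputs2:discrepancy
# --- end of sketch ----------------------------------------------------------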
@@ -1744,6 +1756,7 @@ def process_statistics(data: Sequence[Dict[str, float]]) -> Dict[str, Any]:
         "constant_folding",
         "remove_identity",
         "remove_duplicated_initializer",
+        "remove_duplicated_shape",
         "dynamic_dimension_naming",
         "inline",
         "check",
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx-diagnostic
-Version: 0.7.13
+Version: 0.7.15
 Summary: Tools to help converting pytorch models into ONNX.
 Home-page: https://github.com/sdpython/onnx-diagnostic
 Author: Xavier Dupré
@@ -1,6 +1,6 @@
-onnx_diagnostic/__init__.py,sha256=Sv9eg4qDNdyO5uUafa3e98pIerP4faa203FF3hqygOI,174
+onnx_diagnostic/__init__.py,sha256=rsSRl1QPe3XLEW8cPAdFVV6eLYgcRFg37pNNVTw5FAc,174
 onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
-onnx_diagnostic/_command_lines_parser.py,sha256=wleBwnoCDyAWRYRREUSGkwAJKw2YI4Td_7ydxmdOXfI,33457
+onnx_diagnostic/_command_lines_parser.py,sha256=bl6lorcodFk14dm2lgeCjr4OkRziUrKXn3GGkbxIuVA,33987
 onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
 onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
 onnx_diagnostic/ext_test_case.py,sha256=emfQGiQSz5FVDhyJ1Acsv_Tast7tWl426TjtpNqxDBU,43558
@@ -12,18 +12,18 @@ onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIO
 onnx_diagnostic/helpers/_log_helper.py,sha256=OTwQH0OIxs9B6nrSvR7MoxMimSw_8mU0mj133NvLk5o,16832
 onnx_diagnostic/helpers/args_helper.py,sha256=SRWnqC7EENg09RZlA50B_PcdiIhdbgA4C3ACfzl5nMs,4419
 onnx_diagnostic/helpers/bench_run.py,sha256=CGA6VMJZMH2gDhVueT9ypNm4PMcjGrrGFYp08nhWj9k,16539
-onnx_diagnostic/helpers/cache_helper.py,sha256=zxjm0-3lHs0A7wLEejz2r2KPMPjkkva--8511MaSy74,24846
-onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDfvm9ToI1vNH0,5618
+onnx_diagnostic/helpers/cache_helper.py,sha256=4fkPKLG590l1Gbqaw_QubXeJqY17X2Z6CBmLp23-BYI,24849
+onnx_diagnostic/helpers/config_helper.py,sha256=cWRETgFhZ7tayIZPnMqF8BF5AvTU64G2BMqyzgO7lzs,5670
 onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
 onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
 onnx_diagnostic/helpers/helper.py,sha256=zl7vG6G4ueq931Z9iT8OlKfmtFxvRJD2WJQh_qsMiBs,63401
-onnx_diagnostic/helpers/log_helper.py,sha256=SKzxJ6DdP9uq4e2feA2nqd2Rreq4G-ujKZFUELfycP0,85674
+onnx_diagnostic/helpers/log_helper.py,sha256=xBKz5rj2-jEtN_tFKsOV4RpBGermrv7CWqG3KUm2psI,87335
 onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
 onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=Cgx1ojmV0S_JpZ_UqwsNxeULMMDvMInXslhkE34fwec,22051
 onnx_diagnostic/helpers/model_builder_helper.py,sha256=sK40KRN9GWK1vbNJHIXkYAojblbKD0bdom7BFmoNSv4,12860
 onnx_diagnostic/helpers/onnx_helper.py,sha256=oxl3x0EQowGP9kfz8aKDqnJZcvYY8FeZLsfoLJDiSUg,39826
 onnx_diagnostic/helpers/ort_session.py,sha256=UgUUeUslDxEFBc6w6f3HMq_a7bn4TBlItmojqWquSj4,29281
-onnx_diagnostic/helpers/rt_helper.py,sha256=JnqsidpmX47ux5jaA_7Of_eS7KIRlOTqqDKo7ZUD-bI,5251
+onnx_diagnostic/helpers/rt_helper.py,sha256=mmxQ0RQ7mhG0ybHOtzbZiV2mt503JVaKcErQQ79ydWs,5208
 onnx_diagnostic/helpers/torch_helper.py,sha256=SY01uEx5tKtPcix91AifhgmsvNkDMGpTigT7w_0Nj98,34442
 onnx_diagnostic/reference/__init__.py,sha256=rLZsxOlnb7-81F2CzepGnZLejaROg4JvgFaGR9FwVQA,208
 onnx_diagnostic/reference/evaluator.py,sha256=RzNzjFDeMe-4X51Tb22N6aagazY5ktNq-mRmPcfY5EU,8848
@@ -77,7 +77,7 @@ onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=umZmjGW1gDUFkqvBJnQ
 onnx_diagnostic/tasks/feature_extraction.py,sha256=Zh9p_Q8FqEO2_aqI0cCiq8OXuM3WUZbwItlLOmLnNl8,5537
 onnx_diagnostic/tasks/fill_mask.py,sha256=5Gt6zlj0p6vuifox7Wmj-TpHXJvPS0CEH8evgdBHDNA,2640
 onnx_diagnostic/tasks/image_classification.py,sha256=nLpBBB1Gkog3Fk6pu2waiHcuQr4ILPptc9FhQ-pn460,4682
-onnx_diagnostic/tasks/image_text_to_text.py,sha256=EcaIdSYfaGLomSuO6G39lNd70tqFb19Xx3CjpQxQp9o,21538
+onnx_diagnostic/tasks/image_text_to_text.py,sha256=HDXuk1bEE3qTR0mUR_6rw-5RAXSyUvGY-dMNamIpvn0,21577
 onnx_diagnostic/tasks/image_to_video.py,sha256=SoF2cVIJr6P30Abp-FCuixFDh5RvTuNEOL36QthGY6U,3860
 onnx_diagnostic/tasks/mask_generation.py,sha256=fjdD3rd-O-mFL0hQy3la3JXKth_0bH2HL7Eelq-3Dbs,5057
 onnx_diagnostic/tasks/mixture_of_expert.py,sha256=al4tk1BrHidtRiHlAaiflWiJaAte0d5M8WcBioANG9k,2808
@@ -86,34 +86,34 @@ onnx_diagnostic/tasks/sentence_similarity.py,sha256=vPqNZgAnIvY0rKWPUTs0IlU3RFQD
 onnx_diagnostic/tasks/summarization.py,sha256=8vB_JiRzDEacIvr8CYTuVQTH73xG_jNkndoS9RHJTSs,8292
 onnx_diagnostic/tasks/text2text_generation.py,sha256=35eF_RlSeMdLTZPooLMAnszs-z0bkKZ34Iej3JgA96A,8602
 onnx_diagnostic/tasks/text_classification.py,sha256=CGc72SpXFzTUyzAHEMPgyy_s187DaYGsRdrosxG80_Q,2711
-onnx_diagnostic/tasks/text_generation.py,sha256=PRUcVF6XBmOkNA2yi2MUDAT7G8JS1w_6nvjIGcmhST8,13366
+onnx_diagnostic/tasks/text_generation.py,sha256=FwpmI4c_cO9uYQwJFfsHRMArPdwaeU5TBan2lisoHZk,14205
 onnx_diagnostic/tasks/text_to_image.py,sha256=mOS3Ruosi3hzRMxXLDN7ZkAbi7NnQb7MWwQP_okGVHs,2962
 onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=jJCMWuOqGv5ahCfjrcqxuYCJFhTgHV5KUf2yyv2yxYA,4624
 onnx_diagnostic/tasks/data/__init__.py,sha256=uJoemrWgEjI6oA-tMX7r3__x-b3siPmkgqaY7bgIles,401
 onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx,sha256=UbtvmWMqcZOKJ-I-HXWI1A6YR6QDaFS5u_yXm5C3ZBw,10299
 onnx_diagnostic/torch_export_patches/__init__.py,sha256=0SaZedwznm1hQUCvXZsGZORV5vby954wEExr5faepGg,720
-onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=fQGyk6IkapGYYlFxbly8hS5oLWkhIC4bHV3DfZA1Keg,29449
-onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=klvqiMjccwGhiRnLRVbwTi5WWkMfvtnOV5ycirPcAdA,11354
+onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=T2FaIBSU3NfUyt54whwBmRHPuAzmZKFVHuwu-mikNz4,30475
+onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=K78uX5EHTuu0ah3mkZWNcGow4775GKH-EnDs3ZlIEhE,11778
 onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
 onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=2HQZKQV6TM5430RIvKiMPe4cfGvFdx1UnP1w76CeGE4,8110
 onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
 onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=2U0AdyZuU0W54QTdE7tY7imVzMnpQ5091ADNtTCkT8Y,6967
-onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=6z8Fk4rcJKo1Nh2F0K3JGkmFH0XZSIfv5-HvO6bhhTY,24818
-onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=SqaQU0zsvQsZXU0kIrxcURvVCp-ysZAaF01WLlgKZsw,27183
+onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=YQoOGt9XQLWqnJ15NnT7ri_jDevfvpuQwEJo38E-VRU,25056
+onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=joDJV1YfrhYBR_6eXYvNO1jbiJM8Whb47NWZxo8SBwg,27172
 onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=bQFxePwj9OwCFykhcZiLvqOV2sXPBcZXa4260XueHLE,23117
-onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=hHO7XOaUw3XrhPSrG2hTpMNzGVm_zigLg8d7hMOK7Gs,79188
+onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=QIm3GabPnoJDIM1HJl0reyUKf7fg7h57TsHkWfDWjF4,41408
+onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=aVYEhrn48YUGn0rim5o2oygWFkwm3-HsGRpS1rGySeQ,81496
 onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
 onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
 onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=mcmZGekzQlLgE_o3SdKlRgCx4ewwyyAuNWZ9CaN_zrI,9317
 onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
-onnx_diagnostic/torch_models/validate.py,sha256=B5h9iBTtkjFNFP6VzqxCvCfwKe4XQOpre1WeKwKEWNA,79628
+onnx_diagnostic/torch_models/validate.py,sha256=0KL1vQmB9DTFuJqc8_CyddIztuwFx9qpRRPULHd-C04,80434
 onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
 onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=rFbiPNLET-KdBpnv-p0nKgwHX6d7C_Z0s9zZ86_92kQ,14307
 onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=8V_pAgACPLPsLRYUododg7MSL6str-T3tBEGY4OaeYQ,8724
 onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=aSa_7Rjider6ruqQ2-fXQMyyDS8VhB1xKxcPNk8qUeU,288776
-onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=Ioi92UHT3bsfA9oMi9IzY16FxnAKrPJHsEpFepBwr_o,14607
+onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=xIY_CWOp3m5-cJUvDLTZiH9GwiXi6xTYwONgFY4o45g,15593
 onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=j50Nu7wddJMoqmD4QzMbNdFDUUgUmSBKRzPDH55TlUQ,2498
 onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=JbGZmW41MPJcQgqaJc9R2G00nI79nI-lABN-ffA1lmY,4037
@@ -121,8 +121,8 @@ onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tm
 onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
 onnx_diagnostic/torch_onnx/sbs.py,sha256=IoKLA5UwS6kY8g4OOf_bdQwCziIsQfBczZ3w8wo4wZM,16905
-onnx_diagnostic-0.7.13.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
-onnx_diagnostic-0.7.13.dist-info/METADATA,sha256=1ZoJZw78GxT1chXfFumfWyr-kcD8puKgaJ_qTHbfs60,6730
-onnx_diagnostic-0.7.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-onnx_diagnostic-0.7.13.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
-onnx_diagnostic-0.7.13.dist-info/RECORD,,
+onnx_diagnostic-0.7.15.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+onnx_diagnostic-0.7.15.dist-info/METADATA,sha256=8PCb8jeG1AwC10iaBQRqNBE_JF7huNn2o-l_7BnwzzE,6730
+onnx_diagnostic-0.7.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+onnx_diagnostic-0.7.15.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+onnx_diagnostic-0.7.15.dist-info/RECORD,,