onnx_diagnostic-0.7.10-py3-none-any.whl → onnx_diagnostic-0.7.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/onnx_diagnostic/__init__.py
+++ b/onnx_diagnostic/__init__.py
@@ -3,5 +3,5 @@ Patches, Investigates onnx models.
 Functions, classes to dig into a model when this one is right, slow, wrong...
 """
 
-__version__ = "0.7.10"
+__version__ = "0.7.11"
 __author__ = "Xavier Dupré"
--- a/onnx_diagnostic/_command_lines_parser.py
+++ b/onnx_diagnostic/_command_lines_parser.py
@@ -474,7 +474,7 @@ def get_parser_validate() -> ArgumentParser:
     )
     parser.add_argument(
         "--runtime",
-        choices=["onnxruntime", "torch", "ref"],
+        choices=["onnxruntime", "torch", "ref", "orteval", "orteval10"],
        default="onnxruntime",
        help="onnx runtime to use, `onnxruntime` by default",
    )
@@ -542,6 +542,12 @@ def get_parser_validate() -> ArgumentParser:
         "the onnx exporter should use.",
         default="",
     )
+    parser.add_argument(
+        "--ort-logs",
+        default=False,
+        action=BooleanOptionalAction,
+        help="Enables onnxruntime logging when the session is created",
+    )
     return parser
 
 
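Note: the two parser changes above extend the `validate` command line. A minimal sketch of the new flag semantics, using a standalone parser that mirrors the options added here (`BooleanOptionalAction` automatically generates the negated form; the real command line is built by `get_parser_validate`):

    from argparse import ArgumentParser, BooleanOptionalAction

    p = ArgumentParser()
    p.add_argument(
        "--runtime",
        choices=["onnxruntime", "torch", "ref", "orteval", "orteval10"],
        default="onnxruntime",
    )
    p.add_argument("--ort-logs", default=False, action=BooleanOptionalAction)
    assert p.parse_args(["--runtime", "orteval", "--ort-logs"]).ort_logs is True
    assert p.parse_args(["--no-ort-logs"]).ort_logs is False  # generated automatically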
@@ -601,6 +607,7 @@ def _cmd_validate(argv: List[Any]):
         repeat=args.repeat,
         warmup=args.warmup,
         inputs2=args.inputs2,
+        ort_logs=args.ort_logs,
         output_names=(
             None if len(args.outnames.strip()) < 2 else args.outnames.strip().split(",")
         ),
--- a/onnx_diagnostic/helpers/cache_helper.py
+++ b/onnx_diagnostic/helpers/cache_helper.py
@@ -4,11 +4,6 @@ import torch
 import transformers
 import transformers.cache_utils
 
-try:
-    from transformers.models.mamba.modeling_mamba import MambaCache
-except ImportError:
-    from transformers.cache_utils import MambaCache
-
 
 class CacheKeyValue:
     """
@@ -354,8 +349,15 @@ def make_encoder_decoder_cache(
     )
 
 
-def make_mamba_cache(key_value_pairs: List[Tuple[torch.Tensor, torch.Tensor]]) -> MambaCache:
+def make_mamba_cache(
+    key_value_pairs: List[Tuple[torch.Tensor, torch.Tensor]],
+) -> "MambaCache":  # noqa: F821
     "Creates a ``MambaCache``."
+    # import is moved here because this part is slow.
+    try:
+        from transformers.models.mamba.modeling_mamba import MambaCache
+    except ImportError:
+        from transformers.cache_utils import MambaCache
     dtype = key_value_pairs[0][0].dtype
 
     class _config:
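Note: moving the `MambaCache` import inside `make_mamba_cache` means importing the helper module no longer pays for `transformers.models.mamba`. A rough way to observe the effect (timing numbers are machine-dependent):

    import time

    t = time.perf_counter()
    import onnx_diagnostic.helpers.cache_helper  # noqa: F401  # mamba no longer imported here
    print(f"module import: {time.perf_counter() - t:.3f}s")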
--- a/onnx_diagnostic/tasks/__init__.py
+++ b/onnx_diagnostic/tasks/__init__.py
@@ -5,6 +5,8 @@ from . import (
     fill_mask,
     image_classification,
     image_text_to_text,
+    image_to_video,
+    mask_generation,
     mixture_of_expert,
     object_detection,
     sentence_similarity,
@@ -14,7 +16,6 @@ from . import (
     text_to_image,
     text2text_generation,
     zero_shot_image_classification,
-    mask_generation,
 )
 
 __TASKS__ = [
@@ -23,6 +24,8 @@ __TASKS__ = [
     fill_mask,
     image_classification,
     image_text_to_text,
+    image_to_video,
+    mask_generation,
     mixture_of_expert,
     object_detection,
     sentence_similarity,
@@ -32,7 +35,6 @@ __TASKS__ = [
     text_to_image,
     text2text_generation,
     zero_shot_image_classification,
-    mask_generation,
 ]
 
 
--- /dev/null
+++ b/onnx_diagnostic/tasks/image_to_video.py
@@ -0,0 +1,127 @@
+from typing import Any, Callable, Dict, Optional, Tuple
+import torch
+from ..helpers.config_helper import (
+    update_config,
+    check_hasattr,
+    default_num_hidden_layers as nhl,
+)
+
+__TASK__ = "image-to-video"
+
+
+def reduce_model_config(config: Any) -> Dict[str, Any]:
+    """Reduces a model size."""
+    if not hasattr(config, "num_hidden_layers") and not hasattr(config, "num_layers"):
+        # We cannot reduce.
+        return {}
+    check_hasattr(config, ("num_hidden_layers", "num_layers"))
+    kwargs = {}
+    if hasattr(config, "num_layers"):
+        kwargs["num_layers"] = min(config.num_layers, nhl())
+    if hasattr(config, "num_hidden_layers"):
+        kwargs["num_hidden_layers"] = min(config.num_hidden_layers, nhl())
+
+    update_config(config, kwargs)
+    return kwargs
+
+
+def get_inputs(
+    model: torch.nn.Module,
+    config: Optional[Any],
+    text_embed_dim: int,
+    latent_channels: int,
+    batch_size: int = 2,
+    image_height: int = 704,
+    image_width: int = 1280,
+    latent_frames: int = 1,
+    text_maxlen: int = 512,
+    add_second_input: int = 1,
+    **kwargs,  # unused
+):
+    """
+    Generates inputs for task ``image-to-video``.
+    """
+    assert (
+        "cls_cache" not in kwargs
+    ), f"Not yet implemented for cls_cache={kwargs['cls_cache']!r}."
+    latent_height = image_height // 8
+    latent_width = image_width // 8
+    dtype = torch.float32
+
+    inputs = dict(
+        hidden_states=torch.randn(
+            batch_size,
+            latent_channels,
+            latent_frames,
+            latent_height,
+            latent_width,
+            dtype=dtype,
+        ),
+        timestep=torch.tensor([1.0] * batch_size, dtype=dtype),
+        encoder_hidden_states=torch.randn(
+            batch_size, text_maxlen, text_embed_dim, dtype=dtype
+        ),
+        padding_mask=torch.ones(1, 1, image_height, image_width, dtype=dtype),
+        fps=torch.tensor([16] * batch_size, dtype=dtype),
+        condition_mask=torch.randn(
+            batch_size, 1, latent_frames, latent_height, latent_width, dtype=dtype
+        ),
+    )
+    shapes = dict(
+        hidden_states={
+            0: "batch_size",
+            2: "latent_frames",
+            3: "latent_height",
+            4: "latent_width",
+        },
+        timestep={0: "batch_size"},
+        encoder_hidden_states={0: "batch_size"},
+        padding_mask={0: "batch_size", 2: "height", 3: "width"},
+        fps={0: "batch_size"},
+        condition_mask={
+            0: "batch_size",
+            2: "latent_frames",
+            3: "latent_height",
+            4: "latent_width",
+        },
+    )
+    res = dict(inputs=inputs, dynamic_shapes=shapes)
+
+    if add_second_input:
+        assert (
+            add_second_input > 0
+        ), f"Not implemented for add_second_input={add_second_input}."
+        res["inputs2"] = get_inputs(
+            model=model,
+            config=config,
+            text_embed_dim=text_embed_dim,
+            latent_channels=latent_channels,
+            batch_size=batch_size,
+            image_height=image_height,
+            image_width=image_width,
+            latent_frames=latent_frames,
+            text_maxlen=text_maxlen,
+            add_second_input=0,
+            **kwargs,
+        )["inputs"]
+    return res
+
+
+def random_input_kwargs(config: Any) -> Tuple[Dict[str, Any], Callable]:
+    """
+    Inputs kwargs.
+
+    If the configuration is None, the function selects typical dimensions.
+    """
+    if config is not None:
+        check_hasattr(config, "in_channels", "text_embed_dim")
+    kwargs = dict(
+        text_embed_dim=1024 if config is None else config.text_embed_dim,
+        latent_channels=16 if config is None else config.in_channels - 1,
+        batch_size=1,
+        image_height=8 * 50,
+        image_width=8 * 80,
+        latent_frames=1,
+        text_maxlen=512,
+    )
+    return kwargs, get_inputs
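Note: a hedged sketch of exercising the new task module without a real model. With `config=None`, `random_input_kwargs` falls back to the typical dimensions above, and since `get_inputs` only forwards `model` and `config`, `None` is enough to inspect the generated shapes:

    from onnx_diagnostic.tasks.image_to_video import random_input_kwargs

    kwargs, fct = random_input_kwargs(None)  # text_embed_dim=1024, 400x640 image, 1 frame
    res = fct(None, None, **kwargs)          # model/config are only forwarded
    print({k: tuple(v.shape) for k, v in res["inputs"].items()})
    # e.g. hidden_states -> (1, 16, 1, 50, 80), encoder_hidden_states -> (1, 512, 1024)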
--- a/onnx_diagnostic/torch_export_patches/patches/patch_transformers.py
+++ b/onnx_diagnostic/torch_export_patches/patches/patch_transformers.py
@@ -35,6 +35,9 @@ except ImportError:
 from ...ext_test_case import has_transformers
 from ...helpers.torch_helper import is_torchdynamo_exporting
 
+patch_is_initialized = pv.Version(transformers.__version__) > pv.Version("4.56.99")
+
+
 if patch_masking_utils:
     # Introduced in 4.52
     from transformers.masking_utils import (
@@ -213,6 +216,8 @@ if patch_DynamicLayer:
             new_shape[-2] = 0
             self.keys = torch.empty(new_shape, dtype=self.dtype, device=self.device)
             self.values = torch.empty(new_shape, dtype=self.dtype, device=self.device)
+            if patch_is_initialized:
+                self.is_initialized = True
 
 
 def _patch_make_causal_mask(
--- a/onnx_diagnostic/torch_models/hghub/hub_api.py
+++ b/onnx_diagnostic/torch_models/hghub/hub_api.py
@@ -177,6 +177,51 @@ def task_from_arch(
     return data[arch]
 
 
+def _trygetattr(config, attname):
+    try:
+        return getattr(config, attname)
+    except AttributeError:
+        return None
+
+
+def architecture_from_config(config) -> Optional[str]:
+    """Guesses the architecture (class) of the model described by this config."""
+    if isinstance(config, dict):
+        if "_class_name" in config:
+            return config["_class_name"]
+        if "architecture" in config:
+            return config["architecture"]
+        if config.get("architectures", []):
+            return config["architectures"][0]
+    if hasattr(config, "_class_name"):
+        return config._class_name
+    if hasattr(config, "architecture"):
+        return config.architecture
+    if hasattr(config, "architectures") and config.architectures:
+        return config.architectures[0]
+    if hasattr(config, "__dict__"):
+        if "_class_name" in config.__dict__:
+            return config.__dict__["_class_name"]
+        if "architecture" in config.__dict__:
+            return config.__dict__["architecture"]
+        if config.__dict__.get("architectures", []):
+            return config.__dict__["architectures"][0]
+    return None
+
+
+def find_package_source(config) -> Optional[str]:
+    """Guesses the package the model class comes from."""
+    if isinstance(config, dict):
+        if "_diffusers_version" in config:
+            return "diffusers"
+    if hasattr(config, "_diffusers_version"):
+        return "diffusers"
+    if hasattr(config, "__dict__"):
+        if "_diffusers_version" in config.__dict__:
+            return "diffusers"
+    return "transformers"
+
+
 def task_from_id(
     model_id: str,
     default_value: Optional[str] = None,
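Note: the two new helpers accept both dict-style (diffusers) and attribute-style (transformers) configurations. A small usage sketch (the version string is illustrative):

    from onnx_diagnostic.torch_models.hghub.hub_api import (
        architecture_from_config,
        find_package_source,
    )

    cfg = {"_class_name": "CosmosTransformer3DModel", "_diffusers_version": "0.35.0"}
    assert architecture_from_config(cfg) == "CosmosTransformer3DModel"
    assert find_package_source(cfg) == "diffusers"
    assert find_package_source({}) == "transformers"  # the default when nothing matches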
@@ -202,28 +247,30 @@ def task_from_id(
     if not fall_back_to_pretrained:
         raise
     config = get_pretrained_config(model_id, subfolder=subfolder)
-    try:
-        return config.pipeline_tag
-    except AttributeError:
-        guess = _guess_task_from_config(config)
-        if guess is not None:
-            return guess
-        data = load_architecture_task()
-        if model_id in data:
-            return data[model_id]
-        if type(config) is dict and "_class_name" in config:
-            return task_from_arch(config["_class_name"], default_value=default_value)
-        if not config.architectures or not config.architectures:
-            # Some hardcoded values until a better solution is found.
-            if model_id.startswith("google/bert_"):
-                return "fill-mask"
-        assert config.architectures is not None and len(config.architectures) == 1, (
-            f"Cannot return the task of {model_id!r}, pipeline_tag is not setup, "
-            f"architectures={config.architectures} in config={config}. "
-            f"The task can be added in "
-            f"``onnx_diagnostic.torch_models.hghub.hub_data.__data_arch__``."
-        )
-        return task_from_arch(config.architectures[0], default_value=default_value)
+    tag = _trygetattr(config, "pipeline_tag")
+    if tag is not None:
+        return tag
+
+    guess = _guess_task_from_config(config)
+    if guess is not None:
+        return guess
+    data = load_architecture_task()
+    if subfolder:
+        full_id = f"{model_id}//{subfolder}"
+        if full_id in data:
+            return data[full_id]
+    if model_id in data:
+        return data[model_id]
+    arch = architecture_from_config(config)
+    if arch is None:
+        if model_id.startswith("google/bert_"):
+            return "fill-mask"
+    assert arch is not None, (
+        f"Cannot return the task of {model_id!r}, pipeline_tag is not setup, "
+        f"config={config}. The task can be added in "
+        f"``onnx_diagnostic.torch_models.hghub.hub_data.__data_arch__``."
+    )
+    return task_from_arch(arch, default_value=default_value)
 
 
 def task_from_tags(tags: Union[str, List[str]]) -> str:
--- a/onnx_diagnostic/torch_models/hghub/hub_data.py
+++ b/onnx_diagnostic/torch_models/hghub/hub_data.py
@@ -30,6 +30,7 @@ __data_arch__ = textwrap.dedent(
     ConvBertModel,feature-extraction
     ConvNextForImageClassification,image-classification
     ConvNextV2Model,image-feature-extraction
+    CosmosTransformer3DModel,image-to-video
     CvtModel,feature-extraction
     DPTModel,image-feature-extraction
     Data2VecAudioModel,feature-extraction
@@ -156,7 +157,8 @@ __data_arch__ = textwrap.dedent(
     YolosForObjectDetection,object-detection
     YolosModel,image-feature-extraction
     Alibaba-NLP/gte-large-en-v1.5,sentence-similarity
-    emilyalsentzer/Bio_ClinicalBERT,fill-mask"""
+    emilyalsentzer/Bio_ClinicalBERT,fill-mask
+    nvidia/Cosmos-Predict2-2B-Video2World//transformer,image-to-video"""
 )
 
 __data_tasks__ = [
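Note: the new `model_id//subfolder` row pairs with the subfolder-aware lookup added to `task_from_id` above. A hedged sketch (resolving a task may still query the Hugging Face hub for the config, so network access or a local cache is assumed):

    from onnx_diagnostic.torch_models.hghub.hub_api import task_from_id

    task = task_from_id("nvidia/Cosmos-Predict2-2B-Video2World", subfolder="transformer")
    print(task)  # expected: 'image-to-video'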
--- a/onnx_diagnostic/torch_models/hghub/model_inputs.py
+++ b/onnx_diagnostic/torch_models/hghub/model_inputs.py
@@ -2,13 +2,21 @@ import copy
 import inspect
 import os
 import pprint
+import time
 from typing import Any, Dict, Optional, Tuple
 import torch
 import transformers
 from ...helpers.config_helper import update_config, build_diff_config
 from ...tasks import reduce_model_config, random_input_kwargs
-from .hub_api import task_from_arch, task_from_id, get_pretrained_config, download_code_modelid
-from .model_specific import HANDLED_MODELS, load_specific_model
+from .hub_api import (
+    task_from_arch,
+    task_from_id,
+    get_pretrained_config,
+    download_code_modelid,
+    architecture_from_config,
+    find_package_source,
+)
+from .model_specific import HANDLED_MODELS, load_specific_model, instantiate_specific_model
 
 
 def _code_needing_rewriting(model: Any) -> Any:
@@ -96,27 +104,18 @@ def get_untrained_model_with_inputs(
         model, task, config = load_specific_model(model_id, verbose=verbose)
 
     if model is None:
-        if hasattr(config, "architecture") and config.architecture:
-            archs = [config.architecture]
-        if type(config) is dict:
-            assert (
-                "_class_name" in config
-            ), f"Unable to get the architecture from config={config}"
-            archs = [config["_class_name"]]
-        else:
-            archs = config.architectures  # type: ignore
-        task = None
-        if archs is None:
-            task = task_from_id(model_id)
-        assert task is not None or (archs is not None and len(archs) == 1), (
+        arch = architecture_from_config(config)
+        if arch is None:
+            task = task_from_id(model_id, subfolder=subfolder)
+        assert task is not None or arch is not None, (
             f"Unable to determine the architecture for model {model_id!r}, "
-            f"architectures={archs!r}, conf={config}"
+            f"archs={arch!r}, conf={config}"
         )
         if verbose:
-            print(f"[get_untrained_model_with_inputs] architectures={archs!r}")
+            print(f"[get_untrained_model_with_inputs] architecture={arch!r}")
             print(f"[get_untrained_model_with_inputs] cls={config.__class__.__name__!r}")
         if task is None:
-            task = task_from_arch(archs[0], model_id=model_id, subfolder=subfolder)
+            task = task_from_arch(arch, model_id=model_id, subfolder=subfolder)
         if verbose:
             print(f"[get_untrained_model_with_inputs] task={task!r}")
 
@@ -170,36 +169,58 @@ def get_untrained_model_with_inputs(
             f"{getattr(config, '_attn_implementation', '?')!r}"  # type: ignore[union-attr]
         )
 
-    if type(config) is dict and "_diffusers_version" in config:
+    if find_package_source(config) == "diffusers":
         import diffusers
 
         package_source = diffusers
     else:
         package_source = transformers
 
+    if verbose:
+        print(
+            f"[get_untrained_model_with_inputs] package_source={package_source.__name__} "
+            f"from {package_source.__file__}"
+        )
     if use_pretrained:
+        begin = time.perf_counter()
+        if verbose:
+            print(
+                f"[get_untrained_model_with_inputs] pretrained model_id {model_id!r}, "
+                f"subfolder={subfolder!r}"
+            )
         model = transformers.AutoModel.from_pretrained(
-            model_id, trust_remote_code=True, **mkwargs
+            model_id, subfolder=subfolder, trust_remote_code=True, **mkwargs
         )
+        if verbose:
+            print(
+                f"[get_untrained_model_with_inputs] -- done in "
+                f"{time.perf_counter() - begin}s"
+            )
     else:
-        if archs is not None:
+        begin = time.perf_counter()
+        if verbose:
+            print(
+                f"[get_untrained_model_with_inputs] instantiate model_id {model_id!r}, "
+                f"subfolder={subfolder!r}"
+            )
+        if arch is not None:
             try:
-                cls_model = getattr(package_source, archs[0])
+                cls_model = getattr(package_source, arch)
             except AttributeError as e:
                 # The code of the models is not in transformers but in the
                 # repository of the model. We need to download it.
                 pyfiles = download_code_modelid(model_id, verbose=verbose)
                 if pyfiles:
-                    if "." in archs[0]:
-                        cls_name = archs[0]
+                    if "." in arch:
+                        cls_name = arch
                     else:
                         modeling = [_ for _ in pyfiles if "/modeling_" in _]
                         assert len(modeling) == 1, (
                             f"Unable to guess the main file implemented class "
-                            f"{archs[0]!r} from {pyfiles}, found={modeling}."
+                            f"{arch!r} from {pyfiles}, found={modeling}."
                         )
                         last_name = os.path.splitext(os.path.split(modeling[0])[-1])[0]
-                        cls_name = f"{last_name}.{archs[0]}"
+                        cls_name = f"{last_name}.{arch}"
                     if verbose:
                         print(
                             f"[get_untrained_model_with_inputs] "
@@ -217,7 +238,7 @@ def get_untrained_model_with_inputs(
                         )
                 else:
                     raise AttributeError(
-                        f"Unable to find class 'tranformers.{archs[0]}'. "
+                        f"Unable to find class 'tranformers.{arch}'. "
                         f"The code needs to be downloaded, config="
                         f"\n{pprint.pformat(config)}."
                     ) from e
@@ -225,20 +246,31 @@ def get_untrained_model_with_inputs(
             assert same_as_pretrained and use_pretrained, (
                 f"Model {model_id!r} cannot be built, the model cannot be built. "
                 f"It must be downloaded. Use same_as_pretrained=True "
-                f"and use_pretrained=True."
+                f"and use_pretrained=True, arch={arch!r}, config={config}"
+            )
+        if verbose:
+            print(
+                f"[get_untrained_model_with_inputs] -- done in "
+                f"{time.perf_counter() - begin}s"
             )
 
-        try:
-            if type(config) is dict:
-                model = cls_model(**config)
-            else:
-                model = cls_model(config)
-        except RuntimeError as e:
-            raise RuntimeError(
-                f"Unable to instantiate class {cls_model.__name__} with\n{config}"
-            ) from e
+        seed = int(os.environ.get("SEED", "17"))
+        torch.manual_seed(seed)
+        model = instantiate_specific_model(cls_model, config)
+        if model is None:
+            try:
+                if type(config) is dict:
+                    model = cls_model(**config)
+                else:
+                    model = cls_model(config)
+            except RuntimeError as e:
+                raise RuntimeError(
+                    f"Unable to instantiate class {cls_model.__name__} with\n{config}"
+                ) from e
 
     # input kwargs
+    seed = int(os.environ.get("SEED", "17")) + 1
+    torch.manual_seed(seed)
     kwargs, fct = random_input_kwargs(config, task)  # type: ignore[arg-type]
     if verbose:
         print(f"[get_untrained_model_with_inputs] use fct={fct}")
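Note: both `torch.manual_seed` calls above derive from a base seed read from the environment, which makes weight initialization and input generation reproducible and independently perturbable. A sketch of overriding it:

    import os

    os.environ["SEED"] = "42"  # weights are seeded with 42, input generation with 43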
@@ -250,7 +282,7 @@ def get_untrained_model_with_inputs(
 
     # This line is important. Some models may produce different
     # outputs even with the same inputs in training mode.
-    model.eval()
+    model.eval()  # type: ignore[union-attr]
     res = fct(model, config, add_second_input=add_second_input, **kwargs)
 
     res["input_kwargs"] = kwargs
--- a/onnx_diagnostic/torch_models/hghub/model_specific.py
+++ b/onnx_diagnostic/torch_models/hghub/model_specific.py
@@ -1,6 +1,33 @@
 from typing import Any, Dict, Tuple
 
 
+def instantiate_specific_model(cls_model: type, config: Any) -> object:
+    """
+    Instantiates some model requiring some specific code.
+    """
+    if cls_model.__name__ == "CosmosTransformer3DModel":
+        return instantiate_CosmosTransformer3DModel(cls_model, config)
+    return None
+
+
+def instantiate_CosmosTransformer3DModel(cls_model: type, config: Any) -> object:
+    kwargs = dict(
+        in_channels=config.in_channels,
+        out_channels=config.out_channels,
+        attention_head_dim=config.attention_head_dim,
+        mlp_ratio=config.mlp_ratio,
+        num_layers=config.num_layers,
+        text_embed_dim=config.text_embed_dim,
+        adaln_lora_dim=config.adaln_lora_dim,
+        max_size=config.max_size,
+        patch_size=config.patch_size,
+        rope_scale=config.rope_scale,
+        concat_padding_mask=config.concat_padding_mask,
+        extra_pos_embed_type=config.extra_pos_embed_type,
+    )
+    return cls_model(**kwargs)
+
+
 class SpecificConfig:
     """Creates a specific configuration for the loaded model."""
 
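Note: the dispatcher keys on the class name only; anything it does not recognize returns `None`, which sends `get_untrained_model_with_inputs` down the generic `cls_model(config)` path shown earlier. A tiny illustration:

    from onnx_diagnostic.torch_models.hghub.model_specific import instantiate_specific_model

    assert instantiate_specific_model(dict, config=None) is None  # 'dict' is not a handled class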
--- a/onnx_diagnostic/torch_models/validate.py
+++ b/onnx_diagnostic/torch_models/validate.py
@@ -7,8 +7,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 import time
 import numpy as np
 import onnx
-import onnxscript
-import onnxscript.rewriter.ort_fusions as ort_fusions
 import torch
 from ..export import CoupleInputsDynamicShapes
 from ..helpers import max_diff, string_type, string_diff
@@ -113,6 +111,7 @@ def _make_folder_name(
     dtype: Optional[Union[str, torch.dtype]] = None,
     device: Optional[Union[str, torch.device]] = None,
     subfolder: Optional[str] = None,
+    opset: Optional[int] = None,
 ) -> str:
     "Creates a unique filename based on the given options."
     els = [model_id.replace("/", "_")]
@@ -136,6 +135,8 @@ def _make_folder_name(
     else:
         raise AssertionError(f"unexpected value for device={device}, sdev={sdev!r}")
     els.append(sdev)
+    if opset is not None:
+        els.append(f"op{opset}")
     return "-".join(els)
 
 
@@ -246,6 +247,7 @@ def _quiet_or_not_quiet(
         summary[f"time_{suffix}_latency_std"] = a.std()
         summary[f"time_{suffix}_latency_min"] = a.min()
         summary[f"time_{suffix}_latency_max"] = a.max()
+        summary[f"time_{suffix}_n"] = len(a)
     return res
 
 
@@ -262,6 +264,16 @@ def shrink_config(cfg: Dict[str, Any]) -> Dict[str, Any]:
     return new_cfg
 
 
+def _preprocess_model_id(model_id, subfolder):
+    if subfolder or "//" not in model_id:
+        return model_id, subfolder
+    spl = model_id.split("//")
+    if spl[-1] in {"transformer", "vae"}:
+        # known subfolder
+        return "//".join(spl[:-1]), spl[-1]
+    return model_id, subfolder
+
+
 def validate_model(
     model_id: str,
     task: Optional[str] = None,
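Note: a sketch of the `//` convention handled by the new helper (pure string manipulation, no I/O; the helper is private and imported here only for illustration):

    from onnx_diagnostic.torch_models.validate import _preprocess_model_id

    assert _preprocess_model_id("nvidia/Cosmos-Predict2-2B-Video2World//transformer", "") == (
        "nvidia/Cosmos-Predict2-2B-Video2World",
        "transformer",
    )
    # unknown suffixes and explicit subfolders are left untouched
    assert _preprocess_model_id("org/model//other", "") == ("org/model//other", "")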
@@ -290,6 +302,7 @@ validate_model(
     warmup: int = 0,
     inputs2: int = 1,
     output_names: Optional[List[str]] = None,
+    ort_logs: bool = False,
 ) -> Tuple[Dict[str, Union[int, float, str]], Dict[str, Any]]:
     """
     Validates a model.
@@ -334,13 +347,15 @@
     :param subfolder: version or subfolder to use when retrieving a model id
     :param opset: onnx opset to use for the conversion
     :param runtime: onnx runtime to use to check about discrepancies,
-        only if `do_run` is true
+        possible values ``onnxruntime``, ``torch``, ``orteval``,
+        ``orteval10``, ``ref``, only if `do_run` is true
     :param repeat: number of times to measure the model
     :param warmup: warmup the model first
     :param inputs2: checks that the second set of inputs is running as well,
         this ensures that the model does support dynamism, the value is used
         as an increment to the first set of values (added to dimensions)
     :param output_names: output names the onnx exporter should use
+    :param ort_logs: increases onnxruntime verbosity when creating the session
     :return: two dictionaries, one with some metrics,
         another one with whatever the function produces
@@ -361,8 +376,15 @@
 
     The default runtime, :epkg:`onnxruntime` is used to validate a model and check the
     exported model returns the same outputs as the original one, otherwise,
-    :class:`onnx_diagnostic.reference.TorchOnnxEvaluator` is used.
+    :class:`onnx_diagnostic.reference.TorchOnnxEvaluator`
+    if ``runtime == 'torch'`` or
+    :class:`onnx_diagnostic.reference.OnnxruntimeEvaluator`
+    if ``runtime == 'orteval'`` or
+    :class:`onnx_diagnostic.reference.ExtendedReferenceEvaluator`
+    if ``runtime == 'ref'``,
+    ``orteval10`` increases the verbosity.
     """
+    model_id, subfolder = _preprocess_model_id(model_id, subfolder)
     if isinstance(patch, bool):
         patch_kwargs = (
             dict(patch_transformers=True, patch_diffusers=True, patch=True)
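Note: a hedged sketch of driving the new runtimes programmatically; the model id is illustrative and only arguments visible in this diff are relied upon:

    from onnx_diagnostic.torch_models.validate import validate_model

    summary, data = validate_model(
        "nvidia/Cosmos-Predict2-2B-Video2World//transformer",  # "//" resolved to subfolder
        do_run=True,
        runtime="orteval",  # OnnxruntimeEvaluator; "orteval10" forces verbose=10
        ort_logs=False,
    )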
@@ -412,7 +434,13 @@
     folder_name = None
     if dump_folder:
         folder_name = _make_folder_name(
-            model_id, exporter, optimization, dtype=dtype, device=device, subfolder=subfolder
+            model_id,
+            exporter,
+            optimization,
+            dtype=dtype,
+            device=device,
+            subfolder=subfolder,
+            opset=opset,
         )
         dump_folder = os.path.join(dump_folder, folder_name)
         if not os.path.exists(dump_folder):
@@ -743,6 +771,7 @@
         repeat=repeat,
         warmup=warmup,
         inputs2=inputs2,
+        ort_logs=ort_logs,
     )
     summary.update(summary_valid)
 
@@ -837,15 +866,24 @@ def compute_statistics(onnx_filename: str) -> Dict[str, Union[float, int]]:
         raise NotImplementedError(f"Unexpected type={type(proto)}")
 
     counts: Dict[str, Union[float, int]] = {}
+    n_nodes = 0
+    n_nodes_nocst = 0
     for proto in node_iter(onx):
         if isinstance(proto, onnx.NodeProto):
             key = f"n_node_{proto.op_type}"
+            n_nodes += 1
+            if proto.op_type != "Constant":
+                n_nodes_nocst += 1
         else:
             key = f"n_node_initializer_{proto.data_type}"
 
         if key not in counts:
             counts[key] = 0
         counts[key] += 1
+
+    counts["n_node_nodes"] = n_nodes
+    counts["n_node_nodes_nocst"] = n_nodes_nocst
+    counts["n_node_functions"] = len(onx.functions)
     return counts
 
 
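Note: the three aggregate counters land next to the per-op-type counts. A short usage sketch (the path is illustrative):

    from onnx_diagnostic.torch_models.validate import compute_statistics

    stats = compute_statistics("model.onnx")
    print(stats["n_node_nodes"])        # every node, Constant included
    print(stats["n_node_nodes_nocst"])  # nodes excluding Constant
    print(stats["n_node_functions"])    # local functions stored in the model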
@@ -1134,6 +1172,7 @@ def validate_onnx_model(
     repeat: int = 1,
     warmup: int = 0,
     inputs2: int = 1,
+    ort_logs: bool = False,
 ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
     """
     Verifies that an onnx model produces the same
@@ -1146,12 +1185,13 @@
     :param quiet: catch exception or not
     :param verbose: verbosity
     :param flavour: use a different version of the inputs
-    :param runtime: onnx runtime to use, onnxruntime or torch
+    :param runtime: onnx runtime to use, onnxruntime, torch, orteval, ref
     :param repeat: number of times to run the model
     :param warmup: warmup the model
     :param inputs2: to validate the model on the second input set
         to make sure the exported model supports dynamism, the value is
         used as an increment added to the first set of inputs (added to dimensions)
+    :param ort_logs: triggers the logs for onnxruntime
     :return: two dictionaries, one with some metrics,
         another one with whatever the function produces
     """
@@ -1193,23 +1233,71 @@
         f"{providers}..., flavour={flavour!r}"
     )
 
-    if runtime != "onnxruntime":
-        from ..reference import TorchOnnxEvaluator
-
-    cls_runtime = (
-        (
-            lambda model, providers: onnxruntime.InferenceSession(
-                (model.SerializeToString() if isinstance(model, onnx.ModelProto) else model),
-                providers=providers,
-            )
-        )
-        if runtime == "onnxruntime"
-        else (
-            lambda model, providers, _cls_=TorchOnnxEvaluator: _cls_(  # type: ignore[misc]
-                model, providers=providers, verbose=max(verbose - 1, 0)
-            )
-        )
-    )
+    if runtime == "onnxruntime":
+        if os.environ.get("DUMPORTOPT", "") in ("1", "true", "True"):
+            opts = onnxruntime.SessionOptions()
+            opts.optimized_model_filepath = f"{data['onnx_filename']}.rtopt.onnx"
+            if verbose:
+                print(
+                    f"[validate_onnx_model] saved optimized onnxruntime "
+                    f"in {opts.optimized_model_filepath!r}"
+                )
+            onnxruntime.InferenceSession(data["onnx_filename"], opts, providers=providers)
+            if verbose:
+                print("[validate_onnx_model] -- done")
+
+        if verbose:
+            print("[validate_onnx_model] runtime is onnxruntime")
+        sess_opts = onnxruntime.SessionOptions()
+        if ort_logs:
+            sess_opts.log_severity_level = 0
+            sess_opts.log_verbosity_level = 4
+        cls_runtime = lambda model, providers, _o=sess_opts: onnxruntime.InferenceSession(
+            (model.SerializeToString() if isinstance(model, onnx.ModelProto) else model),
+            _o,
+            providers=providers,
+        )
+    elif runtime == "torch":
+        from ..reference import TorchOnnxEvaluator
+
+        if verbose:
+            print("[validate_onnx_model] runtime is TorchOnnxEvaluator")
+        cls_runtime = (
+            lambda model, providers, _cls_=TorchOnnxEvaluator: _cls_(  # type: ignore[misc]
+                model, providers=providers, verbose=max(verbose - 1, 0)
+            )
+        )
+    elif runtime == "orteval":
+        from ..reference import OnnxruntimeEvaluator
+
+        if verbose:
+            print("[validate_onnx_model] runtime is OnnxruntimeEvaluator")
+        cls_runtime = (
+            lambda model, providers, _cls_=OnnxruntimeEvaluator: _cls_(  # type: ignore[misc]
+                model, providers=providers, verbose=max(verbose - 1, 0)
+            )
+        )
+    elif runtime == "orteval10":
+        from ..reference import OnnxruntimeEvaluator
+
+        if verbose:
+            print("[validate_onnx_model] runtime is OnnxruntimeEvaluator(verbose=10)")
+        cls_runtime = (
+            lambda model, providers, _cls_=OnnxruntimeEvaluator: _cls_(  # type: ignore[misc]
+                model, providers=providers, verbose=10
+            )
+        )
+    elif runtime == "ref":
+        from ..reference import ExtendedReferenceEvaluator
+
+        if verbose:
+            print("[validate_onnx_model] runtime is ExtendedReferenceEvaluator")
+        cls_runtime = lambda model, providers, _cls_=ExtendedReferenceEvaluator: _cls_(  # type: ignore[misc]
+            model, verbose=max(verbose - 1, 0)
+        )
+    else:
+        raise ValueError(f"Unexpected runtime={runtime!r}")
+
     sess = _quiet_or_not_quiet(
         quiet,
         _mk("create_onnx_ort"),
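Note: besides the `--ort-logs` switch, the onnxruntime branch above reads a `DUMPORTOPT` environment variable; when set, the graph optimized by onnxruntime is written next to the exported model. A sketch:

    import os

    os.environ["DUMPORTOPT"] = "1"  # writes <onnx_filename>.rtopt.onnx at session creation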
@@ -1390,6 +1478,8 @@ def call_torch_export_onnx(
     if optimization == "ir":
         label, f_optim = "export_onnx_opt_ir", (lambda epo=epo: epo.optimize())
     else:
+        import onnxscript
+        import onnxscript.rewriter.ort_fusions as ort_fusions
 
         def _os_ort_optim(epo):
             onnxscript.optimizer.optimize_ir(epo.model)
@@ -1509,6 +1599,8 @@ def call_torch_export_custom(
         "default+onnxruntime+os_ort",
         None,
     }
+    if optimization == "none":
+        optimization = ""
     assert (
         optimization in available
     ), f"unexpected value for optimization={optimization}, available={available}"
@@ -1672,6 +1764,9 @@ def call_torch_export_custom(
         print("[call_torch_export_custom] done (export)")
 
     if os_ort:
+        import onnxscript
+        import onnxscript.rewriter.ort_fusions as ort_fusions
+
         if verbose:
             print("[call_torch_export_custom] conversion to IR...")
         begin = time.perf_counter()
--- a/onnx_diagnostic-0.7.10.dist-info/METADATA
+++ b/onnx_diagnostic-0.7.11.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx-diagnostic
-Version: 0.7.10
+Version: 0.7.11
 Summary: Investigate ONNX models
 Home-page: https://github.com/sdpython/onnx-diagnostic
 Author: Xavier Dupré
--- a/onnx_diagnostic-0.7.10.dist-info/RECORD
+++ b/onnx_diagnostic-0.7.11.dist-info/RECORD
@@ -1,6 +1,6 @@
-onnx_diagnostic/__init__.py,sha256=zdvFsRTL3vL-gFelvSYnpPCXNbg8EPwbC3qQ47KhLbw,174
+onnx_diagnostic/__init__.py,sha256=tyRANqD6rauk6F7FpFJN5K1FyB1baNeni92_ol7nrdU,174
 onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
-onnx_diagnostic/_command_lines_parser.py,sha256=TVPlDjsWZd_Zb9DzN3zj0OGxd8nz_nUsjQyGkmyMNsA,32939
+onnx_diagnostic/_command_lines_parser.py,sha256=EljrcTEKF4TuSdougR3i3FL4_jVDG8xizrLsLIA2JRs,33185
 onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
 onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
 onnx_diagnostic/ext_test_case.py,sha256=emfQGiQSz5FVDhyJ1Acsv_Tast7tWl426TjtpNqxDBU,43558
@@ -12,7 +12,7 @@ onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIO
 onnx_diagnostic/helpers/_log_helper.py,sha256=OTwQH0OIxs9B6nrSvR7MoxMimSw_8mU0mj133NvLk5o,16832
 onnx_diagnostic/helpers/args_helper.py,sha256=SRWnqC7EENg09RZlA50B_PcdiIhdbgA4C3ACfzl5nMs,4419
 onnx_diagnostic/helpers/bench_run.py,sha256=CGA6VMJZMH2gDhVueT9ypNm4PMcjGrrGFYp08nhWj9k,16539
-onnx_diagnostic/helpers/cache_helper.py,sha256=-2H4hMO5ZIINsaJS7mK9ETgv-kA_d-dlwT1TDp2Yjbo,24754
+onnx_diagnostic/helpers/cache_helper.py,sha256=zxjm0-3lHs0A7wLEejz2r2KPMPjkkva--8511MaSy74,24846
 onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDfvm9ToI1vNH0,5618
 onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
 onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
@@ -72,12 +72,13 @@ onnx_diagnostic/reference/torch_ops/reduce_ops.py,sha256=9gFfraPTQbe_ZEUNCUis1JS
 onnx_diagnostic/reference/torch_ops/sequence_ops.py,sha256=3EiVKpGfN4d1Iry4hgnr3MIJyEEKUrAIDgmRGsUXXa0,2297
 onnx_diagnostic/reference/torch_ops/shape_ops.py,sha256=pJrNR2UB4PlWl6cv4EDl1uGl8YTBUUMQkhJcsh5K4sA,4291
 onnx_diagnostic/reference/torch_ops/unary_ops.py,sha256=dwu6HPr4V_roxu85U3VLTtDLx5bfxKalT_-zlQxZ5wc,1850
-onnx_diagnostic/tasks/__init__.py,sha256=uWFP7HIr-VnxmXD5i_QAfXnLXc1HwUq2e8v9cKLqraQ,2492
+onnx_diagnostic/tasks/__init__.py,sha256=KHMH-YONqUQD3tT6N995wyZuF0R4NIZlIH8moumqmRc,2532
 onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=umZmjGW1gDUFkqvBJnQyaL7D7-HqiwlQpsq6Ip187Dg,7150
 onnx_diagnostic/tasks/feature_extraction.py,sha256=Zh9p_Q8FqEO2_aqI0cCiq8OXuM3WUZbwItlLOmLnNl8,5537
 onnx_diagnostic/tasks/fill_mask.py,sha256=5Gt6zlj0p6vuifox7Wmj-TpHXJvPS0CEH8evgdBHDNA,2640
 onnx_diagnostic/tasks/image_classification.py,sha256=nLpBBB1Gkog3Fk6pu2waiHcuQr4ILPptc9FhQ-pn460,4682
 onnx_diagnostic/tasks/image_text_to_text.py,sha256=wkFrUaEvQAW-D-jql2xSnae1XvQBl-sSbhmAmJ76qGo,17428
+onnx_diagnostic/tasks/image_to_video.py,sha256=SoF2cVIJr6P30Abp-FCuixFDh5RvTuNEOL36QthGY6U,3860
 onnx_diagnostic/tasks/mask_generation.py,sha256=fjdD3rd-O-mFL0hQy3la3JXKth_0bH2HL7Eelq-3Dbs,5057
 onnx_diagnostic/tasks/mixture_of_expert.py,sha256=al4tk1BrHidtRiHlAaiflWiJaAte0d5M8WcBioANG9k,2808
 onnx_diagnostic/tasks/object_detection.py,sha256=3FiT8ya5FCd9lwjQCRXhAwXspNwYTlAD3Gpk8aAcG5w,4279
@@ -99,27 +100,27 @@ onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=57x62uZNA80XiWgkG8F
 onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=OU8-63VDhiWtQV3scBV9JyGXn8ds74OzY2-IOZkwg0A,26580
 onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=TFjuw--sTYPCoVEaYlYLJuElx_CUynJR6s6ypoZtRWw,18956
-onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=wXopyo0-6KmATOfqXMLEvxpe_jDRRIY8fWRjUjMlSkI,67776
+onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=SsN-y2yoVaY3xRGDaIl0V449LcuwKAGBHPKm2JjQNhc,67942
 onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
 onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
 onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=mcmZGekzQlLgE_o3SdKlRgCx4ewwyyAuNWZ9CaN_zrI,9317
 onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
-onnx_diagnostic/torch_models/validate.py,sha256=Qu9gW1AatgpmsWzXN3s-vVCKnKDYTV1wPO3wnUU44wU,67161
+onnx_diagnostic/torch_models/validate.py,sha256=3UJzjH89dpa_pyFoFG_fZ2IwOa25gtC7RxHjKX7c2PQ,70887
 onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
-onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=Bvr-sTAhS6s6UCkt-KsY_7Mdai08-AQzvHrzbYCSuvk,13186
-onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=jN2Y-96DRmj3hBCQT4ugCT6Q5rKv5y5TUi80G-95Zko,8610
+onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=YYSX3pLsGCTwhMFSu-6ML4Bcy09EWmg1GgXSZ5eCQjA,14515
+onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=8V_pAgACPLPsLRYUododg7MSL6str-T3tBEGY4OaeYQ,8724
 onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=3yH1pQbCYNDmRMNUCwMFf5ELnAa35ubTKD2JRF5y9Ls,287515
-onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=-YX0guKGqj14cc8ZTco3QjCNXXBtf8inzwrsQdvQr6w,12559
-onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=ZFajyL9MPZp7N6rveKB0IEAYeNKesbo0ItYoZIz90wc,1540
+onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=qg-_incL_nX9J1bit_nYV5diQN0Zqf7b10ZZfTikbjg,13701
+onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=j50Nu7wddJMoqmD4QzMbNdFDUUgUmSBKRzPDH55TlUQ,2498
 onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=JbGZmW41MPJcQgqaJc9R2G00nI79nI-lABN-ffA1lmY,4037
 onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tmtVl83SmVOL4-Um7Qy-f0E48QI,2507
 onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
 onnx_diagnostic/torch_onnx/sbs.py,sha256=fN799L_G1c2RKEuNcKt_MnQri5dwD4OzeCkBBFFoUBI,16865
-onnx_diagnostic-0.7.10.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
-onnx_diagnostic-0.7.10.dist-info/METADATA,sha256=5FswMlBjyOZNZ-pxujgExFBAiJ3rNd9DfPwWL0f9edw,7435
-onnx_diagnostic-0.7.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-onnx_diagnostic-0.7.10.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
-onnx_diagnostic-0.7.10.dist-info/RECORD,,
+onnx_diagnostic-0.7.11.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+onnx_diagnostic-0.7.11.dist-info/METADATA,sha256=vfCWZZUvnv_GKZxFRDvpKFrz5JU3LDmBH0WPK6uN__I,7435
+onnx_diagnostic-0.7.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+onnx_diagnostic-0.7.11.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+onnx_diagnostic-0.7.11.dist-info/RECORD,,