onnx-diagnostic 0.7.7__py3-none-any.whl → 0.7.8__py3-none-any.whl

This diff covers the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -3,5 +3,5 @@ Patches, Investigates onnx models.
  Functions, classes to dig into a model when this one is right, slow, wrong...
  """

- __version__ = "0.7.7"
+ __version__ = "0.7.8"
  __author__ = "Xavier Dupré"
@@ -96,13 +96,16 @@ def flatten_unflatten_for_dynamic_shapes(
  return tuple(subtrees)
  if spec.type is list:
  return list(subtrees)
+ if spec.type is None and not subtrees:
+ return None
  if spec.context:
  # This is a custom class with attributes.
  # It is returned as a list.
  return list(subtrees)
  raise ValueError(
  f"Unable to interpret spec type {spec.type} "
- f"(type is {type(spec.type)}, context is {spec.context})."
+ f"(type is {type(spec.type)}, context is {spec.context}), "
+ f"spec={spec}, subtrees={subtrees}"
  )
  # This is a list.
  return subtrees
@@ -119,4 +119,51 @@ def default_num_hidden_layers():
  It is lower when the unit tests are running
  when ``UNITTEST_GOING=1``.
  """
+ import torch
+
+ if torch.cuda.is_available():
+ capa = torch.cuda.get_device_capability(0)
+ if capa[0] < 9:
+ return 2
  return 2 if os.environ.get("UNITTEST_GOING", "0") == "1" else 4
+
+
+ def build_diff_config(config0, config1):
+ """
+ Returns all the modified values between two configuration
+ """
+ import torch
+
+ diff = {}
+ for k in config0:
+ assert isinstance(k, str), f"k={k!r}, wrong type in {config0}"
+ if k not in config1:
+ v0 = getattr(config0, k) if hasattr(config0, k) else config0[k]
+ diff[k] = f"-{v0}"
+ for k in config1:
+ assert isinstance(k, str), f"k={k!r}, wrong type in {config1}"
+ if k not in config0:
+ v1 = getattr(config1, k) if hasattr(config1, k) else config1[k]
+ diff[k] = f"+{v1}"
+ for k in config0:
+ if k not in config1:
+ continue
+ v0 = getattr(config0, k) if hasattr(config0, k) else config0[k]
+ v1 = getattr(config1, k) if hasattr(config1, k) else config1[k]
+ if (
+ v0 is None
+ or v1 is None
+ or isinstance(v1, (float, int, bool, str, list, tuple, torch.dtype))
+ or (
+ isinstance(v0, dict)
+ and isinstance(v1, dict)
+ and all(isinstance(k, int) for k in v1)
+ )
+ ):
+ if v1 != v0:
+ diff[k] = f"{v0} -> {v1}"
+ else:
+ d = build_diff_config(v0, v1)
+ if d:
+ diff[k] = d
+ return diff
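
The new build_diff_config helper (it lands in onnx_diagnostic/helpers/config_helper.py, whose hash and size change in the RECORD section below) reports only the values that differ between two configurations, recursing into nested mappings. A small usage sketch on plain dictionaries with hypothetical values, not taken from the package itself:

    from onnx_diagnostic.helpers.config_helper import build_diff_config

    config0 = {"num_hidden_layers": 32, "hidden_size": 4096,
               "vision_config": {"image_size": 336, "patch_size": 14}}
    config1 = {"num_hidden_layers": 2, "hidden_size": 16, "num_heads": 4,
               "vision_config": {"image_size": 84, "patch_size": 14}}

    # Added keys are reported with a "+" prefix, removed keys with "-",
    # and modified values are rendered as "old -> new".
    print(build_diff_config(config0, config1))
    # {'num_heads': '+4', 'num_hidden_layers': '32 -> 2',
    #  'hidden_size': '4096 -> 16', 'vision_config': {'image_size': '336 -> 84'}}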
@@ -23,14 +23,20 @@ def reduce_model_config(config: Any) -> Dict[str, Any]:
  config.vision_config.num_hidden_layers = min(
  config.vision_config.num_hidden_layers, 2
  )
+ if hasattr(config.vision_config, "num_heads"):
+ config.vision_config.num_heads = min(config.vision_config.num_heads, 4)
  if hasattr(config.vision_config, "image_size"):
- config.vision_config.image_size = min(config.vision_config.image_size, 96)
+ config.vision_config.image_size = min(config.vision_config.image_size, 168 // 2)
  if hasattr(config.vision_config, "intermediate_size"):
  config.vision_config.intermediate_size = min(
  config.vision_config.intermediate_size, 1076
  )
  if hasattr(config.vision_config, "patch_size"):
- config.vision_config.patch_size = min(config.vision_config.patch_size, 2)
+ config.vision_config.patch_size = min(config.vision_config.patch_size, 1)
+ if hasattr(config.vision_config, "temporal_patch_size"):
+ config.vision_config.temporal_patch_size = min(
+ config.vision_config.temporal_patch_size, 8
+ )
  if hasattr(config.vision_config, "hidden_size"):
  config.vision_config.hidden_size = min(config.vision_config.hidden_size, 16)
  if hasattr(config, "text_config"):
@@ -245,6 +251,7 @@ def get_inputs(
  else {0: batch_img}
  ),
  "image_attention_mask": {0: batch, 1: seq_length, 2: images},
+ "image_grid_thw": {0: batch},
  "use_cache": None,
  }

@@ -256,6 +263,11 @@ def get_inputs(
  # input_ids[input_ids == image_token_index] = pad_token_id
  token_type_ids = torch.zeros_like(input_ids)
  token_type_ids[input_ids == image_token_index] = 1
+ image_grid_thw = torch.zeros((n_images, 3), dtype=torch.int64)
+ image_grid_thw[:, 1] = height
+ image_grid_thw[:, 2] = width
+ image_grid_thw[0, :] //= 2
+ image_grid_thw[:, 0] = torch.arange(n_images, dtype=image_grid_thw.dtype)

  inputs = dict(
  input_ids=input_ids,
@@ -291,6 +303,7 @@ def get_inputs(
  torch.int64
  ),
  token_type_ids=token_type_ids,
+ image_grid_thw=image_grid_thw,
  use_cache=True, # Gemma3 does not set this value to true when a cache is provided
  )
  res = dict(inputs=inputs, dynamic_shapes=shapes)
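
For reference, this is the image_grid_thw tensor the added lines above build, evaluated with small hypothetical values (n_images=2, height=4, width=4; the real values come from the surrounding input generator):

    import torch

    n_images, height, width = 2, 4, 4
    image_grid_thw = torch.zeros((n_images, 3), dtype=torch.int64)
    image_grid_thw[:, 1] = height
    image_grid_thw[:, 2] = width
    image_grid_thw[0, :] //= 2  # the first image gets a halved grid
    image_grid_thw[:, 0] = torch.arange(n_images, dtype=image_grid_thw.dtype)
    print(image_grid_thw)
    # tensor([[0, 2, 2],
    #         [1, 4, 4]])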
@@ -1032,7 +1032,8 @@ def patched_modeling_marian_eager_attention_forward(


  class common_RotaryEmbedding(torch.nn.Module):
- @torch.no_grad()
+ # This may cause some issues.
+ # @torch.no_grad()
  @patched_dynamic_rope_update
  def forward(self, x, position_ids):
  inv_freq_expanded = (
@@ -1482,3 +1483,109 @@ class patched_VisionAttention(torch.nn.Module):
  attn_output = attn_output.reshape(seq_length, -1)
  attn_output = self.proj(attn_output)
  return attn_output
+
+
+ try:
+ import transformers.models.qwen3_moe
+
+ patch_qwen3 = True
+ except ImportError:
+ patch_qwen3 = False
+
+ if patch_qwen3:
+
+ class patched_Qwen3MoeSparseMoeBlock(torch.nn.Module):
+ _PATCHES_ = ["forward", "_forward_expert_loop"]
+ _PATCHED_CLASS_ = (
+ transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeSparseMoeBlock
+ )
+
+ def _forward_expert_loop(
+ self,
+ final_hidden_states,
+ expert_mask_idx,
+ hidden_states,
+ routing_weights,
+ expert_idx: int,
+ ):
+ # idx, top_x = torch.where(expert_mask_idx.squeeze(0))
+ idx, top_x = torch.nonzero(expert_mask_idx, as_tuple=True)
+ hidden_dim = hidden_states.shape[-1]
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
+ expert_current_state = self.experts[expert_idx](current_state)
+ current_hidden_states = expert_current_state * routing_weights[top_x, idx, None]
+ return final_hidden_states.index_add(
+ 0, top_x, current_hidden_states.to(hidden_states.dtype)
+ )
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ """ """
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ hidden_states = hidden_states.view(-1, hidden_dim)
+ # router_logits: (batch * sequence_length, n_experts)
+ router_logits = self.gate(hidden_states)
+
+ routing_weights = torch.nn.functional.softmax(
+ router_logits, dim=1, dtype=torch.float
+ )
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+ if self.norm_topk_prob: # only diff with mixtral sparse moe block!
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+ # we cast back to the input dtype
+ routing_weights = routing_weights.to(hidden_states.dtype)
+
+ final_hidden_states = torch.zeros(
+ (batch_size * sequence_length, hidden_dim),
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+
+ # One hot encode the selected experts to create an expert mask
+ # this will be used to easily index which expert is going to be sollicitated
+ expert_mask = torch.nn.functional.one_hot(
+ selected_experts, num_classes=self.num_experts
+ ).permute(2, 1, 0)
+
+ # Loop over all available experts in the model
+ # and perform the computation on each expert
+ expert_sum = expert_mask.sum(dim=(-1, -2))
+ # expert_hit = torch.greater(expert_sum, 0).nonzero()
+ # for expert_idx in expert_hit:
+ for expert_idx in range(self.num_experts):
+ # initial code has a squeeze but it is not possible to do that.
+ # expert_mask_idx = expert_mask[expert_idx].squeeze(0)
+ expert_mask_idx = expert_mask[expert_idx]
+ final_hidden_states = torch.cond(
+ (expert_sum[expert_idx] > 0).item(),
+ lambda final_hidden_states, expert_mask, hidden_states, routing_weights, _i=expert_idx: self._forward_expert_loop(  # noqa: E501
+ final_hidden_states,
+ expert_mask,
+ hidden_states,
+ routing_weights,
+ expert_idx=_i,
+ ),
+ lambda final_hidden_states, *args: final_hidden_states.clone(),
+ [final_hidden_states, expert_mask_idx, hidden_states, routing_weights],
+ )
+
+ # if expert_sum[expert_idx] > 0:
+ # idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
+
+ # Index the correct hidden states and compute the expert hidden state for
+ # the current expert. We need to make sure to multiply the output hidden
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
+ # current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
+ # current_hidden_states = (
+ # expert_layer(current_state) * routing_weights[top_x, idx, None]
+ # )
+
+ # However `index_add_` only support torch tensors for indexing so we'll use
+ # the `top_x` tensor here.
+ # final_hidden_states.index_add_(
+ # 0, top_x, current_hidden_states.to(hidden_states.dtype)
+ # )
+
+ final_hidden_states = final_hidden_states.reshape(
+ batch_size, sequence_length, hidden_dim
+ )
+ return final_hidden_states, router_logits
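
The patched forward above replaces the data-dependent branch of the upstream implementation (if expert_sum[expert_idx] > 0:) with torch.cond, which keeps the expert loop traceable by torch.export. A minimal, self-contained sketch of that pattern with hypothetical tensors (not the patched module itself; it assumes a PyTorch version where torch.cond is available):

    import torch

    def add_contribution(acc, mask, x):
        # True branch: accumulate masked rows of x (no in-place ops, as torch.cond requires).
        return acc + mask.to(x.dtype).unsqueeze(-1) * x

    def keep(acc, mask, x):
        # False branch: must return a fresh tensor with the same shape and dtype.
        return acc.clone()

    acc = torch.zeros(4, 8)
    x = torch.randn(4, 8)
    mask = torch.tensor([1, 0, 1, 0])

    # Both branches stay in the exported graph; only the predicate is evaluated at runtime.
    acc = torch.cond(mask.sum() > 0, add_contribution, keep, [acc, mask, x])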
@@ -4562,7 +4562,7 @@ def _ccached_diffusers_tiny_torch_full_checker_unet():
  }


- def _ccached_riny_random_gemma_3():
+ def _ccached_tiny_random_gemma_3():
  "tiny-random/gemma-3"
  return transformers.Gemma3Config(
  **{
@@ -4618,3 +4618,72 @@ def _ccached_riny_random_gemma_3():
  },
  }
  )
+
+
+ def _ccached_zai_glm_45():
+ "zai-org/GLM-4.5V"
+ return transformers.Glm4vMoeConfig(
+ **{
+ "architectures": ["Glm4vMoeForConditionalGeneration"],
+ "model_type": "glm4v_moe",
+ "text_config": {
+ "pad_token_id": 151329,
+ "vocab_size": 151552,
+ "eos_token_id": [151329, 151336, 151338],
+ "image_end_token_id": 151340,
+ "image_start_token_id": 151339,
+ "image_token_id": 151363,
+ "head_dim": 128,
+ "attention_bias": true,
+ "attention_dropout": 0.0,
+ "first_k_dense_replace": 1,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 10944,
+ "max_position_embeddings": 65536,
+ "model_type": "glm4v_moe_text",
+ "moe_intermediate_size": 1408,
+ "n_group": 1,
+ "n_routed_experts": 128,
+ "n_shared_experts": 1,
+ "norm_topk_prob": true,
+ "num_attention_heads": 96,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 46,
+ "num_key_value_heads": 8,
+ "partial_rotary_factor": 0.5,
+ "rms_norm_eps": 1e-05,
+ "torch_dtype": "bfloat16",
+ "rope_scaling": {"rope_type": "default", "mrope_section": [8, 12, 12]},
+ "rope_theta": 10000.0,
+ "routed_scaling_factor": 1.0,
+ "topk_group": 1,
+ "use_cache": true,
+ "use_qk_norm": false,
+ },
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.55.0.dev0",
+ "video_end_token_id": 151342,
+ "video_start_token_id": 151341,
+ "video_token_id": 151364,
+ "vision_config": {
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "depth": 24,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "image_size": 336,
+ "in_channels": 3,
+ "initializer_range": 0.02,
+ "intermediate_size": 10944,
+ "model_type": "glm4v_moe",
+ "num_heads": 12,
+ "out_hidden_size": 4096,
+ "patch_size": 14,
+ "rms_norm_eps": 1e-05,
+ "spatial_merge_size": 2,
+ "temporal_patch_size": 2,
+ },
+ }
+ )
@@ -1,10 +1,11 @@
+ import copy
  import inspect
  import os
  import pprint
  from typing import Any, Dict, Optional, Tuple
  import torch
  import transformers
- from ...helpers.config_helper import update_config
+ from ...helpers.config_helper import update_config, build_diff_config
  from ...tasks import reduce_model_config, random_input_kwargs
  from .hub_api import task_from_arch, task_from_id, get_pretrained_config, download_code_modelid

@@ -121,6 +122,7 @@ def get_untrained_model_with_inputs(
  )

  # updating the configuration
+ config0 = copy.deepcopy(config)
  mkwargs = reduce_model_config(config, task) if not same_as_pretrained else {}
  if model_kwargs:
  for k, v in model_kwargs.items():
@@ -133,6 +135,15 @@ def get_untrained_model_with_inputs(
  mkwargs[k] = v
  if mkwargs:
  update_config(config, mkwargs)
+ try:
+ diff_config = build_diff_config(config0, config)
+ except (ValueError, AttributeError, TypeError) as e:
+ diff_config = f"DIFF CONFIG ERROR {e}"
+ if verbose:
+ if diff_config:
+ print("[get_untrained_model_with_inputs] -- updated config")
+ pprint.pprint(diff_config)
+ print("[get_untrained_model_with_inputs] --")

  # SDPA
  if model_kwargs and "attn_implementation" in model_kwargs:
@@ -232,6 +243,7 @@ def get_untrained_model_with_inputs(

  res["input_kwargs"] = kwargs
  res["model_kwargs"] = mkwargs
+ res["dump_info"] = dict(config_diff=diff_config)

  sizes = compute_model_size(model)
  res["model"] = model
@@ -1,6 +1,7 @@
  import datetime
  import inspect
  import os
+ import pprint
  import sys
  from typing import Any, Callable, Dict, List, Optional, Tuple, Union
  import time
@@ -467,6 +468,21 @@ def validate_model(
  f"inputs2 is True but second set is missing in data for "
  f"model id {model_id!r}: {sorted(data)}"
  )
+ if dump_folder:
+ with open(os.path.join(dump_folder, "model_config.txt"), "w") as f:
+ f.write(f"model_id: {model_id}\n------\n")
+ f.write(
+ pprint.pformat(
+ data["configuration"]
+ if type(data["configuration"]) is dict
+ else data["configuration"].to_dict()
+ )
+ )
+ dump_info = data.get("dump_info", None)
+ if dump_info:
+ with open(os.path.join(dump_folder, "model_dump_info.txt"), "w") as f:
+ f.write(f"model_id: {model_id}\n------\n")
+ f.write(pprint.pformat(dump_info))

  if exporter == "modelbuilder":
  # Models used with ModelBuilder do not like batch size > 1.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx-diagnostic
- Version: 0.7.7
+ Version: 0.7.8
  Summary: Investigate ONNX models
  Home-page: https://github.com/sdpython/onnx-diagnostic
  Author: Xavier Dupré
@@ -1,4 +1,4 @@
- onnx_diagnostic/__init__.py,sha256=JdBGK1KI9-G1MdBmNPIE7pLfaEHkMhgCq6h_YPJEYdw,173
+ onnx_diagnostic/__init__.py,sha256=atIZhOUaWoHQQ1pUnGu6Qn-uTq_KocBvIG5yInBKI04,173
  onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
  onnx_diagnostic/_command_lines_parser.py,sha256=8JlT1vzyGztkJT2v6lpQx5itLKY4FYlpFng3z8n3TAU,32937
  onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
@@ -12,8 +12,8 @@ onnx_diagnostic/helpers/__init__.py,sha256=GJ2GT7cgnlIveVUwMZhuvUwidbTJaKv8CsSIO
  onnx_diagnostic/helpers/_log_helper.py,sha256=zZ7AqGpiF3O2-9N8fLuPeB5VfJpbg3tQ20ccDRdQPVE,16753
  onnx_diagnostic/helpers/args_helper.py,sha256=SRWnqC7EENg09RZlA50B_PcdiIhdbgA4C3ACfzl5nMs,4419
  onnx_diagnostic/helpers/bench_run.py,sha256=CGA6VMJZMH2gDhVueT9ypNm4PMcjGrrGFYp08nhWj9k,16539
- onnx_diagnostic/helpers/cache_helper.py,sha256=sd9hnOW8uCU3yqvIB8tnxANRYEl1V_Ej8WDZMMI9VR8,24566
- onnx_diagnostic/helpers/config_helper.py,sha256=ZrwdQwG3atXzto1VLUzCVOzyBIWkih_EGc3qKHuluZw,4139
+ onnx_diagnostic/helpers/cache_helper.py,sha256=dFiKPnD3qT_rel9C7Az9AEnbV2drfSMSdXBRotJJUU4,24686
+ onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDfvm9ToI1vNH0,5618
  onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
  onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
  onnx_diagnostic/helpers/helper.py,sha256=OsQz2um10DgGiX3fgOulTDFQop0wCMX6shPonQgN71w,62940
@@ -77,7 +77,7 @@ onnx_diagnostic/tasks/automatic_speech_recognition.py,sha256=tguoQO77okXo8vcJrN2
  onnx_diagnostic/tasks/feature_extraction.py,sha256=pcFON5uGKoykjg52bMsvpYG7KJvXd8JDC43rAjXIzB0,5572
  onnx_diagnostic/tasks/fill_mask.py,sha256=Z0OyDs3pcnjJLzZBbS52d6pa6jh6m2Uy8-h3nF5wbDA,2675
  onnx_diagnostic/tasks/image_classification.py,sha256=nLpBBB1Gkog3Fk6pu2waiHcuQr4ILPptc9FhQ-pn460,4682
- onnx_diagnostic/tasks/image_text_to_text.py,sha256=syi7MzOKnHlWfqsfQsbmY6zcwtj8flg5_jVqodmIKcY,16806
+ onnx_diagnostic/tasks/image_text_to_text.py,sha256=XlikpvAdB2q4sQ9U17JZWLUZ77_rGsbICM-xxcgqfQc,17498
  onnx_diagnostic/tasks/mask_generation.py,sha256=fjdD3rd-O-mFL0hQy3la3JXKth_0bH2HL7Eelq-3Dbs,5057
  onnx_diagnostic/tasks/mixture_of_expert.py,sha256=al4tk1BrHidtRiHlAaiflWiJaAte0d5M8WcBioANG9k,2808
  onnx_diagnostic/tasks/object_detection.py,sha256=3FiT8ya5FCd9lwjQCRXhAwXspNwYTlAD3Gpk8aAcG5w,4279
@@ -99,26 +99,26 @@ onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=57x62uZNA80XiWgkG8F
  onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=DTvdHPtNQh25Akv5o3D4Jxf1L1-SJ7w14tgvj8AAns8,26577
  onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=g1UjL6A6iB7Qh2Cs1efuKk5377IvsSnZXUk3jNeRu_E,18830
- onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=3pAm3cPDq_HxpKCJNLLyrO53ZAnByjg7uKzpUPhz7nc,61378
+ onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=tcDNJzOIivyOM6XbTm4munHKHAmVrOKE6nbqIdl-4dg,66290
  onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
  onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
  onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=dAKi4zujlBxDvxvaVI_qH4qW9AlpVFMtCkvGTNCJCUY,9353
  onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
- onnx_diagnostic/torch_models/validate.py,sha256=hMe4fbRDubSLfWc3XguRl_fjqQBa7-2zVKxhge9iaqc,64917
+ onnx_diagnostic/torch_models/validate.py,sha256=fFDe68M5-1W67UEkLLFqKbaUNjN1qyehyBMxrlZTs90,65588
  onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
  onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=Bvr-sTAhS6s6UCkt-KsY_7Mdai08-AQzvHrzbYCSuvk,13186
  onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=W05mciqUqhaYEfYNHtUeuwOMOZoQTuDidRLEIx4z1CE,8523
- onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=XZ_PsMUmMooJw5pBqEtDMOWbbLYxDcJdRWf-FNz2cYg,279674
- onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=SDRLCA2zivEHIKr2RRRP-dZNiNUcpYS3EgP0unLExxY,11046
+ onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=mboN04WTZMPgfw_JOP01aINWjmq6qmOKQhDE28Fc_zY,282283
+ onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=h6Pi0dkUFXpDGudJ5mQQ9NSQCOjpF6Pm-J6_shsWiH4,11546
  onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=ynBTDHJHCk44NjLT_t6OiFDBdPP0rFGPteiONDxvztw,3708
  onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tmtVl83SmVOL4-Um7Qy-f0E48QI,2507
  onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
  onnx_diagnostic/torch_onnx/sbs.py,sha256=1EL25DeYFzlBSiFG_XjePBLvsiItRXbdDrr5-QZW2mA,16878
- onnx_diagnostic-0.7.7.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
- onnx_diagnostic-0.7.7.dist-info/METADATA,sha256=zus-hHQ_QtzpPCf--m_YA8MnWvfb8rmwbCGEy-ZlLro,7431
- onnx_diagnostic-0.7.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx_diagnostic-0.7.7.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
- onnx_diagnostic-0.7.7.dist-info/RECORD,,
+ onnx_diagnostic-0.7.8.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+ onnx_diagnostic-0.7.8.dist-info/METADATA,sha256=6szG1djw7CRRO3FOrgBWGW3hZ3hNL6zwIOlcWrmtl9k,7431
+ onnx_diagnostic-0.7.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx_diagnostic-0.7.8.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+ onnx_diagnostic-0.7.8.dist-info/RECORD,,