onnx-diagnostic 0.6.2__py3-none-any.whl → 0.6.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx_diagnostic/__init__.py CHANGED
@@ -3,5 +3,5 @@ Patches, Investigates onnx models.
  Functions, classes to dig into a model when this one is right, slow, wrong...
  """
 
- __version__ = "0.6.2"
+ __version__ = "0.6.3"
  __author__ = "Xavier Dupré"
onnx_diagnostic/_command_lines_parser.py CHANGED
@@ -191,24 +191,45 @@ def get_parser_find() -> ArgumentParser:
  "--names",
  type=str,
  required=False,
- help="names to look at comma separated values",
+ help="names to look at, comma-separated values; if 'SHADOW', "
+ "search for shadowing names",
  )
  parser.add_argument(
  "-v",
  "--verbose",
  default=0,
+ type=int,
  required=False,
  help="verbosity",
  )
+ parser.add_argument(
+ "--v2",
+ default=False,
+ action=BooleanOptionalAction,
+ help="use enumerate_results instead of onnx_find",
+ )
  return parser
 
 
  def _cmd_find(argv: List[Any]):
- from .helpers.onnx_helper import onnx_find
+ from .helpers.onnx_helper import onnx_find, enumerate_results, shadowing_names
 
  parser = get_parser_find()
  args = parser.parse_args(argv[1:])
- onnx_find(args.input, verbose=args.verbose, watch=set(args.names.split(",")))
+ if args.names == "SHADOW":
+ onx = onnx.load(args.input, load_external_data=False)
+ s, ps = shadowing_names(onx)[:2]
+ print(f"shadowing names: {s}")
+ print(f"post-shadowing names: {ps}")
+ elif args.v2:
+ onx = onnx.load(args.input, load_external_data=False)
+ res = list(
+ enumerate_results(onx, name=set(args.names.split(",")), verbose=args.verbose)
+ )
+ if not args.verbose:
+ print("\n".join(map(str, res)))
+ else:
+ onnx_find(args.input, verbose=args.verbose, watch=set(args.names.split(",")))
 
 
  def get_parser_config() -> ArgumentParser:
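
Usage sketch for the new find modes. Only --names, --verbose and --v2 appear in the hunk above; any other flag name would be an assumption, so the safest way to see the full interface is to introspect the parser:

    # a minimal sketch: inspect the updated parser rather than guessing flag names
    from onnx_diagnostic._command_lines_parser import get_parser_find

    parser = get_parser_find()
    parser.print_help()  # BooleanOptionalAction exposes --v2 / --no-v2
    # passing --names SHADOW routes the command through shadowing_names
    # instead of a plain name lookup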
onnx_diagnostic/doc.py CHANGED
@@ -1,3 +1,7 @@
+ from typing import Optional
+ import numpy as np
+
+
  def reset_torch_transformers(gallery_conf, fname):
  "Resets torch dynamo for :epkg:`sphinx-gallery`."
  import matplotlib.pyplot as plt
@@ -30,3 +34,45 @@ def plot_legend(
  ax.grid(False)
  ax.set_axis_off()
  return ax
+
+
+ def rotate_align(ax, angle=15, align="right"):
+ """Rotates the x-axis labels and aligns them to the right. Returns ax."""
+ for label in ax.get_xticklabels():
+ label.set_rotation(angle)
+ label.set_horizontalalignment(align)
+ return ax
+
+
+ def save_fig(ax, name: str):
+ """Applies ``tight_layout`` and saves the figure. Returns ax."""
+ import matplotlib.pyplot as plt
+
+ plt.tight_layout()
+ fig = ax.get_figure()
+ fig.savefig(name)
+ return ax
+
+
+ def title(ax: "plt.axes", title: str) -> "plt.axes": # noqa: F821
+ "Adds a title to axes and returns them."
+ ax.set_title(title)
+ return ax
+
+
+ def plot_histogram(
+ tensor: np.ndarray,
+ ax: Optional["plt.axes"] = None, # noqa: F821
+ bins: int = 30,
+ color: str = "orange",
+ alpha: float = 0.7,
+ ) -> "plt.axes": # noqa: F821
+ "Plots the distribution of a tensor."
+ if ax is None:
+ import matplotlib.pyplot as plt
+
+ ax = plt.gca()
+ ax.cla()
+ ax.hist(tensor, bins=bins, color=color, alpha=alpha)
+ ax.set_yscale("log")
+ return ax
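
Usage sketch for the new plotting helpers (assumes matplotlib and numpy are installed; the data and file name are placeholders):

    import numpy as np
    from onnx_diagnostic.doc import plot_histogram, rotate_align, save_fig, title

    data = np.random.randn(10_000).astype(np.float32)
    ax = plot_histogram(data, bins=50)           # log-scaled histogram of the values
    title(ax, "weight distribution")             # helpers return ax, so calls chain
    save_fig(rotate_align(ax), "histogram.png")  # tight_layout + savefig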
onnx_diagnostic/helpers/doc_helper.py CHANGED
@@ -1,4 +1,5 @@
- from typing import Dict, Optional, Tuple
+ import os
+ from typing import Dict, List, Optional, Tuple
  import onnx
  import onnx.helper as oh
  import torch
@@ -6,6 +7,17 @@ from ..reference.torch_ops import OpRunKernel, OpRunTensor
  from .torch_helper import onnx_dtype_to_torch_dtype, torch_dtype_to_onnx_dtype
  from .ort_session import InferenceSessionForTorch
 
+ _SAVED: List[str] = []
+ _SAVE_OPTIMIZED_MODEL_ = int(os.environ.get("DUMP_ONNX", "0"))
+
+
+ def _get_model_name(op_name: str, provider: str) -> Optional[str]:
+ if _SAVE_OPTIMIZED_MODEL_:
+ name = f"dump_doc_{op_name}_{provider}_{len(_SAVED)}.onnx"
+ _SAVED.append(name)
+ return name
+ return None
+
 
  class LayerNormalizationOrt(OpRunKernel):
  "LayerNormalization with onnxruntime"
@@ -13,14 +25,14 @@ class LayerNormalizationOrt(OpRunKernel):
  @classmethod
  def device_dependent(cls) -> bool:
  "Needs device."
- return False
+ return True
 
  def __init__(
  self,
  node: onnx.NodeProto,
  version=None,
  device: Optional[torch.device] = None,
- verbose=0,
+ verbose: int = 0,
  ):
  super().__init__(node, version, verbose=verbose)
  self.axis = self.get_attribute_int(node, "axis", -1)
@@ -70,7 +82,11 @@ class LayerNormalizationOrt(OpRunKernel):
  )
  provider = "CPUExecutionProvider" if self.is_cpu else "CUDAExecutionProvider"
  self._provider = provider
- return InferenceSessionForTorch(layer_model, providers=[provider])
+ return InferenceSessionForTorch(
+ layer_model,
+ optimized_model_filepath=_get_model_name("layer_norm", provider),
+ providers=[provider],
+ )
 
  def run(self, x, scale, bias=None):
  itype = torch_dtype_to_onnx_dtype(x.dtype)
@@ -94,14 +110,14 @@ class MatMulOrt(OpRunKernel):
  @classmethod
  def device_dependent(cls) -> bool:
  "Needs device."
- return False
+ return True
 
  def __init__(
  self,
  node: onnx.NodeProto,
  version=None,
  device: Optional[torch.device] = None,
- verbose=0,
+ verbose: int = 0,
  ):
  super().__init__(node, version, verbose=verbose)
  self.device = device
@@ -127,7 +143,11 @@ class MatMulOrt(OpRunKernel):
  )
  provider = "CPUExecutionProvider" if self.is_cpu else "CUDAExecutionProvider"
  self._provider = provider
- return InferenceSessionForTorch(model, providers=[provider])
+ return InferenceSessionForTorch(
+ model,
+ optimized_model_filepath=_get_model_name("matmul", provider),
+ providers=[provider],
+ )
 
  def run(self, a, b):
  itype = torch_dtype_to_onnx_dtype(a.dtype)
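
A sketch of how the new DUMP_ONNX switch is meant to be used (assumes torch and onnxruntime are installed; the variable is read once at import time, so it must be set before the module is imported):

    import os
    os.environ["DUMP_ONNX"] = "1"  # must happen before the import below

    from onnx_diagnostic.helpers import doc_helper  # noqa: E402

    # from now on, every inference session built by LayerNormalizationOrt or
    # MatMulOrt also writes its optimized model to a file such as
    # dump_doc_layer_norm_CPUExecutionProvider_0.onnx in the working directory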
onnx_diagnostic/helpers/model_builder_helper.py CHANGED
@@ -220,6 +220,9 @@ def create_model_builder(
  """
  assert cache_dir, "create_model_builder does not work without cache_dir."
  assert os.path.exists(cache_dir), f"cache_dir={cache_dir!r} does not exists"
+ precision = {"float32": "fp32", "float16": "fp16", "bfloat16": "bfp16"}.get(
+ precision, precision
+ )
  download_model_builder_to_cache()
  builder = import_model_builder()
  io_dtype = builder.set_io_dtype(precision, execution_provider, extra_options)
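
The added mapping normalizes torch-style precision names to the short forms presumably expected by model builder; unknown values pass through unchanged. A one-line check of the fallback behavior:

    # {"float32": "fp32", ...}.get(precision, precision) is a plain fallback lookup
    assert {"float32": "fp32"}.get("float32", "float32") == "fp32"
    assert {"float32": "fp32"}.get("fp16", "fp16") == "fp16"  # already short: kept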
onnx_diagnostic/helpers/onnx_helper.py CHANGED
@@ -316,7 +316,7 @@ def check_model_ort(
 
 
  @functools.cache
- def onnx_dtype_name(itype: int) -> str:
+ def onnx_dtype_name(itype: int, exc: bool = True) -> str:
  """
  Returns the ONNX name for a specific element type.
 
@@ -335,7 +335,11 @@ def onnx_dtype_name(itype: int) -> str:
  v = getattr(TensorProto, k)
  if v == itype:
  return k
- raise ValueError(f"Unexpected value itype: {itype}")
+ if exc:
+ raise ValueError(f"Unexpected value itype: {itype}")
+ if itype == 0:
+ return "UNDEFINED"
+ return "UNEXPECTED"
 
 
  def pretty_onnx(
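
With exc=False the function degrades gracefully instead of raising, which is what the pretty_onnx change below relies on. A minimal sketch of the new behavior:

    from onnx import TensorProto
    from onnx_diagnostic.helpers.onnx_helper import onnx_dtype_name

    print(onnx_dtype_name(TensorProto.FLOAT))  # 'FLOAT'
    print(onnx_dtype_name(0, exc=False))       # 'UNDEFINED' instead of ValueError
    print(onnx_dtype_name(12345, exc=False))   # 'UNEXPECTED' for unknown codes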
@@ -365,7 +369,7 @@ def pretty_onnx(
  itype = onx.type.tensor_type.elem_type
  shape = tuple((d.dim_param or d.dim_value) for d in onx.type.tensor_type.shape.dim)
  shape_str = ",".join(map(str, shape))
- return f"{onnx_dtype_name(itype)}[{shape_str}] {name}"
+ return f"{onnx_dtype_name(itype, exc=False)}[{shape_str}] {name}"
 
  if isinstance(onx, AttributeProto):
  att = onx
@@ -767,7 +771,7 @@ def tensor_dtype_to_np_dtype(tensor_dtype: int) -> np.dtype:
 
 
  def iterator_initializer_constant(
- model: Union[onnx.FunctionProto, onnx.GraphProto, onnx.ModelProto],
+ model: Union[FunctionProto, GraphProto, ModelProto],
  use_numpy: bool = True,
  prefix: str = "",
  ) -> Iterator[Tuple[str, Union["torch.Tensor", np.ndarray]]]: # noqa: F821
@@ -779,8 +783,8 @@ def iterator_initializer_constant(
  :param prefix: for subgraph
  :return: iterator
  """
- if not isinstance(model, onnx.FunctionProto):
- graph = model if isinstance(model, onnx.GraphProto) else model.graph
+ if not isinstance(model, FunctionProto):
+ graph = model if isinstance(model, GraphProto) else model.graph
  if not use_numpy:
  from .torch_helper import to_tensor
  if prefix:
@@ -791,7 +795,7 @@ def iterator_initializer_constant(
  )
  nodes = graph.node
  name = graph.name
- if isinstance(model, onnx.ModelProto):
+ if isinstance(model, ModelProto):
  for f in model.functions:
  yield from iterator_initializer_constant(
  f, use_numpy=use_numpy, prefix=f"{prefix}{f.name}"
@@ -908,3 +912,283 @@ def tensor_statistics(tensor: Union[np.ndarray, TensorProto]) -> Dict[str, Union
  qu = np.quantile(tensor, ii)
  stat.update({f"q{i}": float(q) for i, q in zip(ii, qu)})
  return stat
+
+
+ class NodeCoordinates:
+ """
+ A way to locate a node.
+ ``path`` is a tuple of triples: (node index, node type, node name).
+ """
+
+ __slots__ = ("node", "path")
+
+ def __init__(
+ self,
+ node: Union[onnx.TensorProto, NodeProto, str],
+ path: Tuple[Tuple[int, str, str], ...],
+ ):
+ assert isinstance(path, tuple), f"Unexpected type {type(path)} for path"
+ assert all(isinstance(t, tuple) for t in path), f"Unexpected type in path={path}"
+ self.node = node
+ self.path = path
+
+ def __str__(self) -> str:
+ "usual"
+ if isinstance(self.node, str):
+ return f"{self.path_to_str()} :: {self.node!r}"
+ return f"{self.path_to_str()} :: {pretty_onnx(self.node)}"
+
+ def path_to_str(self) -> str:
+ "String representation of the coordinates."
+ return "x".join(f"({':'.join(map(str, t))})" for t in self.path)
+
+
+ class ResultFound:
+ """
+ Class returned by :func:`enumerate_results`.
+ """
+
+ __slots__ = ("consumer", "name", "producer")
+
+ def __init__(
+ self,
+ name: str,
+ producer: Optional[NodeCoordinates],
+ consumer: Optional[NodeCoordinates],
+ ):
+ assert isinstance(name, str), f"unexpected type {type(name)} for name"
+ self.name = name
+ self.producer = producer
+ self.consumer = consumer
+
+ def __str__(self) -> str:
+ "usual"
+ return (
+ f"<< {self.name} - {self.consumer}"
+ if self.producer is None
+ else f">> {self.name} - {self.producer}"
+ )
+
+
973
+ def enumerate_results(
974
+ proto: Union[FunctionProto, GraphProto, ModelProto, Sequence[NodeProto]],
975
+ name: Union[Set[str], str],
976
+ verbose: int = 0,
977
+ coordinates: Optional[List[Tuple[int, str, str]]] = None,
978
+ ) -> Iterator[ResultFound]:
979
+ """
980
+ Iterates on all nodes, attributes to find where a name is used.
981
+
982
+ :param proto: a proto
983
+ :param name: name or names to find
984
+ :param verbose: verbosity
985
+ :param coordinates: coordinates of a node
986
+ :return: iterator on :class:`ResultFound`
987
+ """
988
+ if not isinstance(name, set):
989
+ name = {name}
990
+ coordinates = coordinates or []
991
+ assert all(
992
+ isinstance(c, tuple) for c in coordinates
993
+ ), f"Unexpected type in coordinates={coordinates}"
994
+ indent = " " * len(coordinates)
995
+ if isinstance(proto, ModelProto):
996
+ if verbose:
997
+ print(f"[enumerate_results] {indent}searching for {name!r} into ModelProto...")
998
+ yield from enumerate_results(proto.graph, name, verbose=verbose)
999
+ elif isinstance(proto, FunctionProto):
1000
+ if verbose:
1001
+ print(f"[enumerate_results] {indent}searching for {name!r} into FunctionProto...")
1002
+ for i in proto.input:
1003
+ if i in name:
1004
+ r = ResultFound(
1005
+ i,
1006
+ NodeCoordinates(i, tuple([*coordinates, (-1, "INPUT", "")])), # noqa: C409
1007
+ None,
1008
+ )
1009
+ if verbose > 1:
1010
+ print(f"[enumerate_results] {indent}-- {r}")
1011
+ yield r
1012
+ yield from enumerate_results(proto.node, name, verbose=verbose)
1013
+ for i in proto.output:
1014
+ if i in name:
1015
+ r = ResultFound(
1016
+ i,
1017
+ None,
1018
+ NodeCoordinates(
1019
+ i, tuple([*coordinates, (len(proto.node), "OUTPUT", "")]) # noqa: C409
1020
+ ),
1021
+ )
1022
+ if verbose > 1:
1023
+ print(f"[enumerate_results] {indent}-- {r}")
1024
+ yield r
1025
+ elif isinstance(proto, GraphProto):
1026
+ if verbose:
1027
+ print(f"[enumerate_results] {indent}searching for {name!r} into GraphProto...")
1028
+ for i in proto.initializer:
1029
+ if i.name in name:
1030
+ r = ResultFound(
1031
+ i.name,
1032
+ NodeCoordinates(i, tuple([*coordinates, (-1, "INIT", "")])), # noqa: C409
1033
+ None,
1034
+ )
1035
+ if verbose > 1:
1036
+ print(f"[enumerate_results] {indent}-- {r}")
1037
+ yield r
1038
+ for i in proto.sparse_initializer:
1039
+ if i.name in name:
1040
+ r = ResultFound(
1041
+ i.name,
1042
+ NodeCoordinates(i, tuple([*coordinates, (-1, "INIT", "")])), # noqa: C409
1043
+ None,
1044
+ )
1045
+ if verbose > 1:
1046
+ print(f"[enumerate_results] {indent}-- {r}")
1047
+ yield r
1048
+ for i in proto.input:
1049
+ if i.name in name:
1050
+ r = ResultFound(
1051
+ i.name,
1052
+ NodeCoordinates(i, tuple([*coordinates, (-1, "INPUT", "")])), # noqa: C409
1053
+ None,
1054
+ )
1055
+ if verbose > 1:
1056
+ print(f"[enumerate_results] {indent}-- {r}")
1057
+ yield r
1058
+ yield from enumerate_results(
1059
+ proto.node, name, verbose=verbose, coordinates=coordinates
1060
+ )
1061
+ for i in proto.output:
1062
+ if i.name in name:
1063
+ r = ResultFound(
1064
+ i.name,
1065
+ None,
1066
+ NodeCoordinates(
1067
+ i, tuple([*coordinates, (len(proto.node), "OUTPUT", "")]) # noqa: C409
1068
+ ),
1069
+ )
1070
+ if verbose > 1:
1071
+ print(f"[enumerate_results] {indent}-- {r}")
1072
+ yield r
1073
+ else:
1074
+ if verbose:
1075
+ print(
1076
+ f"[enumerate_results] {indent}searching for {name!r} into List[NodeProto]..."
1077
+ )
1078
+ for node_i, node in enumerate(proto):
1079
+ if set(node.input) & name:
1080
+ for n in node.input:
1081
+ if n in name:
1082
+ r = ResultFound(
1083
+ n,
1084
+ NodeCoordinates(
1085
+ node,
1086
+ tuple( # noqa: C409
1087
+ [*coordinates, (node_i, node.op_type, node.name)]
1088
+ ),
1089
+ ),
1090
+ None,
1091
+ )
1092
+ if verbose > 1:
1093
+ print(f"[enumerate_results] {indent}-- {r}")
1094
+ yield r
1095
+ if node.op_type in {"If", "Scan", "Loop", "SequenceMap"}:
1096
+ for att in node.attribute:
1097
+ if att.type == onnx.AttributeProto.GRAPH:
1098
+ yield from enumerate_results(
1099
+ att.g,
1100
+ name,
1101
+ verbose=verbose,
1102
+ coordinates=[*coordinates, (node_i, node.op_type, node.name)],
1103
+ )
1104
+ if set(node.output) & name:
1105
+ for n in node.output:
1106
+ if n in name:
1107
+ r = ResultFound(
1108
+ n,
1109
+ None,
1110
+ NodeCoordinates(
1111
+ node,
1112
+ tuple( # noqa: C409
1113
+ [*coordinates, (node_i, node.op_type, node.name)]
1114
+ ),
1115
+ ),
1116
+ )
1117
+ if verbose > 1:
1118
+ print(f"[enumerate_results] {indent}-- {r}")
1119
+ yield r
1120
+ if verbose:
1121
+ print(f"[enumerate_results] {indent}done")
1122
+
1123
+
+ def shadowing_names(
+ proto: Union[FunctionProto, GraphProto, ModelProto, Sequence[NodeProto]],
+ verbose: int = 0,
+ existing: Optional[Set[str]] = None,
+ shadow_context: Optional[Set[str]] = None,
+ post_shadow_context: Optional[Set[str]] = None,
+ ) -> Tuple[Set[str], Set[str], Set[str]]:
+ """
+ Returns the shadowing names, the names created in the main graph
+ after they were created in a subgraph, and the names created by the nodes.
+ """
+ if isinstance(proto, ModelProto):
+ return shadowing_names(proto.graph, verbose=verbose)
+ if isinstance(proto, GraphProto):
+ assert (
+ existing is None and shadow_context is None
+ ), "existing must be None if nodes is None"
+ return shadowing_names(
+ proto.node,
+ verbose=verbose,
+ existing=set(i.name for i in proto.initializer)
+ | set(i.name for i in proto.sparse_initializer)
+ | set(i.name for i in proto.input if i.name),
+ shadow_context=set(),
+ post_shadow_context=set(),
+ )
+ if isinstance(proto, FunctionProto):
+ assert (
+ existing is None and shadow_context is None
+ ), "existing must be None if nodes is None"
+ return shadowing_names(
+ proto.node,
+ verbose=verbose,
+ existing=set(i for i in proto.input if i),
+ shadow_context=set(),
+ post_shadow_context=set(),
+ )
+
+ assert (
+ existing is not None and shadow_context is not None
+ ), "existing must not be None if nodes is not None"
+ shadow = set()
+ shadow_context = shadow_context.copy()
+ existing = existing.copy()
+ created = set()
+ post_shadow = set()
+ for node in proto:
+ not_empty = set(n for n in node.input if n)
+ intersection = not_empty & existing
+ assert len(intersection) == len(not_empty), (
+ f"One input in {not_empty}, node={pretty_onnx(node)} "
+ f"was not found in {existing}"
+ )
+ for att in node.attribute:
+ if att.type == AttributeProto.GRAPH:
+ g = att.g
+ shadow |= set(i.name for i in g.input) & shadow_context
+ shadow |= set(i.name for i in g.initializer) & shadow_context
+ shadow |= set(i.name for i in g.sparse_initializer) & shadow_context
+ s, ps, c = shadowing_names(
+ g.node, verbose=verbose, existing=existing, shadow_context=existing
+ )
+ shadow |= s
+ created |= c
+
+ not_empty = set(n for n in node.output if n)
+ post_shadow |= not_empty & created
+ shadow |= not_empty & shadow_context
+ existing |= not_empty
+ created |= not_empty
+ return shadow, post_shadow, created
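
A combined sketch of the two new helpers (assumes a local model.onnx; the result name 'hidden' is a placeholder):

    import onnx
    from onnx_diagnostic.helpers.onnx_helper import enumerate_results, shadowing_names

    onx = onnx.load("model.onnx", load_external_data=False)

    # every place the result 'hidden' is produced or consumed, with coordinates
    for found in enumerate_results(onx, name={"hidden"}):
        print(found)

    # shadow: names reused across a subgraph boundary, post_shadow: names recreated
    # in the main graph after a subgraph created them, created: all produced names
    shadow, post_shadow, created = shadowing_names(onx)
    print(sorted(shadow), sorted(post_shadow))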
onnx_diagnostic/reference/torch_evaluator.py CHANGED
@@ -168,8 +168,8 @@ class TorchOnnxEvaluator:
  class LayerNormalizationOrt(OpRunKernel):
  "LayerNormalization based on onnxruntime"
 
- def __init__(self, node: onnx.NodeProto, version=None):
- super().__init__(node, version)
+ def __init__(self, node: onnx.NodeProto, version=None, verbose=0):
+ super().__init__(node, version, verbose=verbose)
  self.axis = self.get_attribute_int(node, "axis", -1)
  self.epsilon = self.get_attribute_float(node, "epsilon", 1e-5)
  self.stash_type = onnx_dtype_to_torch_dtype(
onnx_diagnostic/torch_export_patches/patch_module_helper.py CHANGED
@@ -80,6 +80,7 @@ def known_transformers_rewritings_clamp_float16() -> Dict[str, str]:
  "AutoformerModel": "AutoformerEncoderLayer",
  "BartEncoderLayer": "BartEncoderLayer",
  "BartForConditionalGeneration": "BartEncoderLayer",
+ "BartModel": "BartEncoderLayer",
  "BigBirdPegasusForConditionalGeneration": "BigBirdPegasusEncoderLayer",
  "BigBirdPegasusForQuestionAnswering": "BigBirdPegasusEncoderLayer",
  "BigBirdPegasusForCausalLM": "BigBirdPegasusEncoderLayer",
onnx_diagnostic/torch_models/test_helper.py CHANGED
@@ -387,6 +387,12 @@ def validate_model(
  if model_options:
  print(f"[validate_model] model_options={model_options!r}")
  print(f"[validate_model] get dummy inputs with input_options={input_options}...")
+ print(
+ f"[validate_model] rewrite={rewrite}, patch={patch}, "
+ f"stop_if_static={stop_if_static}"
+ )
+ print(f"[validate_model] exporter={exporter!r}, optimization={optimization!r}")
+ print(f"[validate_model] dump_folder={dump_folder!r}")
  summary["model_id"] = model_id
  summary["model_subfolder"] = subfolder or ""
 
@@ -446,6 +452,8 @@ def validate_model(
  print(f"[validate_model] model_rewrite={summary['model_rewrite']}")
  else:
  del data["rewrite"]
+ if verbose:
+ print("[validate_model] no rewrite")
  if os.environ.get("PRINT_CONFIG", "0") in (1, "1"):
  print("[validate_model] -- PRINT CONFIG")
  print("-- type(config)", type(data["configuration"]))
@@ -1334,13 +1342,13 @@ def call_torch_export_custom(
  "custom-nostrict",
  "custom-nostrict-default",
  "custom-nostrict-all",
- "custom-inline",
- "custom-strict-inline",
- "custom-strict-default-inline",
- "custom-strict-all-inline",
- "custom-nostrict-inline",
- "custom-nostrict-default-inline",
- "custom-nostrict-all-inline",
+ "custom-noinline",
+ "custom-strict-noinline",
+ "custom-strict-default-noinline",
+ "custom-strict-all-noinline",
+ "custom-nostrict-noinline",
+ "custom-nostrict-default-noinline",
+ "custom-nostrict-all-noinline",
  }
  assert exporter in available, f"Unexpected value for exporter={exporter!r} in {available}"
  assert "model" in data, f"model is missing from data: {sorted(data)}"
@@ -1381,10 +1389,7 @@ def call_torch_export_custom(
  ),
  save_ep=(os.path.join(dump_folder, f"{exporter}.ep") if dump_folder else None),
  )
- inline = "-inline" in exporter
- if inline:
- export_options.aten_as_function = set()
-
+ inline = "-noinline" not in exporter
  options = OptimizationOptions(patterns=optimization) if optimization else None
  model = data["model"]
  kws = dict(
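
The exporter suffix flips polarity here: inlining becomes the default and the -noinline suffix opts out, replacing the old opt-in -inline suffix. A small check of the new rule:

    # sketch: the new default inlines unless the exporter name says otherwise
    for exporter in ("custom-nostrict", "custom-nostrict-noinline"):
        inline = "-noinline" not in exporter
        print(exporter, "->", "inline" if inline else "noinline")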
onnx_diagnostic-0.6.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx-diagnostic
- Version: 0.6.2
+ Version: 0.6.3
  Summary: Investigate ONNX models
  Home-page: https://github.com/sdpython/onnx-diagnostic
  Author: Xavier Dupré
onnx_diagnostic-0.6.3.dist-info/RECORD CHANGED
@@ -1,8 +1,8 @@
- onnx_diagnostic/__init__.py,sha256=P1AdnI_jw7SIojX7zxqBeAy83xQePX59KApD_RBNnOY,173
+ onnx_diagnostic/__init__.py,sha256=mRkq5dlSo05GQMct7d6mMZLb6s5T24eG_3mD5O3wBo0,173
  onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
- onnx_diagnostic/_command_lines_parser.py,sha256=WSoopSHjXWEgFvzyfGe3_c-hZoStuQPQc_k08siFuf4,19211
+ onnx_diagnostic/_command_lines_parser.py,sha256=pxG3dYpTTpRCjBRzFGEZm4ewb7xprZihP7fG08kWL04,19989
  onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
- onnx_diagnostic/doc.py,sha256=MTuT7Kxyvn7KEy84liQeFeqhugJrUQhjjpx21F72Uxw,926
+ onnx_diagnostic/doc.py,sha256=O_ncetL0G4-oHIxLv8ofTIIxCT_5ESSkKxfYvgccJEc,2038
  onnx_diagnostic/ext_test_case.py,sha256=nhWz75caudvKn-svH1ppUY8uw8MoTD4cEdqMdj6PiPc,42399
  onnx_diagnostic/export/__init__.py,sha256=yEIoWiOeTwBsDhyYt2fTKuhtA0Ya1J9u9ZzMTOTWaWs,101
  onnx_diagnostic/export/dynamic_shapes.py,sha256=EHB7VoWNx8sVetvOgE1vgC7wHtIjWDLjanhbEJNpK88,39892
@@ -12,13 +12,13 @@ onnx_diagnostic/helpers/args_helper.py,sha256=SRWnqC7EENg09RZlA50B_PcdiIhdbgA4C3
  onnx_diagnostic/helpers/bench_run.py,sha256=CGA6VMJZMH2gDhVueT9ypNm4PMcjGrrGFYp08nhWj9k,16539
  onnx_diagnostic/helpers/cache_helper.py,sha256=soKjyIXa7EQgALd9PAUGIKYzXlJGoLevYiQDsxoqkQ4,8349
  onnx_diagnostic/helpers/config_helper.py,sha256=aZATKVbZuw8L56KQpwMNcqJ3Qi5OplzS_N3ETR3hmj0,3351
- onnx_diagnostic/helpers/doc_helper.py,sha256=RLNqNrZx9NxwaryVu8NcImV9l9azEoK9OpQn3gqBxds,5328
+ onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
  onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
  onnx_diagnostic/helpers/helper.py,sha256=oPybQruFcVLqvqLDhjphOZ8zZU1HHJWAlABMuTkAO8A,57090
  onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
  onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=R1Vu4zHzN7GIUnbMVQzpkaXj8cCyyOweWOI9-TSgAHM,20966
- onnx_diagnostic/helpers/model_builder_helper.py,sha256=cQuC7wbl8UcU_IaSBnX_CsydKTABN6lABCWEthmpmDA,13386
- onnx_diagnostic/helpers/onnx_helper.py,sha256=chw-HB4iqGCD_16d0_BaCnreEgWYW4KeH78nh-3t2Uw,29213
+ onnx_diagnostic/helpers/model_builder_helper.py,sha256=xIZmsVMFHfdtYeZHVEffBtxYObAaRPiaSmwwSKkmLwY,13502
+ onnx_diagnostic/helpers/onnx_helper.py,sha256=pXXQjfyNTSUF-Kt72U4fnBDkYAnWYMxdSw8m0qk3xmE,39670
  onnx_diagnostic/helpers/ort_session.py,sha256=UgUUeUslDxEFBc6w6f3HMq_a7bn4TBlItmojqWquSj4,29281
  onnx_diagnostic/helpers/rt_helper.py,sha256=BXU_u1syk2RyM0HTFHKEiO6rHHhZW2UFPyUTVdeq8BU,4251
  onnx_diagnostic/helpers/torch_helper.py,sha256=mrmn4mBeRvMRJ9cEu7BbNG-AHq2OJfSm8dxgtzh-yQQ,31631
@@ -26,7 +26,7 @@ onnx_diagnostic/reference/__init__.py,sha256=nrd09rRuwMDBCPTSZ6kSKZXp1W9W_ExO1t9
  onnx_diagnostic/reference/evaluator.py,sha256=RzNzjFDeMe-4X51Tb22N6aagazY5ktNq-mRmPcfY5EU,8848
  onnx_diagnostic/reference/ort_evaluator.py,sha256=OaWMREF8fuJwimmONpIjQ6WxQT1X2roDsdJsgR8H_Cg,24853
  onnx_diagnostic/reference/quantized_tensor.py,sha256=5u67uS2uGacdMD5VYCbpojNjiesDlV_kO0fAJ0vUWGE,1098
- onnx_diagnostic/reference/torch_evaluator.py,sha256=ekQTYo0YPVdcM6TGRnEk0mDmGHUOwi0OpcMOWJsRvT8,26572
+ onnx_diagnostic/reference/torch_evaluator.py,sha256=qAeYvSFwOCMDctc39evBEle_2bX8kuJW2QSLksofzn8,26600
  onnx_diagnostic/reference/ops/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  onnx_diagnostic/reference/ops/op_add_add_mul_mul.py,sha256=CXQVtgVrT066gDJFwxL4nDSY4G8r08XNu3EwhWqMapU,1521
  onnx_diagnostic/reference/ops/op_attention.py,sha256=ThALMDF53v3QeG1bohi0bvX2o90HZhGJbbAFOtwEHPE,2027
@@ -88,7 +88,7 @@ onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=l5HvE_F
  onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
  onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=9b4pmyT00BwLqi7WG-gliep1RUy3gXEgW6BDnlSSA-M,7689
  onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
- onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=sIvu4E9BsCB8f-KlM4xykR19mflDfLGaiel6nb9ZGx8,6926
+ onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=2U0AdyZuU0W54QTdE7tY7imVzMnpQ5091ADNtTCkT8Y,6967
  onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=VtkQB1o3Q2Fh99OOF6vQ2dynkhwzx2Wx6oB-rRbvTI0,23954
  onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=DTvdHPtNQh25Akv5o3D4Jxf1L1-SJ7w14tgvj8AAns8,26577
  onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -96,7 +96,7 @@ onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=KaZ8TjDa9ATgT
  onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=Hf-U50vzgzJ4iUjS2LAYkbfmzCEwX80Dzvdrr-Rhlp0,26456
  onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
- onnx_diagnostic/torch_models/test_helper.py,sha256=32uFEZwILyOWHrj-qaNfIv4P2ySe7A0lN2WQ4eVEudA,59604
+ onnx_diagnostic/torch_models/test_helper.py,sha256=tt6bgLjGRayzvkXrTelKHTjr7XU9BvhX7uE4XJq6H6o,59927
  onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
  onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=BgM_p57Q0gT9GOhdrmOYcnbuTTzCWp80jS4OQqWwFhs,9990
  onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=885wKyZkdM-Qp5Sg6C9Ol1dxigmA8FYAko-Ys08sppo,8096
@@ -108,8 +108,8 @@ onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=7N3fGvT_4Mn4NbIo0Q
  onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
  onnx_diagnostic/torch_onnx/sbs.py,sha256=1EL25DeYFzlBSiFG_XjePBLvsiItRXbdDrr5-QZW2mA,16878
- onnx_diagnostic-0.6.2.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
- onnx_diagnostic-0.6.2.dist-info/METADATA,sha256=GwbmPUz9oOj_KKA_164p9H-RxNtpGz1Smn-eTJm4sGA,6643
- onnx_diagnostic-0.6.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx_diagnostic-0.6.2.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
- onnx_diagnostic-0.6.2.dist-info/RECORD,,
+ onnx_diagnostic-0.6.3.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+ onnx_diagnostic-0.6.3.dist-info/METADATA,sha256=eJxj0KTPv1rXf-3T9KImWIF-u8g7wHXBGZm5zvXM7V8,6643
+ onnx_diagnostic-0.6.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx_diagnostic-0.6.3.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+ onnx_diagnostic-0.6.3.dist-info/RECORD,,