onnx-diagnostic 0.7.13__py3-none-any.whl → 0.7.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,5 +3,5 @@ Patches, Investigates onnx models.
  Functions, classes to dig into a model when this one is right, slow, wrong...
  """

- __version__ = "0.7.13"
+ __version__ = "0.7.14"
  __author__ = "Xavier Dupré"
@@ -400,12 +400,17 @@ def get_parser_validate() -> ArgumentParser:

  position_ids is usually not needed; it can be removed by adding:

- --drop position_ids
+ --drop position_ids

  The behaviour may be modified compared to the original configuration;
  the following argument can set rope_scaling to dynamic:

- --mop \"rope_scaling={'rope_type': 'dynamic', 'factor': 10.0}\""
+ --mop \"rope_scaling={'rope_type': 'dynamic', 'factor': 10.0}\""
+
+ You can profile the command line by running:
+
+ pyinstrument -m onnx_diagnostic validate ...
+ pyinstrument -r html -o profile.html -m onnx_diagnostic validate ...
  """
  ),
  formatter_class=RawTextHelpFormatter,
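
The usage notes above are rendered verbatim because the parser uses RawTextHelpFormatter. A minimal, hedged sketch of that pattern (a trimmed, hypothetical parser, not the actual get_parser_validate body):

    from argparse import ArgumentParser, RawTextHelpFormatter
    from textwrap import dedent

    # RawTextHelpFormatter keeps the indentation and blank lines of the help
    # text exactly as written, which is what lets the --drop/--mop/pyinstrument
    # examples above show up verbatim in --help.
    parser = ArgumentParser(
        prog="onnx_diagnostic validate",  # hypothetical prog name
        epilog=dedent("""
            position_ids is usually not needed; it can be removed by adding:

                --drop position_ids

            You can profile the command line by running:

                pyinstrument -m onnx_diagnostic validate ...
        """),
        formatter_class=RawTextHelpFormatter,
    )
    parser.print_help()
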
@@ -1167,7 +1167,7 @@ class CubeLogs:
  df.to_excel(
  writer,
  sheet_name=name,
- freeze_panes=(df.columns.nlevels + df.index.nlevels, df.index.nlevels),
+ freeze_panes=(df.columns.nlevels + 1, df.index.nlevels),
  )
  f_highlights[name] = tview.f_highlight
  if tview.plots:
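
The new freeze_panes value pins the column-header rows plus one extra row (instead of header rows plus all index levels) while still keeping the index columns frozen on the left. A small, self-contained pandas sketch of the same call (toy data, hypothetical output file; requires openpyxl):

    import pandas as pd

    # Toy frame: 2-level column header, 1-level index.
    df = pd.DataFrame(
        [[1.0, 2.0], [3.0, 4.0]],
        index=pd.Index(["model_a", "model_b"], name="model"),
        columns=pd.MultiIndex.from_product([["time"], ["eager", "export"]]),
    )
    with pd.ExcelWriter("example.xlsx") as writer:  # hypothetical file name
        df.to_excel(
            writer,
            sheet_name="metrics",
            # freeze the header rows plus one row, and the index columns
            freeze_panes=(df.columns.nlevels + 1, df.index.nlevels),
        )
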
@@ -1210,7 +1210,7 @@ class CubeLogs:
  for k, v in sbs.items():
  print(f"[CubeLogs.to_excel] sbs {k}: {v}")
  name = "∧".join(sbs)
- sbs_raw, sbs_agg = self.sbs(sbs)
+ sbs_raw, sbs_agg, sbs_col = self.sbs(sbs)
  if verbose:
  print(f"[CubeLogs.to_excel] add sheet {name!r} with shape {sbs_raw.shape}")
  print(
@@ -1222,7 +1222,7 @@ class CubeLogs:
  writer,
  sheet_name=name,
  freeze_panes=(
- sbs_raw.columns.nlevels + sbs_raw.index.nlevels,
+ sbs_raw.columns.nlevels + 1,
  sbs_raw.index.nlevels,
  ),
  )
@@ -1230,10 +1230,18 @@ class CubeLogs:
  writer,
  sheet_name=f"{name}-AGG",
  freeze_panes=(
- sbs_agg.columns.nlevels + sbs_agg.index.nlevels,
+ sbs_agg.columns.nlevels + 1,
  sbs_agg.index.nlevels,
  ),
  )
+ sbs_col.to_excel(
+ writer,
+ sheet_name=f"{name}-COL",
+ freeze_panes=(
+ sbs_col.columns.nlevels + 1,
+ sbs_col.index.nlevels,
+ ),
+ )

  if plots:
  from openpyxl.drawing.image import Image
@@ -1314,7 +1322,7 @@ class CubeLogs:

  def sbs(
  self, configs: Dict[str, Dict[str, Any]], column_name: str = "CONF"
- ) -> Tuple[pandas.DataFrame, pandas.DataFrame]:
+ ) -> Tuple[pandas.DataFrame, pandas.DataFrame, pandas.DataFrame]:
  """
  Creates a side-by-side for two configurations.
  Every configuration is a dictionary column:value which filters in
@@ -1325,7 +1333,7 @@ class CubeLogs:
  :param configs: example
  ``dict(CFA=dict(exporter="E1", opt="O"), CFB=dict(exporter="E2", opt="O"))``
  :param column_name: column to add with the name of the configuration
- :return: data and aggregated data
+ :return: data, aggregated data, data with a row per model
  """
  assert (
  len(configs) >= 2
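
A hypothetical usage sketch of the new three-value return, following the docstring example (`cube` stands for an already-built CubeLogs instance):

    # `cube` is assumed to be an existing CubeLogs; the configuration names
    # and filters below mirror the docstring example.
    configs = dict(
        CFA=dict(exporter="E1", opt="O"),
        CFB=dict(exporter="E2", opt="O"),
    )
    sbs_raw, sbs_agg, sbs_col = cube.sbs(configs)
    print(sbs_raw.shape)  # side-by-side data
    print(sbs_agg.shape)  # aggregated data
    print(sbs_col.shape)  # data with a row per model (new third output)
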
@@ -1433,6 +1441,8 @@ class CubeLogs:
  _mkc(m, f"{n1}<{n2}"): (si < sj).astype(int),
  _mkc(m, f"{n1}=={n2}"): (si == sj).astype(int),
  _mkc(m, f"{n1}>{n2}"): (si > sj).astype(int),
+ _mkc(m, f"{n1}*({n1}∧{n2})"): si * (~sinan & ~sjnan).astype(float),
+ _mkc(m, f"{n2}*({n1}∧{n2})"): sj * (~sinan & ~sjnan).astype(float),
  }
  )
  nas.columns.names = view_res.columns.names
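
The two added columns keep a metric only where both configurations produced a value, so the resulting sums are computed over the same subset of models. A toy pandas illustration of that masking pattern (sinan/sjnan follow the naming above and are plain isna() masks here):

    import numpy as np
    import pandas as pd

    si = pd.Series([1.0, 2.0, np.nan, 4.0])  # metric for configuration n1
    sj = pd.Series([1.5, np.nan, 3.0, 3.5])  # metric for configuration n2
    sinan, sjnan = si.isna(), sj.isna()

    both = (~sinan & ~sjnan).astype(float)   # 1.0 where both values exist
    print((si * both).sum(), (sj * both).sum())  # 5.0 5.0 -> comparable totals
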
@@ -1452,13 +1462,11 @@ class CubeLogs:
  }
  flat = view_res.groupby(self.time).agg(aggs)
  flat = flat.stack("METRICS", future_stack=True)
- return res, flat
+ return res, flat, view_res.T.sort_index().T


  class CubeLogsPerformance(CubeLogs):
- """
- Processes logs coming from experiments.
- """
+ """Processes logs coming from experiments."""

  def __init__(
  self,
@@ -1511,20 +1519,25 @@ class CubeLogsPerformance(CubeLogs):
  "n_model_faster2x",
  "n_model_faster3x",
  "n_model_faster4x",
+ "n_model_faster5x",
  "n_node_attention",
  "n_node_attention23",
- "n_node_rotary_embedding",
- "n_node_rotary_embedding23",
- "n_node_layer_normalization",
- "n_node_layer_normalization23",
+ "n_node_causal_mask",
+ "n_node_constant",
  "n_node_control_flow",
- "n_node_scatter",
+ "n_node_expand",
  "n_node_function",
+ "n_node_gqa",
  "n_node_initializer",
  "n_node_initializer_small",
- "n_node_constant",
+ "n_node_layer_normalization",
+ "n_node_layer_normalization23",
+ "n_node_reshape",
+ "n_node_rotary_embedding",
+ "n_node_rotary_embedding23",
+ "n_node_scatter",
+ "n_node_sequence",
  "n_node_shape",
- "n_node_expand",
  "onnx_n_nodes_no_cst",
  "peak_gpu_torch",
  "peak_gpu_nvidia",
@@ -1690,6 +1703,11 @@ class CubeLogsPerformance(CubeLogs):
  "time_latency",
  gdf(df, "time_latency_eager") > gdf(df, "time_latency", np.inf) * 3.98,
  ),
+ n_model_faster5x=lambda df: gpreserve(
+ df,
+ "time_latency",
+ gdf(df, "time_latency_eager") > gdf(df, "time_latency", np.inf) * 4.98,
+ ),
  n_node_attention23=lambda df: gpreserve(
  df, "time_latency_eager", gdf(df, "op_onnx__Attention")
  ),
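
The new n_model_faster5x formula counts models whose eager latency exceeds the exported latency by roughly five times (4.98 leaves a little slack for rounding). gdf and gpreserve are the cube's own helpers, so the sketch below only reproduces the comparison with plain pandas and assumes missing latencies behave like infinity:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        dict(time_latency_eager=[10.0, 2.0, 8.0], time_latency=[1.9, 1.0, np.nan])
    )
    faster5x = df["time_latency_eager"] > df["time_latency"].fillna(np.inf) * 4.98
    print(int(faster5x.sum()))  # 1 -> only the first model clears the ~5x bar
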
@@ -1720,6 +1738,11 @@ class CubeLogsPerformance(CubeLogs):
  + gdf(df, "op_onnx_com.microsoft_DecoderMaskedMultiHeadAttention", 0)
  + gdf(df, "op_onnx_com.microsoft_SparseAttention", 0),
  ),
+ n_node_gqa=lambda df: gpreserve(
+ df,
+ "time_latency_eager",
+ gdf(df, "op_onnx_com.microsoft_GroupQueryAttention", 0),
+ ),
  n_node_layer_normalization=lambda df: gpreserve(
  df,
  "time_latency_eager",
@@ -1764,9 +1787,22 @@ class CubeLogsPerformance(CubeLogs):
  n_node_shape=lambda df: gpreserve(
  df, "time_latency_eager", gdf(df, "op_onnx__Shape")
  ),
+ n_node_reshape=lambda df: gpreserve(
+ df, "time_latency_eager", gdf(df, "op_onnx__Reshape")
+ ),
  n_node_expand=lambda df: gpreserve(
  df, "time_latency_eager", gdf(df, "op_onnx__Expand")
  ),
+ n_node_causal_mask=lambda df: gpreserve(
+ df,
+ "time_latency_eager",
+ gdf(df, "op_onnx__CausalMask", 0),
+ ),
+ n_node_sequence=lambda df: gpreserve(
+ df,
+ "time_latency_eager",
+ gdf(df, "op_onnx__SequenceAt", 0) + gdf(df, "op_onnx__SplitToSequence", 0),
+ ),
  )
  assert (
  formula in lambdas
@@ -284,6 +284,21 @@ def get_inputs(
  add_second_input=0,
  **kwargs,
  )["inputs"]
+ res["inputs_batch1"] = get_inputs(
+ model=model,
+ config=config,
+ dummy_max_token_id=dummy_max_token_id,
+ num_hidden_layers=num_hidden_layers,
+ batch_size=1,
+ sequence_length=sequence_length,
+ sequence_length2=sequence_length2,
+ dynamic_rope=dynamic_rope,
+ num_key_value_heads=num_key_value_heads,
+ head_dim=head_dim,
+ cls_cache=cls_cache,
+ add_second_input=0,
+ **kwargs,
+ )["inputs"]
  return res


@@ -676,7 +676,13 @@ def run_exporter(

  if dynamic and len(inputs) > 1:
  for index, i in enumerate(inputs):
- expected = model(*_clone(i))
+ if quiet:
+ try:
+ expected = model(*_clone(i))
+ except Exception as e:
+ return dict(error=str(e), success=0, error_step=f"run0.{index}")
+ else:
+ expected = model(*_clone(i))
  try:
  got = mod(*i)
  except Exception as e:
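
With quiet=True, a failure while computing the reference outputs is returned as a structured result (error, success, error_step) instead of raising, which keeps batch validation runs going. A hedged, standalone sketch of that pattern (illustrative names, not the actual run_exporter signature):

    def run_reference(model, inputs, quiet=False):
        expected = []
        for index, args in enumerate(inputs):
            if quiet:
                try:
                    expected.append(model(*args))
                except Exception as e:
                    # report the failing step instead of raising
                    return dict(error=str(e), success=0, error_step=f"run0.{index}")
            else:
                expected.append(model(*args))  # let the error surface when not quiet
        return dict(success=1, expected=expected)
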
@@ -353,12 +353,9 @@ class ControlFlowCondNonZero(torch.nn.Module):


  class ControlFlowCondIdentity_153832(torch.nn.Module):
- """
- `#153832 <https://github.com/pytorch/pytorch/issues/153832>`_
- """
+ """`#153832 <https://github.com/pytorch/pytorch/issues/153832>`_"""

  def forward(self, x, y):
-
  def branch_cond_then_1(x):
  x = torch.abs(x) + 1
  return x
@@ -340,6 +340,7 @@ def torch_export_patches(
  ###############

  if patch_torch:
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
  from .patches.patch_torch import (
  patched_infer_size,
  patched_vmap,
@@ -347,6 +348,9 @@ def torch_export_patches(
  patched__constrain_user_specified_dimhint_range,
  _catch_produce_guards_and_solve_constraints,
  patch__check_input_constraints_for_graph,
+ patched__broadcast_in_dim_meta,
+ patched__maybe_broadcast,
+ patched_ShapeEnv,
  )

  if verbose:
@@ -383,6 +387,20 @@ def torch_export_patches(
  patched__constrain_user_specified_dimhint_range
  )

+ # torch._prims._broadcast_in_dim_meta
+ f_broadcast_in_dim = torch._prims.broadcast_in_dim
+ f__broadcast_in_dim_meta = torch._prims._broadcast_in_dim_meta
+ torch._prims._broadcast_in_dim_meta = patched__broadcast_in_dim_meta
+ torch._prims.broadcast_in_dim = patched__broadcast_in_dim_meta
+
+ # torch._refs._maybe_broadcast
+ f__maybe_broadcast = torch._refs._maybe_broadcast
+ torch._refs._maybe_broadcast = patched__maybe_broadcast
+
+ # ShapeEnv
+ f_shape_env__evaluate_expr = ShapeEnv._evaluate_expr
+ ShapeEnv._evaluate_expr = patched_ShapeEnv._evaluate_expr
+
  # torch._export.non_strict_utils.produce_guards_and_solve_constraints
  if patch_torch and catch_constraints:
  if verbose:
@@ -405,9 +423,6 @@ def torch_export_patches(
  )

  if stop_if_static:
- from torch.fx.experimental.symbolic_shapes import ShapeEnv
- from .patches.patch_torch import patched_ShapeEnv
-
  ShapeEnv._log_guard_remember = ShapeEnv._log_guard

  if verbose:
@@ -584,6 +599,10 @@ def torch_export_patches(
  torch._export.non_strict_utils._constrain_user_specified_dimhint_range = (
  f___constrain_user_specified_dimhint_range
  )
+ torch._prims._broadcast_in_dim_meta = f__broadcast_in_dim_meta
+ torch._prims.broadcast_in_dim = f_broadcast_in_dim
+ torch._refs._maybe_broadcast = f__maybe_broadcast
+ ShapeEnv._evaluate_expr = f_shape_env__evaluate_expr

  if verbose:
  print("[torch_export_patches] restored pytorch functions")
@@ -723,9 +742,7 @@ def torch_export_patches(


  def replacement_before_exporting(args: Any) -> Any:
- """
- Does replacements on the given inputs if needed.
- """
+ """Does replacements on the given inputs if needed."""
  if args is None:
  return None
  if isinstance(args, (int, float)):
@@ -1,7 +1,10 @@
+ import functools
  import inspect
+ import operator
  import os
  import traceback
- from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+ from functools import reduce
+ from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Union
  import torch
  from torch._subclasses.fake_tensor import FakeTensorMode

@@ -396,6 +399,284 @@ class patched_ShapeEnv:
  # stacklevel=0,
  # )

+ def _evaluate_expr(
+ self,
+ orig_expr: "sympy.Basic", # noqa: F821
+ hint: Optional[Union[bool, int, float]] = None,
+ fx_node: Optional[torch.fx.Node] = None,
+ size_oblivious: bool = False,
+ fallback_value: Optional[bool] = None,
+ *,
+ forcing_spec: bool = False,
+ ) -> "sympy.Basic": # noqa: F821
+ # TODO: split conjunctions and evaluate them separately
+ import sympy
+ from torch.fx.experimental import _config as config
+ from torch.fx.experimental.symbolic_shapes import (
+ SympyBoolean,
+ log,
+ SymT,
+ symbol_is_type,
+ )
+ from torch._guards import ShapeGuard
+
+ if isinstance(
+ orig_expr,
+ (sympy.logic.boolalg.BooleanTrue, sympy.logic.boolalg.BooleanFalse),
+ ):
+ return orig_expr
+
+ # Don't track this one. (Because this cache is inside this function the
+ # cache only lasts for the invocation of this function call)
+ @functools.cache
+ def compute_concrete_val() -> sympy.Basic:
+ if hint is None:
+ # This is only ever called for expressions WITHOUT unbacked
+ # symbols
+ r = self.size_hint(orig_expr)
+ assert r is not None
+ return r
+ else:
+ return sympy.sympify(hint)
+
+ concrete_val: Optional[sympy.Basic]
+
+ # Check if:
+ # 1. 'translation_validation' is set
+ # 2. the corresponding 'fx_node' is not 'None'
+ # 3. the guard should not be suppressed
+ # 4. the guard doesn't contain backed symfloat symbols
+ # since z3 can't handle floats
+ # 5. fallback_value is none.
+ # If all of the above check, we create an FX node representing the
+ # actual expression to be guarded.
+ node = None
+ fresh = False
+ if (
+ self._translation_validation_enabled
+ and fx_node is not None
+ and not self._suppress_guards_tls()
+ and not size_oblivious
+ and not any(symbol_is_type(s, SymT.FLOAT) for s in orig_expr.free_symbols)
+ and fallback_value is None
+ ):
+ # TODO: does this even worked with unbacked :think:
+ concrete_val = compute_concrete_val()
+ if concrete_val is sympy.true:
+ node, fresh = self._create_fx_call_function(torch._assert, (fx_node,))
+ elif concrete_val is sympy.false:
+ neg, _ = self._create_fx_call_function(operator.not_, (fx_node,))
+ node, fresh = self._create_fx_call_function(torch._assert, (neg,))
+ else:
+ eql, _ = self._create_fx_call_function(operator.eq, (fx_node, concrete_val))
+ node, fresh = self._create_fx_call_function(torch._assert, (eql,))
+
+ assert node is not None
+ # If this is a fresh node, we have to remember the event index that
+ # corresponds to this assertion node.
+ # Reason: so that, given an assertion node, we can replay the ShapeEnv
+ # events until the point where this assertion node was freshly created.
+ if fresh:
+ self._add_fx_node_metadata(node)
+
+ # After creating the FX node corresponding to orig_expr, we must make sure that
+ # no error will be raised until the end of this function.
+ #
+ # Reason: the translation validation may become invalid otherwise.
+ #
+ # If an error is raised before the end of this function, we remove the FX node
+ # inserted, and re-raise the error.
+ guard = None
+
+ try:
+ if orig_expr.is_number:
+ self.log.debug("eval %s [trivial]", orig_expr)
+ if hint is not None:
+ if isinstance(hint, bool):
+ assert orig_expr == hint, f"{orig_expr} != {hint}"
+ else:
+ assert sympy.Eq(orig_expr, hint), f"{orig_expr} != {hint}"
+ return orig_expr
+
+ expr = orig_expr
+
+ static_expr = self._maybe_evaluate_static(expr, size_oblivious=size_oblivious)
+ if static_expr is not None:
+ self.log.debug(
+ "eval %s == %s [statically known]",
+ (f"size_oblivious({orig_expr})" if size_oblivious else size_oblivious),
+ static_expr,
+ )
+ if not size_oblivious and config.backed_size_oblivious and hint is not None:
+ # TODO: maybe reconcile this with use of counterfactual hints
+ # in unbacked case
+ assert static_expr == hint, f"{static_expr} != {hint}"
+ return static_expr
+
+ transmute_into_runtime_assert = False
+
+ concrete_val = None
+ if not (expr.free_symbols <= self.var_to_val.keys()):
+ # TODO: dedupe this with _maybe_evaluate_static
+ # Attempt to eliminate the unbacked SymInt
+ new_expr = self._maybe_evaluate_static(expr, unbacked_only=True)
+ assert new_expr is not None
+ if not (new_expr.free_symbols <= self.var_to_val.keys()):
+ ok = False
+
+ # fallback_value is set when guard_or_true or guard_or_false are used.
+ if not ok and fallback_value is not None:
+ self._log_suppressed_dde(orig_expr, fallback_value)
+ return fallback_value
+
+ # oblivious_var_to_val will be defined iff we have sizes
+ # with DimDynamic.OBLIVIOUS_SIZE type.
+ # See https://github.com/pytorch/pytorch/issues/137100#issuecomment-2495778113
+ if (
+ self.oblivious_var_to_val
+ and not (
+ correct_hint := orig_expr.xreplace(self.oblivious_var_to_val)
+ ).free_symbols
+ and not (
+ counterfactual_hint := orig_expr.xreplace(
+ {k: max(2, v) for k, v in self.oblivious_var_to_val.items()}
+ )
+ ).free_symbols
+ and correct_hint == counterfactual_hint
+ ):
+ # TODO: better logging
+ log.info(
+ "oblivious_size %s -> %s (passed counterfactual)",
+ orig_expr,
+ # pyrefly: ignore # unbound-name
+ correct_hint,
+ )
+ # pyrefly: ignore # unbound-name
+ concrete_val = correct_hint
+ # NB: do NOT transmute into runtime assert
+ ok = True
+
+ # unbacked_var_to_val is not None iff propagate_real_tensors is on.
+ # if propagate_real_tensors is on, we check the example values
+ # to generate (unsound_result)
+ # and if they pass we add a runtime assertions and continue.
+ if (
+ not ok
+ and self.unbacked_var_to_val
+ and not (
+ unsound_result := orig_expr.xreplace(
+ self.unbacked_var_to_val
+ ).xreplace(self.var_to_val)
+ ).free_symbols
+ ):
+ # pyrefly: ignore # unbound-name
+ self._log_real_tensor_propagation(orig_expr, unsound_result)
+ transmute_into_runtime_assert = True
+ # pyrefly: ignore # unbound-name
+ concrete_val = unsound_result
+ ok = True
+
+ # Check if this is coming from a python assert statement,
+ # if so, convert it to a runtime assertion
+ # instead of failing.
+ if not ok and self.trace_asserts and self._is_python_assert():
+ concrete_val = sympy.true
+ transmute_into_runtime_assert = True
+ ok = True
+
+ # PATCHED: ok -> True
+ ok = True
+ # if not ok:
+ # raise self._make_data_dependent_error(
+ # expr.xreplace(self.var_to_val),
+ # expr,
+ # expr_sym_node_id=self._expr_sym_node_id,
+ # )
+ else:
+ expr = new_expr
+
+ if concrete_val is None:
+ concrete_val = compute_concrete_val()
+ self._check_frozen(expr, concrete_val)
+
+ if (
+ config.inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY
+ and isinstance(hint, bool)
+ and isinstance(expr, (sympy.Eq, sympy.Ne))
+ ):
+ expr = sympy.Not(expr)
+
+ # Turn this into a boolean expression, no longer need to consult
+ # concrete_val
+ if concrete_val is sympy.true:
+ g = cast(SympyBoolean, expr)
+ elif concrete_val is sympy.false:
+ g = sympy.Not(expr)
+ else:
+ g = sympy.Eq(expr, concrete_val) # type: ignore[arg-type]
+
+ if transmute_into_runtime_assert:
+ self.guard_or_defer_runtime_assert(
+ g, f"propagate_real_tensors: {orig_expr} == {concrete_val}"
+ )
+ return concrete_val
+
+ if not self._suppress_guards_tls():
+ self._log_guard("eval", g, forcing_spec=forcing_spec)
+
+ # TODO: If we successfully eliminate a symbol via equality, it
+ # is not actually necessary to save a guard for the equality,
+ # as we will implicitly generate a guard when we match that
+ # input against the symbol. Probably the easiest way to
+ # implement this is to have maybe_guard_rel return a bool
+ # saying if it "subsumed" the guard (and therefore the guard
+ # is no longer necessary)
+ self._maybe_guard_rel(g)
+
+ if (
+ torch.compiler.is_exporting()
+ and self.prefer_deferred_runtime_asserts_over_guards
+ ):
+ # it's fine to defer simple guards here without checking,
+ # the _maybe_guard_rel() call above will set replacements if possible,
+ # and so the result here will be statically known
+ self.guard_or_defer_runtime_assert(g, f"evaluate_expr: {orig_expr}")
+ else:
+ # at this point, we've evaluated the concrete expr value, and have
+ # flipped/negated the guard if necessary. Now we know what to guard
+ # or defer to runtime assert on.
+ guard = ShapeGuard(g, self._get_sloc(), size_oblivious=size_oblivious)
+ self.guards.append(guard)
+ self.axioms.update(dict(self.get_implications(self.simplify(g))))
+ else:
+ self._log_guard("eval [guard suppressed]", g, forcing_spec=forcing_spec)
+
+ except Exception:
+ if fresh:
+ self._remove_fx_node(node)
+ raise
+
+ if not self._suppress_guards_tls():
+ if guard is not None: # we might have deferred this to runtime assert
+ for s in g.free_symbols:
+ self.symbol_guard_counter[s] += 1
+ # Forcing_spec to avoid infinite recursion
+ if (
+ not forcing_spec
+ and config.symbol_guard_limit_before_specialize is not None
+ and self.symbol_guard_counter[s]
+ > config.symbol_guard_limit_before_specialize
+ ):
+ # Force specialization
+ self.log.info(
+ "symbol_guard_limit_before_specialize=%s exceeded on %s",
+ config.symbol_guard_limit_before_specialize,
+ s,
+ )
+ self.evaluate_expr(s, forcing_spec=True)
+
+ return concrete_val
+

  def patched_vmap(func, in_dims=0, out_dims=0):
  """
@@ -570,3 +851,146 @@ def patched__constrain_user_specified_dimhint_range(
  return msg

  return None
+
+
+ def patched__maybe_broadcast(*args, preserve_cpu_scalar_tensors=True):
+ """Patches ``torch._refs._maybe_broadcast``."""
+ from torch._prims_common import ShapeType, TensorLike, Number
+
+ # Computes common shape
+ common_shape = patched__broadcast_shapes(
+ *(t.shape if isinstance(t, TensorLike) else None for t in args)
+ )
+
+ def should_expand(a: ShapeType, b: ShapeType) -> bool:
+ from torch.fx.experimental.symbolic_shapes import (
+ guard_or_false,
+ sym_and,
+ sym_or,
+ )
+
+ if len(a) != len(b):
+ return True
+
+ for x, y in zip(a, b):
+ if guard_or_false(x != y):
+ # We know they are not the same.
+ return True
+
+ # They are the same or we do not know if they are the same or not.
+ # 1==1 no-broadcast
+ # u0==1 and 1==u0 cases. We broadcast!
+ if guard_or_false(sym_and(x == 1, y == 1)):
+ pass
+ elif guard_or_false(sym_or(x == 1, y == 1)):
+ # assume broadcasting.
+ return True
+
+ # u0==u1 assume the same, no broadcasting!
+ # PATCHED: avoid errors
+ return True # guard_or_true(x != y)
+ # torch._check(
+ # x == y,
+ # lambda x=x, y=y: (
+ # f"sizes assumed to be the same due to unbacked "
+ # f"broadcasting semantics x={x!r}, y={y!r}"
+ # ),
+ # )
+
+ return False
+
+ def __maybe_broadcast(x, shape):
+ if x is None:
+ return None
+ elif isinstance(x, Number):
+ return x
+ elif isinstance(x, TensorLike):
+ if preserve_cpu_scalar_tensors and torch._prims_common.is_cpu_scalar_tensor(x):
+ return x
+
+ if should_expand(x.shape, common_shape):
+ return x.expand(common_shape)
+
+ return x
+ else:
+ raise RuntimeError(f"Unexpected type when broadcasting: {str(type(x))}!")
+
+ return tuple(__maybe_broadcast(x, common_shape) for x in args)
+
+
+ def patched__broadcast_in_dim_meta(
+ a: torch._prims_common.TensorLikeType,
+ shape: torch._prims_common.ShapeType,
+ broadcast_dimensions: Sequence[int],
+ ):
+ """Patches ``torch._prims._broadcast_in_dim_meta``."""
+ from torch.fx.experimental.symbolic_shapes import (
+ guard_or_false,
+ guard_or_true,
+ sym_or,
+ )
+
+ # Type checks
+ assert isinstance(a, torch._prims_common.TensorLike)
+ assert isinstance(shape, Sequence)
+ assert isinstance(broadcast_dimensions, Sequence)
+
+ # every dimension must be accounted for
+ assert a.ndim == len(broadcast_dimensions)
+
+ # broadcast shape must have weakly more dimensions
+ assert len(shape) >= a.ndim
+
+ # broadcast_dimensions must be an ascending sequence
+ # (no relative reordering of dims) of integers and
+ # each dimension must be within the new shape
+ def _greater_than_reduce(acc, x):
+ assert isinstance(x, (int, torch.export.Dim)), f"unexpected type {type(x)} for x"
+ assert x > acc
+ assert x < len(shape)
+
+ return x
+
+ reduce(_greater_than_reduce, broadcast_dimensions, -1)
+
+ # shape must be broadcastable to
+ for idx, new_idx in enumerate(broadcast_dimensions):
+ torch._check(
+ sym_or(a.shape[idx] == 1, shape[new_idx] == a.shape[idx]),
+ lambda idx=idx, new_idx=new_idx: (
+ f"{a.shape[idx]} must be broadcastable to {shape[new_idx]}"
+ ),
+ )
+
+ new_strides = []
+ original_idx = 0
+ for idx in range(len(shape)):
+ if idx in broadcast_dimensions:
+ # Assigns a stride of zero to dimensions
+ # which were actually broadcast
+ if guard_or_false(a.shape[original_idx] == 1):
+ if guard_or_false(a.shape[original_idx] == shape[idx]):
+ new_strides.append(a.stride()[original_idx])
+ else:
+ new_strides.append(0)
+ else:
+ # PATCHED: disabled this check
+ # torch._check(
+ # a.shape[original_idx] == shape[idx],
+ # lambda idx=idx, original_idx=original_idx: (
+ # f"non-broadcasting semantics require "
+ # f"{a.shape[original_idx]} == {shape[idx]}"
+ # ),
+ # )
+ new_strides.append(a.stride()[original_idx])
+ original_idx = original_idx + 1
+ else:
+ if guard_or_true(shape[idx] != 1):
+ # consistent with previous use of guard_size_oblivious
+ new_strides.append(0)
+ elif original_idx == a.ndim:
+ new_strides.append(1)
+ else:
+ new_strides.append(a.stride()[original_idx] * a.size()[original_idx])
+
+ return a.as_strided(shape, new_strides, a.storage_offset())
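
The patched helpers relax the equality checks torch normally enforces when a broadcast dimension is an unbacked (data-dependent) symbol; for concrete shapes the semantics are unchanged. A plain eager reminder of the rule being relaxed, with no patching involved:

    import torch

    a = torch.randn(1, 4)
    b = a.expand(3, 4)  # broadcast dim 0 from 1 to 3 without copying data
    print(torch.broadcast_shapes((1, 4), (3, 4)))  # torch.Size([3, 4])
    print(b.stride())  # (0, 1) -> the broadcast dimension gets stride 0
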
@@ -25,6 +25,20 @@ def _code_needing_rewriting(model: Any) -> Any:
  return code_needing_rewriting(model)


+ def _preprocess_model_id(
+ model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
+ ) -> Tuple[str, Optional[str], bool, bool]:
+ if subfolder or "//" not in model_id:
+ return model_id, subfolder, same_as_pretrained, use_pretrained
+ spl = model_id.split("//")
+ if spl[-1] == "pretrained":
+ return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
+ if spl[-1] in {"transformer", "vae"}:
+ # known subfolder
+ return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
+ return model_id, subfolder, same_as_pretrained, use_pretrained
+
+
  def get_untrained_model_with_inputs(
  model_id: str,
  config: Optional[Any] = None,
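
A quick, self-contained illustration of the `//` suffix convention the helper implements (the model ids below are placeholders; the function is a standalone copy of the logic above, renamed for the example):

    from typing import Optional, Tuple

    def preprocess_model_id(
        model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
    ) -> Tuple[str, Optional[str], bool, bool]:
        if subfolder or "//" not in model_id:
            return model_id, subfolder, same_as_pretrained, use_pretrained
        spl = model_id.split("//")
        if spl[-1] == "pretrained":
            return preprocess_model_id("//".join(spl[:-1]), "", True, True)
        if spl[-1] in {"transformer", "vae"}:
            return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
        return model_id, subfolder, same_as_pretrained, use_pretrained

    # "//transformer" selects a known subfolder, "//pretrained" forces the
    # pretrained weights (placeholder model ids).
    print(preprocess_model_id("org/some-diffusion-model//transformer", None, False, False))
    # ('org/some-diffusion-model', 'transformer', False, False)
    print(preprocess_model_id("org/some-llm//pretrained", None, False, False))
    # ('org/some-llm', '', True, True)
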
@@ -85,8 +99,16 @@ def get_untrained_model_with_inputs(
  f"model_id={model_id!r}, preinstalled model is only available "
  f"if use_only_preinstalled is False."
  )
+ model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
+ model_id,
+ subfolder,
+ same_as_pretrained=same_as_pretrained,
+ use_pretrained=use_pretrained,
+ )
  if verbose:
- print(f"[get_untrained_model_with_inputs] model_id={model_id!r}")
+ print(
+ f"[get_untrained_model_with_inputs] model_id={model_id!r}, subfolder={subfolder!r}"
+ )
  if use_preinstalled:
  print(f"[get_untrained_model_with_inputs] use preinstalled {model_id!r}")
  if config is None:
@@ -178,7 +200,7 @@ def get_untrained_model_with_inputs(

  if verbose:
  print(
- f"[get_untrained_model_with_inputs] package_source={package_source.__name__} é"
+ f"[get_untrained_model_with_inputs] package_source={package_source.__name__} "
  f"from {package_source.__file__}"
  )
  if use_pretrained:
@@ -19,6 +19,7 @@ from ..tasks import random_input_kwargs
  from ..torch_export_patches import torch_export_patches
  from ..torch_export_patches.patch_inputs import use_dyn_not_str
  from .hghub import get_untrained_model_with_inputs
+ from .hghub.model_inputs import _preprocess_model_id


  def empty(value: Any) -> bool:
@@ -289,20 +290,6 @@ def shrink_config(cfg: Dict[str, Any]) -> Dict[str, Any]:
  return new_cfg


- def _preprocess_model_id(
- model_id: str, subfolder: Optional[str], same_as_pretrained: bool, use_pretrained: bool
- ) -> Tuple[str, Optional[str], bool, bool]:
- if subfolder or "//" not in model_id:
- return model_id, subfolder, same_as_pretrained, use_pretrained
- spl = model_id.split("//")
- if spl[-1] == "pretrained":
- return _preprocess_model_id("//".join(spl[:-1]), "", True, True)
- if spl[-1] in {"transformer", "vae"}:
- # known subfolder
- return "//".join(spl[:-1]), spl[-1], same_as_pretrained, use_pretrained
- return model_id, subfolder, same_as_pretrained, use_pretrained
-
-
  def validate_model(
  model_id: str,
  task: Optional[str] = None,
@@ -419,14 +406,14 @@ def validate_model(
  such as ``input_empty_cache``
  which refers to a set of inputs using an empty cache.
  """
- validation_begin = time.perf_counter()
+ main_validation_begin = time.perf_counter()
  model_id, subfolder, same_as_pretrained, use_pretrained = _preprocess_model_id(
  model_id,
  subfolder,
  same_as_pretrained=same_as_pretrained,
  use_pretrained=use_pretrained,
  )
- time_preprocess_model_id = time.perf_counter() - validation_begin
+ time_preprocess_model_id = time.perf_counter() - main_validation_begin
  default_patch = dict(patch_transformers=True, patch_diffusers=True, patch=True)
  if isinstance(patch, bool):
  patch_kwargs = default_patch if patch else dict(patch=False)
@@ -921,7 +908,7 @@ def validate_model(
  summary.update(summary_valid)

  _compute_final_statistics(summary)
- summary["time_total"] = time.perf_counter() - validation_begin
+ summary["time_total"] = time.perf_counter() - main_validation_begin

  if verbose:
  print("[validate_model] -- done (final)")
@@ -1744,6 +1731,7 @@ def process_statistics(data: Sequence[Dict[str, float]]) -> Dict[str, Any]:
  "constant_folding",
  "remove_identity",
  "remove_duplicated_initializer",
+ "remove_duplicated_shape",
  "dynamic_dimension_naming",
  "inline",
  "check",
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx-diagnostic
- Version: 0.7.13
+ Version: 0.7.14
  Summary: Tools to help converting pytorch models into ONNX.
  Home-page: https://github.com/sdpython/onnx-diagnostic
  Author: Xavier Dupré
@@ -1,6 +1,6 @@
- onnx_diagnostic/__init__.py,sha256=Sv9eg4qDNdyO5uUafa3e98pIerP4faa203FF3hqygOI,174
+ onnx_diagnostic/__init__.py,sha256=fxgnYe-ZeX2ZhqiqehQfAUIDhdiy2BjpbzcaUtrI5g8,174
  onnx_diagnostic/__main__.py,sha256=YmyV_Aq_ianDlHyKLHMa6h8YK3ZmFPpLVHLKjM91aCk,79
- onnx_diagnostic/_command_lines_parser.py,sha256=wleBwnoCDyAWRYRREUSGkwAJKw2YI4Td_7ydxmdOXfI,33457
+ onnx_diagnostic/_command_lines_parser.py,sha256=qCPdI1_Za7OM1MuR1utyhTcSZQlM4UVmN8Su4HoRjvI,33670
  onnx_diagnostic/api.py,sha256=BhCl_yCd78N7TlVtPOHjeYv1QBEy39TjZ647rcHqLh0,345
  onnx_diagnostic/doc.py,sha256=t3RELgfooYnVMAi0JSpggWkQEgUsREz8NmRvn0TnLI8,2829
  onnx_diagnostic/ext_test_case.py,sha256=emfQGiQSz5FVDhyJ1Acsv_Tast7tWl426TjtpNqxDBU,43558
@@ -17,7 +17,7 @@ onnx_diagnostic/helpers/config_helper.py,sha256=H2mOcMXfrcolFnt8EuqmRFkpQ3YdNRDf
  onnx_diagnostic/helpers/doc_helper.py,sha256=pl5MZd3_FaE8BqQnqoBuSBxoNCFcd2OJd3eITUSku5c,5897
  onnx_diagnostic/helpers/graph_helper.py,sha256=hevQT5a7_QuriVPQcbT5qe18n99Doyl5h3-qshx1-uk,14093
  onnx_diagnostic/helpers/helper.py,sha256=zl7vG6G4ueq931Z9iT8OlKfmtFxvRJD2WJQh_qsMiBs,63401
- onnx_diagnostic/helpers/log_helper.py,sha256=SKzxJ6DdP9uq4e2feA2nqd2Rreq4G-ujKZFUELfycP0,85674
+ onnx_diagnostic/helpers/log_helper.py,sha256=xBKz5rj2-jEtN_tFKsOV4RpBGermrv7CWqG3KUm2psI,87335
  onnx_diagnostic/helpers/memory_peak.py,sha256=OT6mz0muBbBZY0pjgW2_eCk_lOtFRo-5w4jFo2Z6Kok,6380
  onnx_diagnostic/helpers/mini_onnx_builder.py,sha256=Cgx1ojmV0S_JpZ_UqwsNxeULMMDvMInXslhkE34fwec,22051
  onnx_diagnostic/helpers/model_builder_helper.py,sha256=sK40KRN9GWK1vbNJHIXkYAojblbKD0bdom7BFmoNSv4,12860
@@ -86,34 +86,34 @@ onnx_diagnostic/tasks/sentence_similarity.py,sha256=vPqNZgAnIvY0rKWPUTs0IlU3RFQD
  onnx_diagnostic/tasks/summarization.py,sha256=8vB_JiRzDEacIvr8CYTuVQTH73xG_jNkndoS9RHJTSs,8292
  onnx_diagnostic/tasks/text2text_generation.py,sha256=35eF_RlSeMdLTZPooLMAnszs-z0bkKZ34Iej3JgA96A,8602
  onnx_diagnostic/tasks/text_classification.py,sha256=CGc72SpXFzTUyzAHEMPgyy_s187DaYGsRdrosxG80_Q,2711
- onnx_diagnostic/tasks/text_generation.py,sha256=PRUcVF6XBmOkNA2yi2MUDAT7G8JS1w_6nvjIGcmhST8,13366
+ onnx_diagnostic/tasks/text_generation.py,sha256=-oWq_I1lAUm9wxJnvFM1kXDJAmHbCiM6lUG3waR3o2k,13909
  onnx_diagnostic/tasks/text_to_image.py,sha256=mOS3Ruosi3hzRMxXLDN7ZkAbi7NnQb7MWwQP_okGVHs,2962
  onnx_diagnostic/tasks/zero_shot_image_classification.py,sha256=jJCMWuOqGv5ahCfjrcqxuYCJFhTgHV5KUf2yyv2yxYA,4624
  onnx_diagnostic/tasks/data/__init__.py,sha256=uJoemrWgEjI6oA-tMX7r3__x-b3siPmkgqaY7bgIles,401
  onnx_diagnostic/tasks/data/dummies_imagetext2text_generation_gemma3.onnx,sha256=UbtvmWMqcZOKJ-I-HXWI1A6YR6QDaFS5u_yXm5C3ZBw,10299
  onnx_diagnostic/torch_export_patches/__init__.py,sha256=0SaZedwznm1hQUCvXZsGZORV5vby954wEExr5faepGg,720
- onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=fQGyk6IkapGYYlFxbly8hS5oLWkhIC4bHV3DfZA1Keg,29449
+ onnx_diagnostic/torch_export_patches/onnx_export_errors.py,sha256=ZMsUeU3Hx5YD8xNgQTaW8Br88HvPSiCmqmKLhMz5jw0,30459
  onnx_diagnostic/torch_export_patches/onnx_export_serialization.py,sha256=klvqiMjccwGhiRnLRVbwTi5WWkMfvtnOV5ycirPcAdA,11354
  onnx_diagnostic/torch_export_patches/patch_expressions.py,sha256=vr4tt61cbDnaaaduzMj4UBZ8OUtr6GfDpIWwOYqjWzs,3213
  onnx_diagnostic/torch_export_patches/patch_inputs.py,sha256=2HQZKQV6TM5430RIvKiMPe4cfGvFdx1UnP1w76CeGE4,8110
  onnx_diagnostic/torch_export_patches/patch_module.py,sha256=R2d9IHM-RwsBKDsxuBIJnEqMoxbS9gd4YWFGG2wwV5A,39881
  onnx_diagnostic/torch_export_patches/patch_module_helper.py,sha256=2U0AdyZuU0W54QTdE7tY7imVzMnpQ5091ADNtTCkT8Y,6967
- onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=6z8Fk4rcJKo1Nh2F0K3JGkmFH0XZSIfv5-HvO6bhhTY,24818
- onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=SqaQU0zsvQsZXU0kIrxcURvVCp-ysZAaF01WLlgKZsw,27183
+ onnx_diagnostic/torch_export_patches/eval/__init__.py,sha256=YQoOGt9XQLWqnJ15NnT7ri_jDevfvpuQwEJo38E-VRU,25056
+ onnx_diagnostic/torch_export_patches/eval/model_cases.py,sha256=joDJV1YfrhYBR_6eXYvNO1jbiJM8Whb47NWZxo8SBwg,27172
  onnx_diagnostic/torch_export_patches/patches/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=bQFxePwj9OwCFykhcZiLvqOV2sXPBcZXa4260XueHLE,23117
+ onnx_diagnostic/torch_export_patches/patches/patch_torch.py,sha256=muA2i6Krd6iB2-nIteplxo_pvQEx4LQMZTxDmLe1n44,40825
  onnx_diagnostic/torch_export_patches/patches/patch_transformers.py,sha256=hHO7XOaUw3XrhPSrG2hTpMNzGVm_zigLg8d7hMOK7Gs,79188
  onnx_diagnostic/torch_export_patches/serialization/__init__.py,sha256=BHLdRPtNAtNPAS-bPKEj3-foGSPvwAbZXrHzGGPDLEw,1876
  onnx_diagnostic/torch_export_patches/serialization/diffusers_impl.py,sha256=drq3EH_yjcSuIWYsVeUWm8Cx6YCZFU6bP_1PLtPfY5I,945
  onnx_diagnostic/torch_export_patches/serialization/transformers_impl.py,sha256=mcmZGekzQlLgE_o3SdKlRgCx4ewwyyAuNWZ9CaN_zrI,9317
  onnx_diagnostic/torch_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/llms.py,sha256=soyg4yC87ptGoeulJhKqw5opGmuLvH1pn_ZDXZ4Jr8E,90
- onnx_diagnostic/torch_models/validate.py,sha256=B5h9iBTtkjFNFP6VzqxCvCfwKe4XQOpre1WeKwKEWNA,79628
+ onnx_diagnostic/torch_models/validate.py,sha256=XNGZi7qSSytUczDfJ-X2ff5xvGdWdWkwjyz8ejxUSCE,79107
  onnx_diagnostic/torch_models/hghub/__init__.py,sha256=vi1Q7YHdddj1soiBN42MSvJdFqe2_KUoWafHISjwOu8,58
  onnx_diagnostic/torch_models/hghub/hub_api.py,sha256=rFbiPNLET-KdBpnv-p0nKgwHX6d7C_Z0s9zZ86_92kQ,14307
  onnx_diagnostic/torch_models/hghub/hub_data.py,sha256=8V_pAgACPLPsLRYUododg7MSL6str-T3tBEGY4OaeYQ,8724
  onnx_diagnostic/torch_models/hghub/hub_data_cached_configs.py,sha256=aSa_7Rjider6ruqQ2-fXQMyyDS8VhB1xKxcPNk8qUeU,288776
- onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=Ioi92UHT3bsfA9oMi9IzY16FxnAKrPJHsEpFepBwr_o,14607
+ onnx_diagnostic/torch_models/hghub/model_inputs.py,sha256=FaNFmWmzAqQQ7nM-L0eypeHG-YmCReXxwOOAb3UN7D0,15493
  onnx_diagnostic/torch_models/hghub/model_specific.py,sha256=j50Nu7wddJMoqmD4QzMbNdFDUUgUmSBKRzPDH55TlUQ,2498
  onnx_diagnostic/torch_models/untrained/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_models/untrained/llm_phi2.py,sha256=JbGZmW41MPJcQgqaJc9R2G00nI79nI-lABN-ffA1lmY,4037
@@ -121,8 +121,8 @@ onnx_diagnostic/torch_models/untrained/llm_tiny_llm.py,sha256=QXw_Bs2SzfeiQMf-tm
  onnx_diagnostic/torch_onnx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  onnx_diagnostic/torch_onnx/runtime_info.py,sha256=1g9F_Jf9AAgYQU4stbsrFXwQl-30mWlQrFbQ7val8Ps,9268
  onnx_diagnostic/torch_onnx/sbs.py,sha256=IoKLA5UwS6kY8g4OOf_bdQwCziIsQfBczZ3w8wo4wZM,16905
- onnx_diagnostic-0.7.13.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
- onnx_diagnostic-0.7.13.dist-info/METADATA,sha256=1ZoJZw78GxT1chXfFumfWyr-kcD8puKgaJ_qTHbfs60,6730
- onnx_diagnostic-0.7.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx_diagnostic-0.7.13.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
- onnx_diagnostic-0.7.13.dist-info/RECORD,,
+ onnx_diagnostic-0.7.14.dist-info/licenses/LICENSE.txt,sha256=Vv6TXglX6Rc0d-f8aREhayhT-6PMQXEyOmI2NKlUCMc,1045
+ onnx_diagnostic-0.7.14.dist-info/METADATA,sha256=id7f09epUAspAc4BxIlxRp0HhfGpR4SXI3BnYx0bjts,6730
+ onnx_diagnostic-0.7.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx_diagnostic-0.7.14.dist-info/top_level.txt,sha256=KwNkXewmcobM3ZT1DJLVWH6ebJzA5qKg7cWqKfpGNT4,16
+ onnx_diagnostic-0.7.14.dist-info/RECORD,,