haoline 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- haoline/.streamlit/config.toml +10 -0
- haoline/__init__.py +248 -0
- haoline/analyzer.py +935 -0
- haoline/cli.py +2712 -0
- haoline/compare.py +811 -0
- haoline/compare_visualizations.py +1564 -0
- haoline/edge_analysis.py +525 -0
- haoline/eval/__init__.py +131 -0
- haoline/eval/adapters.py +844 -0
- haoline/eval/cli.py +390 -0
- haoline/eval/comparison.py +542 -0
- haoline/eval/deployment.py +633 -0
- haoline/eval/schemas.py +833 -0
- haoline/examples/__init__.py +15 -0
- haoline/examples/basic_inspection.py +74 -0
- haoline/examples/compare_models.py +117 -0
- haoline/examples/hardware_estimation.py +78 -0
- haoline/format_adapters.py +1001 -0
- haoline/formats/__init__.py +123 -0
- haoline/formats/coreml.py +250 -0
- haoline/formats/gguf.py +483 -0
- haoline/formats/openvino.py +255 -0
- haoline/formats/safetensors.py +273 -0
- haoline/formats/tflite.py +369 -0
- haoline/hardware.py +2307 -0
- haoline/hierarchical_graph.py +462 -0
- haoline/html_export.py +1573 -0
- haoline/layer_summary.py +769 -0
- haoline/llm_summarizer.py +465 -0
- haoline/op_icons.py +618 -0
- haoline/operational_profiling.py +1492 -0
- haoline/patterns.py +1116 -0
- haoline/pdf_generator.py +265 -0
- haoline/privacy.py +250 -0
- haoline/pydantic_models.py +241 -0
- haoline/report.py +1923 -0
- haoline/report_sections.py +539 -0
- haoline/risks.py +521 -0
- haoline/schema.py +523 -0
- haoline/streamlit_app.py +2024 -0
- haoline/tests/__init__.py +4 -0
- haoline/tests/conftest.py +123 -0
- haoline/tests/test_analyzer.py +868 -0
- haoline/tests/test_compare_visualizations.py +293 -0
- haoline/tests/test_edge_analysis.py +243 -0
- haoline/tests/test_eval.py +604 -0
- haoline/tests/test_format_adapters.py +460 -0
- haoline/tests/test_hardware.py +237 -0
- haoline/tests/test_hardware_recommender.py +90 -0
- haoline/tests/test_hierarchical_graph.py +326 -0
- haoline/tests/test_html_export.py +180 -0
- haoline/tests/test_layer_summary.py +428 -0
- haoline/tests/test_llm_patterns.py +540 -0
- haoline/tests/test_llm_summarizer.py +339 -0
- haoline/tests/test_patterns.py +774 -0
- haoline/tests/test_pytorch.py +327 -0
- haoline/tests/test_report.py +383 -0
- haoline/tests/test_risks.py +398 -0
- haoline/tests/test_schema.py +417 -0
- haoline/tests/test_tensorflow.py +380 -0
- haoline/tests/test_visualizations.py +316 -0
- haoline/universal_ir.py +856 -0
- haoline/visualizations.py +1086 -0
- haoline/visualize_yolo.py +44 -0
- haoline/web.py +110 -0
- haoline-0.3.0.dist-info/METADATA +471 -0
- haoline-0.3.0.dist-info/RECORD +70 -0
- haoline-0.3.0.dist-info/WHEEL +4 -0
- haoline-0.3.0.dist-info/entry_points.txt +5 -0
- haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
@@ -0,0 +1,398 @@
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+Unit tests for the risks module (risk signal detection).
+"""
+
+from __future__ import annotations
+
+import sys
+import tempfile
+from pathlib import Path
+
+import numpy as np
+import onnx
+import pytest
+from onnx import TensorProto, helper
+
+sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
+from ..analyzer import MetricsEngine, ONNXGraphLoader
+from ..patterns import PatternAnalyzer
+from ..risks import RiskAnalyzer, RiskThresholds
+
+
+def create_deep_no_skip_model(num_layers: int = 60) -> onnx.ModelProto:
+    """Create a deep model without skip connections."""
+    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 64])
+    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 64])
+
+    nodes = []
+    initializers = []
+    prev_output = "X"
+
+    for i in range(num_layers):
+        W = helper.make_tensor(
+            f"W_{i}",
+            TensorProto.FLOAT,
+            [64, 64],
+            np.random.randn(64, 64).astype(np.float32).flatten().tolist(),
+        )
+        initializers.append(W)
+
+        out_name = f"layer_{i}" if i < num_layers - 1 else "Y"
+        matmul = helper.make_node("MatMul", [prev_output, f"W_{i}"], [out_name], name=f"matmul_{i}")
+        nodes.append(matmul)
+        prev_output = out_name
+
+    graph = helper.make_graph(nodes, "deep_no_skip", [X], [Y], initializers)
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
+    return model
+
+
+def create_dynamic_input_model() -> onnx.ModelProto:
+    """Create a model with dynamic input shapes."""
+    # Dynamic batch and sequence length
+    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, ["batch", "seq", 64])
+    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, ["batch", "seq", 64])
+
+    relu = helper.make_node("Relu", ["X"], ["Y"])
+
+    graph = helper.make_graph([relu], "dynamic_test", [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
+    return model
+
+
+def create_small_model() -> onnx.ModelProto:
+    """Create a tiny model that shouldn't trigger any risk signals."""
+    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 8])
+    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 8])
+
+    W = helper.make_tensor(
+        "W",
+        TensorProto.FLOAT,
+        [8, 8],
+        np.random.randn(8, 8).astype(np.float32).flatten().tolist(),
+    )
+
+    matmul = helper.make_node("MatMul", ["X", "W"], ["out1"])
+    relu = helper.make_node("Relu", ["out1"], ["Y"])
+
+    graph = helper.make_graph([matmul, relu], "small_test", [X], [Y], [W])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
+    return model
+
+
+def create_no_activation_model() -> onnx.ModelProto:
+    """Create a model with multiple linear layers but no activations.
+
+    Creates 25 MatMul nodes to exceed the MIN_NODES_FOR_DEPTH_CHECK threshold (20).
+    """
+    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 64])
+    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 64])
+
+    nodes = []
+    initializers = []
+    prev_output = "X"
+
+    # Create 25 MatMul layers with no activations (exceeds 20-node threshold)
+    for i in range(25):
+        W = helper.make_tensor(
+            f"W_{i}",
+            TensorProto.FLOAT,
+            [64, 64],
+            np.random.randn(64, 64).astype(np.float32).flatten().tolist(),
+        )
+        initializers.append(W)
+
+        out_name = f"layer_{i}" if i < 24 else "Y"
+        matmul = helper.make_node("MatMul", [prev_output, f"W_{i}"], [out_name], name=f"matmul_{i}")
+        nodes.append(matmul)
+        prev_output = out_name
+
+    graph = helper.make_graph(nodes, "no_activation", [X], [Y], initializers)
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
+    return model
+
+
+class TestRiskAnalyzer:
+    """Tests for RiskAnalyzer class."""
+
+    def test_no_risks_for_small_model(self):
+        """Small models shouldn't trigger risk signals."""
+        model = create_small_model()
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            # Compute FLOPs for risk analysis
+            engine = MetricsEngine()
+            engine.estimate_flops(graph_info)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            # Small model shouldn't trigger depth or bottleneck signals
+            signal_ids = [s.id for s in signals]
+            assert "no_skip_connections" not in signal_ids
+            assert "compute_bottleneck" not in signal_ids
+        finally:
+            model_path.unlink()
+
+    def test_detect_deep_without_skips(self):
+        """Detect deep networks without skip connections."""
+        model = create_deep_no_skip_model(num_layers=60)
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            signal_ids = [s.id for s in signals]
+            assert "no_skip_connections" in signal_ids
+        finally:
+            model_path.unlink()
+
+    def test_detect_dynamic_shapes(self):
+        """Detect dynamic input shapes."""
+        model = create_dynamic_input_model()
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            signal_ids = [s.id for s in signals]
+            assert "dynamic_input_shapes" in signal_ids
+        finally:
+            model_path.unlink()
+
+    def test_detect_no_activations(self):
+        """Detect models without activation functions."""
+        model = create_no_activation_model()
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            signal_ids = [s.id for s in signals]
+            # Should detect either no_activations or no_skip_connections
+            assert "no_activations" in signal_ids or "no_skip_connections" in signal_ids
+        finally:
+            model_path.unlink()
+
+
+class TestRiskSignalSeverity:
+    """Tests for risk signal severity levels."""
+
+    def test_severity_levels(self):
+        """Verify severity levels are set correctly."""
+        model = create_deep_no_skip_model(num_layers=60)
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            for signal in signals:
+                assert signal.severity in ("info", "warning", "high")
+                assert signal.id  # Should have an ID
+                assert signal.description  # Should have a description
+        finally:
+            model_path.unlink()
+
+
+def create_gated_skip_model_for_risk() -> onnx.ModelProto:
+    """Create a model with gated skip connections for risk testing."""
+    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 16, 8, 8])
+
+    W = helper.make_tensor(
+        "W",
+        TensorProto.FLOAT,
+        [16, 16, 1, 1],
+        np.random.randn(16, 16, 1, 1).astype(np.float32).flatten().tolist(),
+    )
+
+    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 16, 8, 8])
+
+    # Gate path: Conv -> Sigmoid
+    gate_conv = helper.make_node(
+        "Conv", ["X", "W"], ["gate_logits"], kernel_shape=[1, 1], name="gate_conv"
+    )
+    sigmoid = helper.make_node("Sigmoid", ["gate_logits"], ["gate"], name="sigmoid")
+
+    # Gated multiplication
+    gate_mul = helper.make_node("Mul", ["X", "gate"], ["Y"], name="gate_mul")
+
+    graph = helper.make_graph(
+        [gate_conv, sigmoid, gate_mul],
+        "gated_skip_test",
+        [X],
+        [Y],
+        [W],
+    )
+
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
+    return model
+
+
+class TestNonstandardResidualRisk:
+    """Tests for non-standard residual risk detection."""
+
+    def test_detect_nonstandard_residuals(self):
+        """Test that non-standard residual patterns are flagged."""
+        model = create_gated_skip_model_for_risk()
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            risk_analyzer = RiskAnalyzer()
+            signals = risk_analyzer.analyze(graph_info, blocks)
+
+            signal_ids = [s.id for s in signals]
+            assert "nonstandard_residuals" in signal_ids
+
+            # Check signal details
+            nonstandard_signal = next(s for s in signals if s.id == "nonstandard_residuals")
+            assert nonstandard_signal.severity == "info"
+            assert "gated" in nonstandard_signal.description.lower()
+            assert nonstandard_signal.recommendation  # Should have a recommendation
+        finally:
+            model_path.unlink()
+
+
+class TestConfigurableThresholds:
+    """Tests for configurable risk thresholds."""
+
+    def test_default_thresholds(self):
+        """Test that default thresholds are applied."""
+        analyzer = RiskAnalyzer()
+        assert analyzer.thresholds.deep_network_threshold == 50
+        assert analyzer.thresholds.min_nodes_for_depth_check == 20
+        assert analyzer.thresholds.high_flop_ratio_threshold == 0.5
+
+    def test_custom_thresholds_via_dataclass(self):
+        """Test custom thresholds via RiskThresholds dataclass."""
+        custom = RiskThresholds(
+            deep_network_threshold=100,
+            min_nodes_for_depth_check=10,
+            high_flop_ratio_threshold=0.8,
+        )
+        analyzer = RiskAnalyzer(thresholds=custom)
+
+        assert analyzer.thresholds.deep_network_threshold == 100
+        assert analyzer.thresholds.min_nodes_for_depth_check == 10
+        assert analyzer.thresholds.high_flop_ratio_threshold == 0.8
+
+    def test_custom_threshold_affects_detection(self):
+        """Test that custom thresholds change risk detection behavior."""
+        # Create a model with 30 layers (between default 20 min and 50 deep threshold)
+        model = create_deep_no_skip_model(num_layers=30)
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            # Default thresholds: 30 nodes < 50, so shouldn't trigger
+            default_analyzer = RiskAnalyzer()
+            default_signals = default_analyzer.analyze(graph_info, blocks)
+            default_ids = [s.id for s in default_signals]
+            assert "no_skip_connections" not in default_ids
+
+            # Custom thresholds: lower deep_network_threshold to 25
+            custom = RiskThresholds(deep_network_threshold=25)
+            custom_analyzer = RiskAnalyzer(thresholds=custom)
+            custom_signals = custom_analyzer.analyze(graph_info, blocks)
+            custom_ids = [s.id for s in custom_signals]
+            assert "no_skip_connections" in custom_ids
+        finally:
+            model_path.unlink()
+
+    def test_threshold_for_activation_check(self):
+        """Test min_trainable_for_activation_check threshold."""
+        # Create model that would trigger "no_activations" with lower threshold
+        model = create_no_activation_model()
+
+        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
+            onnx.save(model, f.name)
+            model_path = Path(f.name)
+
+        try:
+            loader = ONNXGraphLoader()
+            _, graph_info = loader.load(model_path)
+
+            pattern_analyzer = PatternAnalyzer()
+            blocks = pattern_analyzer.group_into_blocks(graph_info)
+
+            # Very high threshold: should NOT trigger
+            strict = RiskThresholds(min_trainable_for_activation_check=1000)
+            strict_analyzer = RiskAnalyzer(thresholds=strict)
+            strict_signals = strict_analyzer.analyze(graph_info, blocks)
+            strict_ids = [s.id for s in strict_signals]
+            assert "no_activations" not in strict_ids
+        finally:
+            model_path.unlink()
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
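Read end to end, the test file exercises one workflow: load an ONNX model with ONNXGraphLoader, optionally estimate FLOPs with MetricsEngine, group nodes into blocks with PatternAnalyzer, and pass the result to RiskAnalyzer, optionally configured with a RiskThresholds dataclass. The sketch below condenses that workflow into a standalone script. It is a minimal sketch, not part of the package: the tests use relative imports, so the top-level paths haoline.analyzer, haoline.patterns, and haoline.risks are assumptions about the installed layout, and model.onnx is a hypothetical input file.

# Sketch: run HaoLine's risk analysis on an existing ONNX file.
# Import paths are assumed to mirror the relative imports used in the tests above.
from pathlib import Path

from haoline.analyzer import MetricsEngine, ONNXGraphLoader
from haoline.patterns import PatternAnalyzer
from haoline.risks import RiskAnalyzer, RiskThresholds

model_path = Path("model.onnx")  # hypothetical input file

loader = ONNXGraphLoader()
_, graph_info = loader.load(model_path)

# FLOP estimates feed the compute_bottleneck signal, as in test_no_risks_for_small_model.
MetricsEngine().estimate_flops(graph_info)

blocks = PatternAnalyzer().group_into_blocks(graph_info)

# Tighten the depth check relative to the defaults asserted in test_default_thresholds (50 / 20 / 0.5).
thresholds = RiskThresholds(deep_network_threshold=25)
signals = RiskAnalyzer(thresholds=thresholds).analyze(graph_info, blocks)

for signal in signals:
    print(f"[{signal.severity}] {signal.id}: {signal.description}")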