haoline-0.3.0-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. haoline/.streamlit/config.toml +10 -0
  2. haoline/__init__.py +248 -0
  3. haoline/analyzer.py +935 -0
  4. haoline/cli.py +2712 -0
  5. haoline/compare.py +811 -0
  6. haoline/compare_visualizations.py +1564 -0
  7. haoline/edge_analysis.py +525 -0
  8. haoline/eval/__init__.py +131 -0
  9. haoline/eval/adapters.py +844 -0
  10. haoline/eval/cli.py +390 -0
  11. haoline/eval/comparison.py +542 -0
  12. haoline/eval/deployment.py +633 -0
  13. haoline/eval/schemas.py +833 -0
  14. haoline/examples/__init__.py +15 -0
  15. haoline/examples/basic_inspection.py +74 -0
  16. haoline/examples/compare_models.py +117 -0
  17. haoline/examples/hardware_estimation.py +78 -0
  18. haoline/format_adapters.py +1001 -0
  19. haoline/formats/__init__.py +123 -0
  20. haoline/formats/coreml.py +250 -0
  21. haoline/formats/gguf.py +483 -0
  22. haoline/formats/openvino.py +255 -0
  23. haoline/formats/safetensors.py +273 -0
  24. haoline/formats/tflite.py +369 -0
  25. haoline/hardware.py +2307 -0
  26. haoline/hierarchical_graph.py +462 -0
  27. haoline/html_export.py +1573 -0
  28. haoline/layer_summary.py +769 -0
  29. haoline/llm_summarizer.py +465 -0
  30. haoline/op_icons.py +618 -0
  31. haoline/operational_profiling.py +1492 -0
  32. haoline/patterns.py +1116 -0
  33. haoline/pdf_generator.py +265 -0
  34. haoline/privacy.py +250 -0
  35. haoline/pydantic_models.py +241 -0
  36. haoline/report.py +1923 -0
  37. haoline/report_sections.py +539 -0
  38. haoline/risks.py +521 -0
  39. haoline/schema.py +523 -0
  40. haoline/streamlit_app.py +2024 -0
  41. haoline/tests/__init__.py +4 -0
  42. haoline/tests/conftest.py +123 -0
  43. haoline/tests/test_analyzer.py +868 -0
  44. haoline/tests/test_compare_visualizations.py +293 -0
  45. haoline/tests/test_edge_analysis.py +243 -0
  46. haoline/tests/test_eval.py +604 -0
  47. haoline/tests/test_format_adapters.py +460 -0
  48. haoline/tests/test_hardware.py +237 -0
  49. haoline/tests/test_hardware_recommender.py +90 -0
  50. haoline/tests/test_hierarchical_graph.py +326 -0
  51. haoline/tests/test_html_export.py +180 -0
  52. haoline/tests/test_layer_summary.py +428 -0
  53. haoline/tests/test_llm_patterns.py +540 -0
  54. haoline/tests/test_llm_summarizer.py +339 -0
  55. haoline/tests/test_patterns.py +774 -0
  56. haoline/tests/test_pytorch.py +327 -0
  57. haoline/tests/test_report.py +383 -0
  58. haoline/tests/test_risks.py +398 -0
  59. haoline/tests/test_schema.py +417 -0
  60. haoline/tests/test_tensorflow.py +380 -0
  61. haoline/tests/test_visualizations.py +316 -0
  62. haoline/universal_ir.py +856 -0
  63. haoline/visualizations.py +1086 -0
  64. haoline/visualize_yolo.py +44 -0
  65. haoline/web.py +110 -0
  66. haoline-0.3.0.dist-info/METADATA +471 -0
  67. haoline-0.3.0.dist-info/RECORD +70 -0
  68. haoline-0.3.0.dist-info/WHEEL +4 -0
  69. haoline-0.3.0.dist-info/entry_points.txt +5 -0
  70. haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
haoline/tests/test_llm_summarizer.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+Unit tests for the LLM summarizer module.
+
+Tests API client, prompt templates, and graceful error handling.
+Note: Most tests mock the OpenAI API to avoid actual API calls.
+"""
+
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from ..analyzer import FlopCounts, MemoryEstimates, ParamCounts
+from ..llm_summarizer import (
+    DETAILED_SUMMARY_PROMPT,
+    SHORT_SUMMARY_PROMPT,
+    SYSTEM_PROMPT,
+    LLMSummarizer,
+    LLMSummary,
+    has_api_key,
+    is_available,
+    summarize_report,
+)
+from ..report import GraphSummary, InspectionReport, ModelMetadata
+
+
+class TestLLMAvailability:
+    """Tests for availability checks."""
+
+    def test_is_available_returns_bool(self):
+        """is_available() should return a boolean."""
+        result = is_available()
+        assert isinstance(result, bool)
+
+    def test_has_api_key_returns_bool(self):
+        """has_api_key() should return a boolean."""
+        result = has_api_key()
+        assert isinstance(result, bool)
+
+    def test_has_api_key_checks_env_var(self):
+        """has_api_key() should check OPENAI_API_KEY env var."""
+        # Save original value
+        original = os.environ.get("OPENAI_API_KEY")
+
+        try:
+            # Test with key set
+            os.environ["OPENAI_API_KEY"] = "test-key"
+            assert has_api_key() is True
+
+            # Test with key unset
+            del os.environ["OPENAI_API_KEY"]
+            assert has_api_key() is False
+        finally:
+            # Restore original
+            if original:
+                os.environ["OPENAI_API_KEY"] = original
+
+
+class TestLLMSummaryDataclass:
+    """Tests for the LLMSummary dataclass."""
+
+    def test_summary_creation(self):
+        """LLMSummary should be created with all fields."""
+        summary = LLMSummary(
+            short_summary="Test short",
+            detailed_summary="Test detailed",
+            model_used="gpt-4o-mini",
+            tokens_used=100,
+            success=True,
+        )
+        assert summary.short_summary == "Test short"
+        assert summary.detailed_summary == "Test detailed"
+        assert summary.model_used == "gpt-4o-mini"
+        assert summary.tokens_used == 100
+        assert summary.success is True
+        assert summary.error_message is None
+
+    def test_summary_with_error(self):
+        """LLMSummary should handle error state."""
+        summary = LLMSummary(
+            short_summary="",
+            detailed_summary="",
+            model_used="",
+            tokens_used=0,
+            success=False,
+            error_message="API error",
+        )
+        assert summary.success is False
+        assert summary.error_message == "API error"
+
+
+class TestPromptTemplates:
+    """Tests for prompt templates."""
+
+    def test_system_prompt_exists(self):
+        """System prompt should be defined."""
+        assert SYSTEM_PROMPT
+        assert "ML engineer" in SYSTEM_PROMPT
+
+    def test_short_summary_prompt_has_placeholder(self):
+        """Short summary prompt should have report_json placeholder."""
+        assert "{report_json}" in SHORT_SUMMARY_PROMPT
+        assert "1-2 sentence" in SHORT_SUMMARY_PROMPT
+
+    def test_detailed_summary_prompt_has_placeholder(self):
+        """Detailed summary prompt should have report_json placeholder."""
+        assert "{report_json}" in DETAILED_SUMMARY_PROMPT
+        assert "paragraph" in DETAILED_SUMMARY_PROMPT
+
+
+class TestLLMSummarizer:
+    """Tests for the LLMSummarizer class."""
+
+    def test_summarizer_initialization(self):
+        """Summarizer should initialize without errors."""
+        summarizer = LLMSummarizer()
+        assert summarizer is not None
+        assert summarizer.logger is not None
+
+    def test_summarizer_default_model(self):
+        """Summarizer should use default model."""
+        summarizer = LLMSummarizer()
+        assert summarizer.model == "gpt-4o-mini"
+
+    def test_summarizer_custom_model(self):
+        """Summarizer should accept custom model."""
+        summarizer = LLMSummarizer(model="gpt-4o")
+        assert summarizer.model == "gpt-4o"
+
+    def test_is_configured_without_key(self):
+        """is_configured should return False without API key."""
+        # Save and clear env var
+        original = os.environ.get("OPENAI_API_KEY")
+        if "OPENAI_API_KEY" in os.environ:
+            del os.environ["OPENAI_API_KEY"]
+
+        try:
+            summarizer = LLMSummarizer()
+            assert summarizer.is_configured() is False
+        finally:
+            if original:
+                os.environ["OPENAI_API_KEY"] = original
+
+    def test_summarize_returns_error_when_not_configured(self):
+        """summarize() should return error LLMSummary when not configured."""
+        # Save and clear env var
+        original = os.environ.get("OPENAI_API_KEY")
+        if "OPENAI_API_KEY" in os.environ:
+            del os.environ["OPENAI_API_KEY"]
+
+        try:
+            metadata = ModelMetadata(
+                path="test.onnx",
+                ir_version=8,
+                producer_name="test",
+                producer_version="1.0",
+                domain="",
+                model_version=1,
+                doc_string="",
+                opsets={"ai.onnx": 17},
+            )
+
+            report = InspectionReport(metadata=metadata)
+
+            summarizer = LLMSummarizer()
+            result = summarizer.summarize(report)
+
+            assert result.success is False
+            assert "not configured" in result.error_message.lower()
+        finally:
+            if original:
+                os.environ["OPENAI_API_KEY"] = original
+
+
+class TestReportPreparation:
+    """Tests for report preparation for LLM consumption."""
+
+    def test_prepare_report_for_prompt(self):
+        """_prepare_report_for_prompt should create valid JSON."""
+        metadata = ModelMetadata(
+            path="models/test.onnx",
+            ir_version=8,
+            producer_name="pytorch",
+            producer_version="2.0",
+            domain="",
+            model_version=1,
+            doc_string="",
+            opsets={"ai.onnx": 17},
+        )
+
+        graph_summary = GraphSummary(
+            num_nodes=100,
+            num_inputs=1,
+            num_outputs=1,
+            num_initializers=50,
+            input_shapes={"input": [1, 3, 224, 224]},
+            output_shapes={"output": [1, 1000]},
+            op_type_counts={"Conv": 50, "Relu": 48},
+        )
+
+        param_counts = ParamCounts(
+            total=25000000,
+            trainable=25000000,
+            by_op_type={"Conv": 20000000},
+        )
+
+        flop_counts = FlopCounts(
+            total=4000000000,
+            by_op_type={"Conv": 3500000000},
+        )
+
+        memory_estimates = MemoryEstimates(
+            model_size_bytes=100000000,
+            peak_activation_bytes=50000000,
+        )
+
+        report = InspectionReport(
+            metadata=metadata,
+            graph_summary=graph_summary,
+            param_counts=param_counts,
+            flop_counts=flop_counts,
+            memory_estimates=memory_estimates,
+            architecture_type="cnn",
+        )
+
+        summarizer = LLMSummarizer()
+        json_str = summarizer._prepare_report_for_prompt(report)
+
+        import json
+
+        parsed = json.loads(json_str)
+
+        assert "model_name" in parsed
+        assert parsed["model_name"] == "test.onnx"
+        assert "graph" in parsed
+        assert parsed["graph"]["nodes"] == 100
+        assert "parameters" in parsed
+        assert parsed["parameters"]["total"] == 25000000
+
+
+class TestConvenienceFunction:
+    """Tests for the summarize_report convenience function."""
+
+    def test_summarize_report_function(self):
+        """summarize_report should work as convenience function."""
+        # Save and clear env var
+        original = os.environ.get("OPENAI_API_KEY")
+        if "OPENAI_API_KEY" in os.environ:
+            del os.environ["OPENAI_API_KEY"]
+
+        try:
+            metadata = ModelMetadata(
+                path="test.onnx",
+                ir_version=8,
+                producer_name="test",
+                producer_version="1.0",
+                domain="",
+                model_version=1,
+                doc_string="",
+                opsets={},
+            )
+
+            report = InspectionReport(metadata=metadata)
+            result = summarize_report(report)
+
+            assert isinstance(result, LLMSummary)
+            assert result.success is False  # No API key
+        finally:
+            if original:
+                os.environ["OPENAI_API_KEY"] = original
+
+
+@pytest.mark.skipif(
+    not is_available() or not has_api_key(),
+    reason="OpenAI not installed or API key not set",
+)
+class TestLLMIntegration:
+    """Integration tests that make real API calls (skipped without API key)."""
+
+    def test_real_summarization(self):
+        """Test actual LLM summarization with real API."""
+        metadata = ModelMetadata(
+            path="resnet50.onnx",
+            ir_version=8,
+            producer_name="pytorch",
+            producer_version="2.0",
+            domain="",
+            model_version=1,
+            doc_string="",
+            opsets={"ai.onnx": 17},
+        )
+
+        graph_summary = GraphSummary(
+            num_nodes=150,
+            num_inputs=1,
+            num_outputs=1,
+            num_initializers=100,
+            input_shapes={"input": [1, 3, 224, 224]},
+            output_shapes={"output": [1, 1000]},
+            op_type_counts={"Conv": 53, "Relu": 49, "Add": 16, "MaxPool": 1},
+        )
+
+        param_counts = ParamCounts(
+            total=25557032,
+            trainable=25557032,
+            by_op_type={"Conv": 23000000, "Gemm": 2500000},
+        )
+
+        flop_counts = FlopCounts(
+            total=4100000000,
+            by_op_type={"Conv": 4000000000},
+        )
+
+        memory_estimates = MemoryEstimates(
+            model_size_bytes=100000000,
+            peak_activation_bytes=50000000,
+        )
+
+        report = InspectionReport(
+            metadata=metadata,
+            graph_summary=graph_summary,
+            param_counts=param_counts,
+            flop_counts=flop_counts,
+            memory_estimates=memory_estimates,
+            architecture_type="cnn",
+        )
+
+        summarizer = LLMSummarizer()
+        result = summarizer.summarize(report)
+
+        assert result.success is True
+        assert len(result.short_summary) > 0
+        assert len(result.detailed_summary) > 0
+        assert result.tokens_used > 0
+        assert result.model_used == "gpt-4o-mini"
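
For context, a minimal usage sketch of the API these tests exercise. The top-level import paths (haoline.llm_summarizer, haoline.report) are assumed from the package layout in the file list above; only calls, parameters, and fields that appear in the tests are used, and OPENAI_API_KEY is assumed to configure the client.

# Hypothetical usage sketch, not part of the packaged code.
from haoline.llm_summarizer import (  # import path assumed from package layout
    LLMSummarizer,
    has_api_key,
    is_available,
    summarize_report,
)
from haoline.report import InspectionReport, ModelMetadata

# Build a minimal report, mirroring the fixtures used in the tests.
metadata = ModelMetadata(
    path="model.onnx",
    ir_version=8,
    producer_name="pytorch",
    producer_version="2.0",
    domain="",
    model_version=1,
    doc_string="",
    opsets={"ai.onnx": 17},
)
report = InspectionReport(metadata=metadata)

if is_available() and has_api_key():
    # openai installed and OPENAI_API_KEY set: real summarization call.
    result = LLMSummarizer(model="gpt-4o-mini").summarize(report)
else:
    # Unconfigured: returns an LLMSummary with success=False and error_message set.
    result = summarize_report(report)

print(result.success, result.short_summary or result.error_message)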