haoline 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- haoline/.streamlit/config.toml +10 -0
- haoline/__init__.py +248 -0
- haoline/analyzer.py +935 -0
- haoline/cli.py +2712 -0
- haoline/compare.py +811 -0
- haoline/compare_visualizations.py +1564 -0
- haoline/edge_analysis.py +525 -0
- haoline/eval/__init__.py +131 -0
- haoline/eval/adapters.py +844 -0
- haoline/eval/cli.py +390 -0
- haoline/eval/comparison.py +542 -0
- haoline/eval/deployment.py +633 -0
- haoline/eval/schemas.py +833 -0
- haoline/examples/__init__.py +15 -0
- haoline/examples/basic_inspection.py +74 -0
- haoline/examples/compare_models.py +117 -0
- haoline/examples/hardware_estimation.py +78 -0
- haoline/format_adapters.py +1001 -0
- haoline/formats/__init__.py +123 -0
- haoline/formats/coreml.py +250 -0
- haoline/formats/gguf.py +483 -0
- haoline/formats/openvino.py +255 -0
- haoline/formats/safetensors.py +273 -0
- haoline/formats/tflite.py +369 -0
- haoline/hardware.py +2307 -0
- haoline/hierarchical_graph.py +462 -0
- haoline/html_export.py +1573 -0
- haoline/layer_summary.py +769 -0
- haoline/llm_summarizer.py +465 -0
- haoline/op_icons.py +618 -0
- haoline/operational_profiling.py +1492 -0
- haoline/patterns.py +1116 -0
- haoline/pdf_generator.py +265 -0
- haoline/privacy.py +250 -0
- haoline/pydantic_models.py +241 -0
- haoline/report.py +1923 -0
- haoline/report_sections.py +539 -0
- haoline/risks.py +521 -0
- haoline/schema.py +523 -0
- haoline/streamlit_app.py +2024 -0
- haoline/tests/__init__.py +4 -0
- haoline/tests/conftest.py +123 -0
- haoline/tests/test_analyzer.py +868 -0
- haoline/tests/test_compare_visualizations.py +293 -0
- haoline/tests/test_edge_analysis.py +243 -0
- haoline/tests/test_eval.py +604 -0
- haoline/tests/test_format_adapters.py +460 -0
- haoline/tests/test_hardware.py +237 -0
- haoline/tests/test_hardware_recommender.py +90 -0
- haoline/tests/test_hierarchical_graph.py +326 -0
- haoline/tests/test_html_export.py +180 -0
- haoline/tests/test_layer_summary.py +428 -0
- haoline/tests/test_llm_patterns.py +540 -0
- haoline/tests/test_llm_summarizer.py +339 -0
- haoline/tests/test_patterns.py +774 -0
- haoline/tests/test_pytorch.py +327 -0
- haoline/tests/test_report.py +383 -0
- haoline/tests/test_risks.py +398 -0
- haoline/tests/test_schema.py +417 -0
- haoline/tests/test_tensorflow.py +380 -0
- haoline/tests/test_visualizations.py +316 -0
- haoline/universal_ir.py +856 -0
- haoline/visualizations.py +1086 -0
- haoline/visualize_yolo.py +44 -0
- haoline/web.py +110 -0
- haoline-0.3.0.dist-info/METADATA +471 -0
- haoline-0.3.0.dist-info/RECORD +70 -0
- haoline-0.3.0.dist-info/WHEEL +4 -0
- haoline-0.3.0.dist-info/entry_points.txt +5 -0
- haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
haoline/streamlit_app.py
ADDED
|
@@ -0,0 +1,2024 @@
|
|
|
1
|
+
# Copyright (c) 2025 HaoLine Contributors
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
|
|
4
|
+
"""
|
|
5
|
+
HaoLine Streamlit Web UI.
|
|
6
|
+
|
|
7
|
+
A web interface for analyzing neural network models without installing anything.
|
|
8
|
+
Upload an ONNX model, get instant architecture analysis with interactive visualizations.
|
|
9
|
+
|
|
10
|
+
Run locally:
|
|
11
|
+
streamlit run streamlit_app.py
|
|
12
|
+
|
|
13
|
+
Deploy to HuggingFace Spaces or Streamlit Cloud for public access.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import os
|
|
17
|
+
import tempfile
|
|
18
|
+
from dataclasses import dataclass
|
|
19
|
+
from datetime import datetime
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
import streamlit as st
|
|
24
|
+
|
|
25
|
+
# Page config must be the first Streamlit command executed in the script;
# Streamlit raises if any other st.* call happens before it.
st.set_page_config(
    page_title="HaoLine - Model Inspector",
    page_icon="🔬",  # microscope emoji shown in the browser tab
    layout="wide",  # use full browser width for the report tables/graphs
    initial_sidebar_state="expanded",
)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class AnalysisResult:
    """One completed model analysis, retained in the per-session history."""

    name: str  # display name (typically the uploaded file name)
    timestamp: datetime  # when the analysis ran
    report: Any  # InspectionReport produced by the inspector
    file_size: int  # size of the uploaded model file, in bytes

    @property
    def summary(self) -> str:
        """Return a compact "<params> params, <flops> FLOPs" line for UI lists."""
        if self.report.param_counts:
            params = self.report.param_counts.total
        else:
            params = 0
        if self.report.flop_counts:
            flops = self.report.flop_counts.total
        else:
            flops = 0
        return f"{format_number(params)} params, {format_number(flops)} FLOPs"
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def init_session_state():
    """Ensure every session-state key the app relies on exists.

    Safe to call on every rerun: only keys that are missing get their
    default value; existing state is never overwritten.
    """
    defaults = {
        "analysis_history": [],
        "compare_models": {"model_a": None, "model_b": None},
        "current_mode": "analyze",  # "analyze" or "compare"
        "demo_model": None,  # (bytes, name) tuple once a demo download is requested
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def add_to_history(name: str, report: Any, file_size: int) -> AnalysisResult:
    """Record a finished analysis at the front of the session history.

    History is capped at 10 entries (newest first); the oldest entry is
    dropped once the cap is exceeded. Returns the new AnalysisResult.
    """
    entry = AnalysisResult(
        name=name,
        timestamp=datetime.now(),
        report=report,
        file_size=file_size,
    )
    history = st.session_state.analysis_history
    history.insert(0, entry)  # newest first
    if len(history) > 10:
        history.pop()  # drop the oldest entry
    return entry
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
# Import haoline after page config
|
|
79
|
+
import streamlit.components.v1 as components
|
|
80
|
+
|
|
81
|
+
from haoline import ModelInspector, __version__
|
|
82
|
+
|
|
83
|
+
# Demo models from ONNX Model Zoo (small, real models).
# Keys are the short ids used by download_demo_model(); each value carries the
# display name, the filename to report to the analyzer, the raw GitHub download
# URL, and a human-readable description/size string for the picker UI.
DEMO_MODELS = {
    "mnist": {
        "name": "MNIST CNN",
        "file": "mnist-12.onnx",
        "url": "https://github.com/onnx/models/raw/main/validated/vision/classification/mnist/model/mnist-12.onnx",
        "description": "Tiny CNN for handwritten digits (26 KB)",
        "size": "26 KB",
    },
    "squeezenet": {
        "name": "SqueezeNet 1.0",
        "file": "squeezenet1.0-12.onnx",
        "url": "https://github.com/onnx/models/raw/main/validated/vision/classification/squeezenet/model/squeezenet1.0-12.onnx",
        "description": "Compact CNN for ImageNet (5 MB)",
        "size": "5 MB",
    },
    "efficientnet": {
        "name": "EfficientNet-Lite4",
        "file": "efficientnet-lite4-11.onnx",
        "url": "https://github.com/onnx/models/raw/main/validated/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx",
        "description": "Efficient CNN architecture (49 MB)",
        "size": "49 MB",
    },
}
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def download_demo_model(model_key: str) -> tuple[bytes, str]:
    """Fetch one of the bundled demo models from the ONNX Model Zoo.

    Args:
        model_key: Key into the DEMO_MODELS registry.

    Returns:
        Tuple of (raw model bytes, filename to present to the analyzer).
    """
    import urllib.request

    info = DEMO_MODELS[model_key]

    # 30s timeout so a stalled GitHub download doesn't hang the UI forever.
    with urllib.request.urlopen(info["url"], timeout=30) as response:
        payload = response.read()

    return payload, info["file"]
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
from haoline.analyzer import ONNXGraphLoader
|
|
132
|
+
from haoline.edge_analysis import EdgeAnalyzer
|
|
133
|
+
from haoline.hardware import (
|
|
134
|
+
HARDWARE_PROFILES,
|
|
135
|
+
HardwareEstimator,
|
|
136
|
+
detect_local_hardware,
|
|
137
|
+
get_profile,
|
|
138
|
+
)
|
|
139
|
+
from haoline.hierarchical_graph import HierarchicalGraphBuilder
|
|
140
|
+
from haoline.html_export import generate_html as generate_graph_html
|
|
141
|
+
from haoline.patterns import PatternAnalyzer
|
|
142
|
+
|
|
143
|
+
# Custom CSS - Sleek dark theme with mint/emerald accents.
# Injected once at import time. Targets Streamlit's data-testid DOM hooks,
# which are not a stable public API — NOTE(review): selectors may need
# updating when the Streamlit version (and its generated DOM) changes.
st.markdown(
    """
<style>
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');

/* Root variables for consistency */
:root {
    --bg-primary: #0d0d0d;
    --bg-secondary: #161616;
    --bg-tertiary: #1f1f1f;
    --bg-card: #1a1a1a;
    --accent-primary: #10b981;
    --accent-secondary: #34d399;
    --accent-glow: rgba(16, 185, 129, 0.3);
    --text-primary: #f5f5f5;
    --text-secondary: #a3a3a3;
    --text-muted: #737373;
    --border-subtle: rgba(255, 255, 255, 0.08);
    --border-accent: rgba(16, 185, 129, 0.3);
}

/* Global app background */
.stApp {
    background: var(--bg-primary);
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
}

/* Sidebar styling */
[data-testid="stSidebar"] {
    background: var(--bg-secondary) !important;
    border-right: 1px solid var(--border-subtle);
}

[data-testid="stSidebar"] > div {
    background: transparent !important;
}

/* Header styling */
.main-header {
    font-family: 'Inter', sans-serif;
    background: linear-gradient(135deg, #10b981 0%, #34d399 50%, #6ee7b7 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    font-size: 3.5rem;
    font-weight: 700;
    text-align: center;
    margin-bottom: 0;
    letter-spacing: -0.03em;
}

.sub-header {
    text-align: center;
    color: var(--text-secondary);
    font-size: 1.1rem;
    font-weight: 400;
    margin-top: 0.5rem;
    margin-bottom: 2.5rem;
    letter-spacing: 0.02em;
}

/* Metric styling */
[data-testid="stMetricValue"] {
    color: var(--accent-primary) !important;
    font-weight: 600 !important;
    font-size: 2rem !important;
}

[data-testid="stMetricLabel"] {
    color: var(--text-secondary) !important;
    font-weight: 500 !important;
    text-transform: uppercase;
    letter-spacing: 0.05em;
    font-size: 0.75rem !important;
}

/* Hide Streamlit branding */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}

/* Text colors */
.stMarkdown, .stText, p, span, label, li {
    color: var(--text-primary) !important;
}

h1, h2, h3, h4, h5, h6 {
    color: var(--text-primary) !important;
    font-weight: 600 !important;
}

/* Sidebar - remove top padding */
[data-testid="stSidebar"] [data-testid="stVerticalBlockBorderWrapper"]:first-child {
    padding-top: 0 !important;
}

[data-testid="stSidebar"] > div:first-child {
    padding-top: 1rem !important;
}

/* Sidebar section headers */
[data-testid="stSidebar"] h4,
[data-testid="stSidebar"] .stMarkdown h3 {
    color: var(--accent-primary) !important;
    font-size: 0.8rem !important;
    text-transform: uppercase;
    letter-spacing: 0.1em;
    margin-top: 1rem !important;
    margin-bottom: 0.5rem !important;
    font-weight: 600 !important;
}

/* First header in sidebar - no top margin */
[data-testid="stSidebar"] .stMarkdown:first-of-type h3 {
    margin-top: 0 !important;
}

/* Tighten sidebar dividers */
[data-testid="stSidebar"] hr {
    margin: 0.75rem 0 !important;
}

/* Input fields */
.stTextInput input, .stSelectbox > div > div {
    background: var(--bg-tertiary) !important;
    border: 1px solid var(--border-subtle) !important;
    border-radius: 8px !important;
    color: var(--text-primary) !important;
    transition: all 0.2s ease;
}

.stTextInput input:focus {
    border-color: var(--accent-primary) !important;
    box-shadow: 0 0 0 2px var(--accent-glow) !important;
}

/* Checkboxes */
.stCheckbox label span {
    color: var(--text-primary) !important;
}

[data-testid="stCheckbox"] > label > div:first-child {
    background: var(--bg-tertiary) !important;
    border-color: var(--border-subtle) !important;
}

[data-testid="stCheckbox"][aria-checked="true"] > label > div:first-child {
    background: var(--accent-primary) !important;
    border-color: var(--accent-primary) !important;
}

/* Tabs - modern pill style */
.stTabs [data-baseweb="tab-list"] {
    gap: 4px;
    background: var(--bg-tertiary);
    padding: 4px;
    border-radius: 12px;
}

.stTabs [data-baseweb="tab"] {
    background: transparent !important;
    border-radius: 8px !important;
    color: var(--text-secondary) !important;
    font-weight: 500 !important;
    padding: 8px 16px !important;
    border: none !important;
}

.stTabs [aria-selected="true"] {
    background: var(--accent-primary) !important;
    color: var(--bg-primary) !important;
}

.stTabs [data-baseweb="tab"]:hover:not([aria-selected="true"]) {
    background: rgba(255, 255, 255, 0.05) !important;
    color: var(--text-primary) !important;
}

/* File uploader - clean dark style */
[data-testid="stFileUploader"] {
    background: transparent !important;
}

[data-testid="stFileUploader"] section {
    background: var(--bg-secondary) !important;
    border: 2px dashed var(--border-accent) !important;
    border-radius: 16px !important;
    padding: 2.5rem 2rem !important;
    transition: all 0.3s ease;
}

[data-testid="stFileUploader"] section:hover {
    border-color: var(--accent-primary) !important;
    background: rgba(16, 185, 129, 0.05) !important;
}

[data-testid="stFileUploader"] section div,
[data-testid="stFileUploader"] section span {
    color: var(--text-secondary) !important;
}

[data-testid="stFileUploader"] button {
    background: var(--accent-primary) !important;
    color: var(--bg-primary) !important;
    border: none !important;
    border-radius: 8px !important;
    padding: 0.6rem 1.5rem !important;
    font-weight: 600 !important;
    transition: all 0.2s ease;
}

[data-testid="stFileUploader"] button:hover {
    background: var(--accent-secondary) !important;
    transform: translateY(-1px);
    box-shadow: 0 4px 12px var(--accent-glow);
}

/* Alerts - amber for warnings, mint for info */
.stAlert {
    border-radius: 12px !important;
    border: none !important;
}

[data-testid="stNotificationContentWarning"] {
    background: rgba(251, 191, 36, 0.1) !important;
    border-left: 4px solid #fbbf24 !important;
}

[data-testid="stNotificationContentWarning"] p {
    color: #fcd34d !important;
}

[data-testid="stNotificationContentInfo"] {
    background: rgba(16, 185, 129, 0.1) !important;
    border-left: 4px solid var(--accent-primary) !important;
}

[data-testid="stNotificationContentInfo"] p {
    color: var(--accent-secondary) !important;
}

[data-testid="stNotificationContentError"] {
    background: rgba(239, 68, 68, 0.1) !important;
    border-left: 4px solid #ef4444 !important;
}

[data-testid="stNotificationContentError"] p {
    color: #fca5a5 !important;
}

/* Expanders */
.streamlit-expanderHeader {
    background: var(--bg-tertiary) !important;
    border-radius: 8px !important;
    border: 1px solid var(--border-subtle) !important;
}

.streamlit-expanderHeader:hover {
    border-color: var(--accent-primary) !important;
}

/* Caption/muted text */
.stCaption, small {
    color: var(--text-muted) !important;
}

/* Download buttons */
.stDownloadButton button {
    background: var(--bg-tertiary) !important;
    color: var(--text-primary) !important;
    border: 1px solid var(--border-subtle) !important;
    border-radius: 8px !important;
    font-weight: 500 !important;
    transition: all 0.2s ease;
}

.stDownloadButton button:hover {
    background: var(--accent-primary) !important;
    color: var(--bg-primary) !important;
    border-color: var(--accent-primary) !important;
}

/* Dividers */
hr {
    border-color: var(--border-subtle) !important;
}

/* Code blocks */
code {
    background: var(--bg-tertiary) !important;
    color: var(--accent-secondary) !important;
    padding: 2px 6px !important;
    border-radius: 4px !important;
}

/* Links */
a {
    color: var(--accent-primary) !important;
}

a:hover {
    color: var(--accent-secondary) !important;
}

/* Uploaded file chip */
[data-testid="stFileUploaderFile"] {
    background: var(--bg-tertiary) !important;
    border: 1px solid var(--border-subtle) !important;
    border-radius: 8px !important;
}

[data-testid="stFileUploaderFile"] button {
    background: transparent !important;
    color: var(--text-secondary) !important;
}

[data-testid="stFileUploaderFile"] button:hover {
    color: #ef4444 !important;
    background: rgba(239, 68, 68, 0.1) !important;
}

/* Spinner */
.stSpinner > div {
    border-top-color: var(--accent-primary) !important;
}

/* Privacy notice */
.privacy-notice {
    background: rgba(16, 185, 129, 0.08);
    border-left: 3px solid var(--accent-primary);
    padding: 0.75rem 1rem;
    border-radius: 0 8px 8px 0;
    font-size: 0.85rem;
    color: var(--text-secondary);
}

/* Scrollbar */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: var(--bg-secondary);
}

::-webkit-scrollbar-thumb {
    background: var(--bg-tertiary);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: var(--text-muted);
}
</style>
""",
    unsafe_allow_html=True,
)
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
# Helper functions (defined early for use in dataclasses)
|
|
504
|
+
def format_number(n: float) -> str:
    """Format a magnitude with a K/M/B suffix (two decimals above 1000)."""
    # Largest threshold first so e.g. 2.5e9 renders as "2.50B", not "2500.00M".
    for threshold, suffix in ((1e9, "B"), (1e6, "M"), (1e3, "K")):
        if n >= threshold:
            return f"{n / threshold:.2f}{suffix}"
    return f"{n:.0f}"
|
|
514
|
+
|
|
515
|
+
|
|
516
|
+
def format_bytes(b: float) -> str:
    """Format a byte count with KB/MB/GB suffixes (decimal units, not binary)."""
    # Largest threshold first so 2.5e9 renders as "2.50 GB", not "2500.00 MB".
    for threshold, unit in ((1e9, "GB"), (1e6, "MB"), (1e3, "KB")):
        if b >= threshold:
            return f"{b / threshold:.2f} {unit}"
    return f"{b:.0f} B"
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
def render_comparison_view(model_a: AnalysisResult, model_b: AnalysisResult) -> None:
    """Render a CLI-style quantization/size comparison report for two analyzed models.

    Treats ``model_a`` as the baseline and ``model_b`` as the variant: infers
    precision from bytes-per-parameter, tabulates size/params/FLOPs deltas,
    charts memory and operator distributions, and (when both reports carry a
    Universal IR graph) runs a structural diff.
    """
    import pandas as pd

    # Extract all metrics; missing report sections fall back to 0 so the view
    # still renders for partially analyzed models.
    params_a = model_a.report.param_counts.total if model_a.report.param_counts else 0
    params_b = model_b.report.param_counts.total if model_b.report.param_counts else 0
    flops_a = model_a.report.flop_counts.total if model_a.report.flop_counts else 0
    flops_b = model_b.report.flop_counts.total if model_b.report.flop_counts else 0
    size_a = (
        model_a.report.memory_estimates.model_size_bytes if model_a.report.memory_estimates else 0
    )
    size_b = (
        model_b.report.memory_estimates.model_size_bytes if model_b.report.memory_estimates else 0
    )
    ops_a = model_a.report.graph_summary.num_nodes
    ops_b = model_b.report.graph_summary.num_nodes

    # Precision detection: average bytes stored per parameter is a proxy for
    # the weight dtype (1B ~ INT8, 2B ~ FP16, 4B ~ FP32).
    bytes_per_param_a = (size_a / params_a) if params_a > 0 else 0
    bytes_per_param_b = (size_b / params_b) if params_b > 0 else 0

    def get_precision(bpp: float) -> str:
        # Thresholds sit halfway between dtype sizes to absorb metadata overhead.
        if bpp < 1.5:
            return "INT8"
        elif bpp < 2.5:
            return "FP16"
        elif bpp < 4.5:
            return "FP32"
        return "FP64"

    precision_a = get_precision(bytes_per_param_a)
    precision_b = get_precision(bytes_per_param_b)

    # Size ratio (B relative to A); 1.0 when the baseline size is unknown.
    size_ratio = size_b / size_a if size_a > 0 else 1.0

    # Title
    st.markdown(
        f"""
        <h2 style="margin-bottom: 0.25rem;">Quantization Impact Report</h2>
        <p style="color: #a3a3a3; font-size: 0.9rem;">
            Baseline: <strong>{model_a.name}</strong> ({precision_a})
        </p>
        """,
        unsafe_allow_html=True,
    )

    # Trade-off Analysis box
    st.markdown("### Trade-off Analysis")

    # Determine best characteristics (ties go to model A).
    smaller = model_b.name if size_b < size_a else model_a.name
    fewer_params = model_b.name if params_b < params_a else model_a.name
    fewer_flops = model_b.name if flops_b < flops_a else model_a.name

    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric(
            "Smallest",
            smaller,
            f"{(1 - min(size_a, size_b) / max(size_a, size_b)) * 100:.1f}% smaller",
        )
    with col2:
        st.metric("Fewest Params", fewer_params)
    with col3:
        st.metric("Fewest FLOPs", fewer_flops)

    # Recommendations: heuristic bullets derived from the size ratio and
    # detected precisions.
    st.markdown("#### Recommendations")
    recommendations = []

    if precision_a != precision_b:
        if size_ratio < 0.6:
            recommendations.append(
                f"**{model_b.name}** offers **{(1 - size_ratio) * 100:.0f}% smaller** model size "
                f"({precision_a} → {precision_b})"
            )
        if size_ratio > 1.4:
            recommendations.append(
                f"**{model_a.name}** is **{(1 - 1 / size_ratio) * 100:.0f}% smaller** than {model_b.name}"
            )

    # Flag ratios that match the textbook FP16 (~0.5x) / INT8 (~0.25x) savings.
    if abs(size_ratio - 0.5) < 0.1 and precision_b == "FP16":
        recommendations.append(
            "FP16 achieves expected ~50% size reduction with minimal accuracy impact"
        )
    elif abs(size_ratio - 0.25) < 0.1 and precision_b == "INT8":
        recommendations.append(
            "INT8 achieves expected ~75% size reduction - verify accuracy on your dataset"
        )

    if params_a == params_b and flops_a == flops_b:
        recommendations.append("Same architecture - only precision/quantization differs")

    if not recommendations:
        recommendations.append("Models have similar characteristics")

    for rec in recommendations:
        st.markdown(f"- {rec}")

    st.markdown("---")

    # Variant Comparison Table (CLI-style)
    st.markdown("### Variant Comparison")

    table_data = [
        {
            "Model": model_a.name,
            "Precision": precision_a,
            "Size": format_bytes(size_a),
            "Params": format_number(params_a),
            "FLOPs": format_number(flops_a),
            "Size vs Baseline": "baseline",
            "Ops": ops_a,
        },
        {
            "Model": model_b.name,
            "Precision": precision_b,
            "Size": format_bytes(size_b),
            "Params": format_number(params_b),
            "FLOPs": format_number(flops_b),
            "Size vs Baseline": f"{size_ratio:.2f}x ({(size_ratio - 1) * 100:+.1f}%)",
            "Ops": ops_b,
        },
    ]

    df = pd.DataFrame(table_data)
    st.dataframe(
        df,
        use_container_width=True,
        hide_index=True,
        column_config={
            "Model": st.column_config.TextColumn("Model", width="medium"),
            "Precision": st.column_config.TextColumn("Precision", width="small"),
            "Size": st.column_config.TextColumn("Size", width="small"),
            "Params": st.column_config.TextColumn("Params", width="small"),
            "FLOPs": st.column_config.TextColumn("FLOPs", width="small"),
            "Size vs Baseline": st.column_config.TextColumn("Δ Size", width="medium"),
            "Ops": st.column_config.NumberColumn("Ops", width="small"),
        },
    )

    st.markdown("---")

    # Memory Savings visualization
    st.markdown("### Memory Comparison")

    col1, col2 = st.columns(2)

    with col1:
        # Size comparison bar
        size_data = pd.DataFrame(
            {"Model": [model_a.name, model_b.name], "Size (MB)": [size_a / 1e6, size_b / 1e6]}
        )
        st.bar_chart(size_data.set_index("Model"), height=200)
        st.caption("Model Size (weights)")

    with col2:
        # Savings indicator: green card for a reduction, red for an increase.
        if size_a > size_b:
            savings_pct = (1 - size_b / size_a) * 100
            savings_bytes = size_a - size_b
            st.markdown(
                f"""
                <div style="background: linear-gradient(135deg, rgba(16, 185, 129, 0.2) 0%, rgba(5, 150, 105, 0.1) 100%);
                    border: 1px solid rgba(16, 185, 129, 0.3); border-radius: 12px; padding: 1.5rem; text-align: center;">
                    <div style="font-size: 2rem; font-weight: 700; color: #10b981;">{savings_pct:.1f}%</div>
                    <div style="color: #a3a3a3; font-size: 0.9rem;">Size Reduction</div>
                    <div style="color: #6b7280; font-size: 0.8rem; margin-top: 0.5rem;">
                        Saves {format_bytes(savings_bytes)}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif size_b > size_a:
            increase_pct = (size_b / size_a - 1) * 100
            st.markdown(
                f"""
                <div style="background: linear-gradient(135deg, rgba(239, 68, 68, 0.2) 0%, rgba(220, 38, 38, 0.1) 100%);
                    border: 1px solid rgba(239, 68, 68, 0.3); border-radius: 12px; padding: 1.5rem; text-align: center;">
                    <div style="font-size: 2rem; font-weight: 700; color: #ef4444;">+{increase_pct:.1f}%</div>
                    <div style="color: #a3a3a3; font-size: 0.9rem;">Size Increase</div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        else:
            st.info("Models have identical size")

    st.markdown("---")

    # Operator Distribution
    st.markdown("### Operator Distribution")

    ops_a_dict = model_a.report.graph_summary.op_type_counts or {}
    ops_b_dict = model_b.report.graph_summary.op_type_counts or {}
    all_ops = sorted(set(ops_a_dict.keys()) | set(ops_b_dict.keys()))

    if all_ops:
        op_data = []
        for op in all_ops:
            count_a = ops_a_dict.get(op, 0)
            count_b = ops_b_dict.get(op, 0)
            if count_a > 0 or count_b > 0:
                op_data.append({"Operator": op, model_a.name: count_a, model_b.name: count_b})

        op_df = pd.DataFrame(op_data)
        st.bar_chart(op_df.set_index("Operator"), height=300)

        with st.expander("View operator details"):
            # Add difference column (mutates op_data rows in place; op_df was
            # already built from them above, so the chart is unaffected).
            for row in op_data:
                row["Difference"] = row[model_b.name] - row[model_a.name]
            detail_df = pd.DataFrame(op_data)
            st.dataframe(detail_df, use_container_width=True, hide_index=True)

    # Architecture compatibility check
    if params_a != params_b or flops_a != flops_b:
        st.markdown("---")
        st.warning(
            "**Architecture Difference Detected**: Models have different parameter counts or FLOPs. "
            "This may indicate structural changes beyond precision conversion."
        )

    # Universal IR Structural Comparison (if available on both reports).
    ir_a = getattr(model_a.report, "universal_graph", None)
    ir_b = getattr(model_b.report, "universal_graph", None)

    if ir_a and ir_b:
        st.markdown("---")
        st.markdown("### Structural Analysis (Universal IR)")

        # Check structural equality
        is_equal = ir_a.is_structurally_equal(ir_b)

        if is_equal:
            st.success(
                "**Architectures are structurally identical** → same ops in same order. "
                "Differences are limited to precision/weights."
            )
        else:
            st.warning(
                "**Structural differences detected** → graphs differ in ops or connectivity."
            )

            # Show detailed diff (only when the graphs actually differ).
            with st.expander("View IR Diff", expanded=True):
                diff_result = ir_a.diff(ir_b)

                diff_cols = st.columns(2)

                with diff_cols[0]:
                    st.markdown("**Summary:**")
                    st.text(f"  Node count: {ir_a.num_nodes} → {ir_b.num_nodes}")
                    st.text(f"  Parameters: {ir_a.total_parameters:,} → {ir_b.total_parameters:,}")

                with diff_cols[1]:
                    st.markdown("**Changes:**")
                    if diff_result.get("node_count_diff", 0) != 0:
                        st.text(f"  Δ Nodes: {diff_result['node_count_diff']:+d}")
                    if diff_result.get("param_count_diff", 0) != 0:
                        st.text(f"  Δ Params: {diff_result['param_count_diff']:+,}")

                # Op type differences
                op_diff = diff_result.get("op_type_diff", {})
                if op_diff:
                    st.markdown("**Op Type Changes:**")
                    added = op_diff.get("added", [])
                    removed = op_diff.get("removed", [])
                    if added:
                        st.text(f"  Added: {', '.join(added)}")
                    if removed:
                        st.text(f"  Removed: {', '.join(removed)}")

    # Footer
    st.markdown("---")
    st.caption("*Generated by HaoLine Compare Mode*")
|
|
807
|
+
|
|
808
|
+
|
|
809
|
+
def _render_model_slot(slot: str, model, gradient: str, border: str, accent: str, icon: str) -> None:
    """Render one comparison slot: the styled card plus select/clear controls.

    Args:
        slot: Lowercase slot id ("a" or "b"); used to build widget keys,
            labels, and the ``compare_models`` session-state key.
        model: Currently selected analysis result for this slot, or None.
        gradient: CSS gradient color stops for the card background.
        border: CSS color for the card's dashed border.
        accent: Hex accent color for the slot title.
        icon: Emoji shown at the top of the card.
    """
    label = slot.upper()
    st.markdown(
        f"""
        <div style="background: linear-gradient(135deg, {gradient});
                    border: 2px dashed {border}; border-radius: 16px; padding: 2rem; text-align: center;">
            <div style="font-size: 1.5rem; margin-bottom: 0.5rem;">{icon}</div>
            <div style="font-size: 1rem; font-weight: 600; color: {accent};">Model {label}</div>
        </div>
        """,
        unsafe_allow_html=True,
    )

    if model:
        st.success(f"Selected: **{model.name}**")
        st.caption(model.summary)
        if st.button(f"Clear Model {label}"):
            st.session_state.compare_models[f"model_{slot}"] = None
            st.rerun()
    else:
        # Upload option
        uploaded = st.file_uploader(
            f"Upload Model {label}",
            type=["onnx"],
            key=f"compare_file_{slot}",
            help="Upload an ONNX model",
        )
        if uploaded:
            with st.spinner(f"Analyzing Model {label}..."):
                result = analyze_model_file(uploaded)
                if result:
                    st.session_state.compare_models[f"model_{slot}"] = result
                    st.rerun()

        # Or select from recent session history (first 3 entries)
        if st.session_state.analysis_history:
            st.markdown("**Or select from history:**")
            for i, result in enumerate(st.session_state.analysis_history[:3]):
                if st.button(f"{result.name}", key=f"select_{slot}_{i}"):
                    st.session_state.compare_models[f"model_{slot}"] = result
                    st.rerun()


def render_compare_mode():
    """Render the model comparison interface.

    If both slots are filled, shows the side-by-side comparison view with a
    centered "Clear Comparison" button. Otherwise renders a dual uploader,
    per-slot selection cards (via ``_render_model_slot``), and a first-run tip.
    """
    model_a = st.session_state.compare_models.get("model_a")
    model_b = st.session_state.compare_models.get("model_b")

    # Show comparison if both models are selected
    if model_a and model_b:
        # Centered clear button above the comparison view
        col1, col2, col3 = st.columns([1, 1, 1])
        with col2:
            if st.button("Clear Comparison", type="secondary", use_container_width=True):
                st.session_state.compare_models = {"model_a": None, "model_b": None}
                st.rerun()

        render_comparison_view(model_a, model_b)
        return

    # Model selection interface
    st.markdown("## Compare Two Models")
    st.markdown("Upload two models at once, or select from session history.")

    # Quick dual upload: accepts exactly two ONNX files and fills both slots
    with st.expander("๐ค Quick Upload (both models at once)", expanded=not (model_a or model_b)):
        dual_files = st.file_uploader(
            "Select two ONNX models",
            type=["onnx"],
            accept_multiple_files=True,
            key="dual_upload",
            help="Select exactly 2 models to compare",
        )
        if dual_files:
            if len(dual_files) == 2:
                with st.spinner("Analyzing both models..."):
                    result_a = analyze_model_file(dual_files[0])
                    result_b = analyze_model_file(dual_files[1])
                    if result_a and result_b:
                        st.session_state.compare_models["model_a"] = result_a
                        st.session_state.compare_models["model_b"] = result_b
                        st.rerun()
            elif len(dual_files) == 1:
                st.warning("Please select 2 models to compare")
            else:
                st.warning(f"Please select exactly 2 models (you selected {len(dual_files)})")

    st.markdown("**Or select individually:**")
    col1, col2 = st.columns(2)

    # The two slots are identical except for slot id and card styling.
    with col1:
        _render_model_slot(
            "a",
            model_a,
            gradient="rgba(16, 185, 129, 0.1) 0%, rgba(5, 150, 105, 0.05) 100%",
            border="rgba(16, 185, 129, 0.3)",
            accent="#10b981",
            icon="๐ข",
        )

    with col2:
        _render_model_slot(
            "b",
            model_b,
            gradient="rgba(99, 102, 241, 0.1) 0%, rgba(79, 70, 229, 0.05) 100%",
            border="rgba(99, 102, 241, 0.3)",
            accent="#6366f1",
            icon="๐ฃ",
        )

    # Tips shown only before any analysis has been run in this session
    if not st.session_state.analysis_history:
        st.info(
            "๐ก **Tip:** First analyze some models in **Analyze** mode. They'll appear in your session history for easy comparison."
        )
|
945
|
+
def analyze_model_file(uploaded_file) -> AnalysisResult | None:
    """Analyze an uploaded ONNX model file and return the result.

    Writes the upload to a temporary file, runs ``ModelInspector`` on it, and
    records the report in session history.

    Args:
        uploaded_file: A Streamlit ``UploadedFile`` (must be ``.onnx``).

    Returns:
        The ``AnalysisResult`` added to history, or None on unsupported
        format or analysis failure (an error is shown in the UI instead of
        raising).
    """
    from haoline import ModelInspector

    file_ext = Path(uploaded_file.name).suffix.lower()

    if file_ext != ".onnx":
        st.error("Only ONNX files are supported in compare mode. Convert your model first.")
        return None

    tmp_path: str | None = None
    try:
        # Read the upload once; reused for both the temp file and history size.
        file_bytes = uploaded_file.getvalue()
        with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as tmp:
            tmp.write(file_bytes)
            tmp_path = tmp.name

        inspector = ModelInspector()
        report = inspector.inspect(tmp_path)

        # Add to history and return
        return add_to_history(uploaded_file.name, report, len(file_bytes))

    except Exception as e:
        st.error(f"Error analyzing model: {e}")
        return None

    finally:
        # Clean up the temp file even when inspect() raises (the original
        # only unlinked on the success path, leaking the file on errors).
        if tmp_path is not None:
            Path(tmp_path).unlink(missing_ok=True)
|
975
|
+
def get_hardware_options() -> dict[str, dict]:
    """Get hardware profile options organized by category.

    Builds a mapping of display category -> {profile_key: summary dict} from
    ``HARDWARE_PROFILES``, keeping only GPU profiles, and drops categories
    that end up empty. Always includes the synthetic "Auto" entry.
    """

    def _category_for(lowered: str) -> str:
        """Return the display category for a lowercased profile key.

        Checks are ordered: specific families first (H100/A100), then tag
        groups, with "Data Center - Other" as the catch-all fallback.
        """
        if "h100" in lowered:
            return "๐ข Data Center - H100"
        if "a100" in lowered:
            return "๐ข Data Center - A100"
        if any(tag in lowered for tag in ("a10", "l4", "t4", "v100", "a40", "a30")):
            return "๐ข Data Center - Other"
        if any(tag in lowered for tag in ("rtx40", "4090", "4080", "4070", "4060")):
            return "๐ฎ Consumer - RTX 40 Series"
        if any(tag in lowered for tag in ("rtx30", "3090", "3080", "3070", "3060")):
            return "๐ฎ Consumer - RTX 30 Series"
        if any(tag in lowered for tag in ("rtxa", "a6000", "a5000", "a4000")):
            return "๐ผ Workstation"
        if any(tag in lowered for tag in ("jetson", "orin", "xavier", "nano")):
            return "๐ค Edge / Jetson"
        if any(tag in lowered for tag in ("aws", "azure", "gcp")):
            return "โ๏ธ Cloud Instances"
        return "๐ข Data Center - Other"

    categories: dict[str, dict] = {
        "๐ง Auto": {"auto": {"name": "Auto-detect local GPU", "vram": 0, "tflops": 0}},
        "๐ข Data Center - H100": {},
        "๐ข Data Center - A100": {},
        "๐ข Data Center - Other": {},
        "๐ฎ Consumer - RTX 40 Series": {},
        "๐ฎ Consumer - RTX 30 Series": {},
        "๐ผ Workstation": {},
        "๐ค Edge / Jetson": {},
        "โ๏ธ Cloud Instances": {},
    }

    for key, profile in HARDWARE_PROFILES.items():
        # Only GPU profiles are offered in the picker.
        if profile.device_type != "gpu":
            continue

        categories[_category_for(key.lower())][key] = {
            "name": profile.name,
            "vram": profile.vram_bytes // (1024**3),
            # Prefer FP16 peak; fall back to FP32 (may be None if both unset).
            "tflops": profile.peak_fp16_tflops or profile.peak_fp32_tflops,
        }

    # Remove empty categories
    return {label: members for label, members in categories.items() if members}
|
1044
|
+
def main():
|
|
1045
|
+
# Initialize session state
|
|
1046
|
+
init_session_state()
|
|
1047
|
+
|
|
1048
|
+
# Header
|
|
1049
|
+
st.markdown('<h1 class="main-header">HaoLine ็็บฟ</h1>', unsafe_allow_html=True)
|
|
1050
|
+
st.markdown(
|
|
1051
|
+
'<p class="sub-header">Universal Model Inspector โ See what\'s really inside your neural networks</p>',
|
|
1052
|
+
unsafe_allow_html=True,
|
|
1053
|
+
)
|
|
1054
|
+
|
|
1055
|
+
# Sidebar
|
|
1056
|
+
with st.sidebar:
|
|
1057
|
+
# Mode selector
|
|
1058
|
+
st.markdown("### Mode")
|
|
1059
|
+
mode = st.radio(
|
|
1060
|
+
"Select mode",
|
|
1061
|
+
options=["Analyze", "Compare"],
|
|
1062
|
+
index=0 if st.session_state.current_mode == "analyze" else 1,
|
|
1063
|
+
horizontal=True,
|
|
1064
|
+
label_visibility="collapsed",
|
|
1065
|
+
)
|
|
1066
|
+
st.session_state.current_mode = mode.lower()
|
|
1067
|
+
|
|
1068
|
+
st.markdown("---")
|
|
1069
|
+
|
|
1070
|
+
# Session history
|
|
1071
|
+
if st.session_state.analysis_history:
|
|
1072
|
+
st.markdown("### Recent Analyses")
|
|
1073
|
+
for i, result in enumerate(st.session_state.analysis_history[:5]):
|
|
1074
|
+
time_str = result.timestamp.strftime("%H:%M")
|
|
1075
|
+
col1, col2 = st.columns([3, 1])
|
|
1076
|
+
with col1:
|
|
1077
|
+
st.markdown(
|
|
1078
|
+
f"""
|
|
1079
|
+
<div style="font-size: 0.85rem; color: #f5f5f5; margin-bottom: 0.1rem;">
|
|
1080
|
+
{result.name[:20]}{"..." if len(result.name) > 20 else ""}
|
|
1081
|
+
</div>
|
|
1082
|
+
<div style="font-size: 0.7rem; color: #737373;">
|
|
1083
|
+
{result.summary} ยท {time_str}
|
|
1084
|
+
</div>
|
|
1085
|
+
""",
|
|
1086
|
+
unsafe_allow_html=True,
|
|
1087
|
+
)
|
|
1088
|
+
with col2:
|
|
1089
|
+
if st.session_state.current_mode == "compare":
|
|
1090
|
+
if st.button("A", key=f"hist_a_{i}", help="Set as Model A"):
|
|
1091
|
+
st.session_state.compare_models["model_a"] = result
|
|
1092
|
+
st.rerun()
|
|
1093
|
+
if st.button("B", key=f"hist_b_{i}", help="Set as Model B"):
|
|
1094
|
+
st.session_state.compare_models["model_b"] = result
|
|
1095
|
+
st.rerun()
|
|
1096
|
+
|
|
1097
|
+
if st.button("Clear History", type="secondary"):
|
|
1098
|
+
st.session_state.analysis_history = []
|
|
1099
|
+
st.rerun()
|
|
1100
|
+
|
|
1101
|
+
st.markdown("---")
|
|
1102
|
+
|
|
1103
|
+
st.markdown("### Settings")
|
|
1104
|
+
|
|
1105
|
+
# Hardware selection with categorized picker
|
|
1106
|
+
st.markdown("#### Target Hardware")
|
|
1107
|
+
hardware_categories = get_hardware_options()
|
|
1108
|
+
|
|
1109
|
+
# Search filter
|
|
1110
|
+
search_query = st.text_input(
|
|
1111
|
+
"Search GPUs",
|
|
1112
|
+
placeholder="e.g., RTX 4090, A100, H100...",
|
|
1113
|
+
help="Filter hardware by name",
|
|
1114
|
+
)
|
|
1115
|
+
|
|
1116
|
+
# Build flat list with category info for filtering
|
|
1117
|
+
all_hardware = []
|
|
1118
|
+
for category, profiles in hardware_categories.items():
|
|
1119
|
+
for hw_key, hw_info in profiles.items():
|
|
1120
|
+
display_name = hw_info["name"]
|
|
1121
|
+
if hw_info["vram"] > 0:
|
|
1122
|
+
display_name += f" ({hw_info['vram']}GB"
|
|
1123
|
+
if hw_info["tflops"]:
|
|
1124
|
+
display_name += f", {hw_info['tflops']:.0f} TFLOPS"
|
|
1125
|
+
display_name += ")"
|
|
1126
|
+
all_hardware.append(
|
|
1127
|
+
{
|
|
1128
|
+
"key": hw_key,
|
|
1129
|
+
"display": display_name,
|
|
1130
|
+
"category": category,
|
|
1131
|
+
"vram": hw_info["vram"],
|
|
1132
|
+
"tflops": hw_info["tflops"],
|
|
1133
|
+
}
|
|
1134
|
+
)
|
|
1135
|
+
|
|
1136
|
+
# Filter by search
|
|
1137
|
+
if search_query:
|
|
1138
|
+
filtered_hardware = [
|
|
1139
|
+
h
|
|
1140
|
+
for h in all_hardware
|
|
1141
|
+
if search_query.lower() in h["display"].lower()
|
|
1142
|
+
or search_query.lower() in h["key"].lower()
|
|
1143
|
+
]
|
|
1144
|
+
else:
|
|
1145
|
+
filtered_hardware = all_hardware
|
|
1146
|
+
|
|
1147
|
+
# Category filter
|
|
1148
|
+
available_categories = sorted({h["category"] for h in filtered_hardware})
|
|
1149
|
+
if len(available_categories) > 1:
|
|
1150
|
+
selected_category = st.selectbox(
|
|
1151
|
+
"Category",
|
|
1152
|
+
options=["All Categories"] + available_categories,
|
|
1153
|
+
index=0,
|
|
1154
|
+
)
|
|
1155
|
+
if selected_category != "All Categories":
|
|
1156
|
+
filtered_hardware = [
|
|
1157
|
+
h for h in filtered_hardware if h["category"] == selected_category
|
|
1158
|
+
]
|
|
1159
|
+
|
|
1160
|
+
# Final hardware dropdown
|
|
1161
|
+
if filtered_hardware:
|
|
1162
|
+
hw_options = {h["key"]: h["display"] for h in filtered_hardware}
|
|
1163
|
+
default_key = "auto" if "auto" in hw_options else list(hw_options.keys())[0]
|
|
1164
|
+
selected_hardware = st.selectbox(
|
|
1165
|
+
"Select GPU",
|
|
1166
|
+
options=list(hw_options.keys()),
|
|
1167
|
+
format_func=lambda x: hw_options[x],
|
|
1168
|
+
index=(
|
|
1169
|
+
list(hw_options.keys()).index(default_key) if default_key in hw_options else 0
|
|
1170
|
+
),
|
|
1171
|
+
)
|
|
1172
|
+
else:
|
|
1173
|
+
st.warning("No GPUs match your search. Try a different query.")
|
|
1174
|
+
selected_hardware = "auto"
|
|
1175
|
+
|
|
1176
|
+
# Show selected hardware specs
|
|
1177
|
+
if selected_hardware != "auto":
|
|
1178
|
+
try:
|
|
1179
|
+
profile = HARDWARE_PROFILES.get(selected_hardware)
|
|
1180
|
+
if profile:
|
|
1181
|
+
st.markdown(
|
|
1182
|
+
f"""
|
|
1183
|
+
<div style="background: #1f1f1f;
|
|
1184
|
+
border: 1px solid rgba(16, 185, 129, 0.2);
|
|
1185
|
+
padding: 0.75rem 1rem; border-radius: 10px; margin-top: 0.5rem;">
|
|
1186
|
+
<div style="font-size: 0.85rem; color: #10b981; font-weight: 600;">
|
|
1187
|
+
{profile.name}
|
|
1188
|
+
</div>
|
|
1189
|
+
<div style="font-size: 0.75rem; color: #737373; margin-top: 0.25rem; font-family: 'SF Mono', monospace;">
|
|
1190
|
+
{profile.vram_bytes // (1024**3)} GB VRAM ยท {profile.peak_fp16_tflops or "โ"} TF
|
|
1191
|
+
</div>
|
|
1192
|
+
</div>
|
|
1193
|
+
""",
|
|
1194
|
+
unsafe_allow_html=True,
|
|
1195
|
+
)
|
|
1196
|
+
except Exception:
|
|
1197
|
+
pass
|
|
1198
|
+
|
|
1199
|
+
# Analysis options
|
|
1200
|
+
st.markdown("### Analysis Options")
|
|
1201
|
+
include_graph = st.checkbox(
|
|
1202
|
+
"Interactive Graph", value=True, help="Include zoomable D3.js network visualization"
|
|
1203
|
+
)
|
|
1204
|
+
st.checkbox("Charts", value=True, help="Include matplotlib visualizations")
|
|
1205
|
+
|
|
1206
|
+
# LLM Summary
|
|
1207
|
+
st.markdown("### AI Summary")
|
|
1208
|
+
|
|
1209
|
+
# Check for API key in environment variable first
|
|
1210
|
+
env_api_key = os.environ.get("OPENAI_API_KEY", "")
|
|
1211
|
+
|
|
1212
|
+
enable_llm = st.checkbox(
|
|
1213
|
+
"Generate AI Summary",
|
|
1214
|
+
value=st.session_state.get("enable_llm", False),
|
|
1215
|
+
help="Requires OpenAI API key",
|
|
1216
|
+
key="enable_llm_checkbox",
|
|
1217
|
+
)
|
|
1218
|
+
# Store in session state for persistence across reruns
|
|
1219
|
+
st.session_state["enable_llm"] = enable_llm
|
|
1220
|
+
|
|
1221
|
+
if enable_llm:
|
|
1222
|
+
if env_api_key:
|
|
1223
|
+
# Environment variable takes precedence
|
|
1224
|
+
st.session_state["openai_api_key_value"] = env_api_key
|
|
1225
|
+
st.success("API key loaded from environment")
|
|
1226
|
+
else:
|
|
1227
|
+
# Manual entry
|
|
1228
|
+
api_key_input = st.text_input(
|
|
1229
|
+
"OpenAI API Key",
|
|
1230
|
+
type="password",
|
|
1231
|
+
help="Starts with 'sk-'",
|
|
1232
|
+
key="openai_api_key_input",
|
|
1233
|
+
)
|
|
1234
|
+
|
|
1235
|
+
# Update session state when key is entered
|
|
1236
|
+
if api_key_input:
|
|
1237
|
+
st.session_state["openai_api_key_value"] = api_key_input
|
|
1238
|
+
|
|
1239
|
+
# Show current status
|
|
1240
|
+
current_key = st.session_state.get("openai_api_key_value", "")
|
|
1241
|
+
if current_key:
|
|
1242
|
+
# Validate key format
|
|
1243
|
+
if current_key.startswith("sk-"):
|
|
1244
|
+
st.success(f"API Key Set ({current_key[:7]}...{current_key[-4:]})")
|
|
1245
|
+
else:
|
|
1246
|
+
st.error("Invalid key format (should start with 'sk-')")
|
|
1247
|
+
else:
|
|
1248
|
+
st.warning("No API key - enter above to enable AI summaries")
|
|
1249
|
+
|
|
1250
|
+
st.caption("Key is used once per analysis, never stored permanently.")
|
|
1251
|
+
|
|
1252
|
+
# Privacy notice
|
|
1253
|
+
st.markdown("---")
|
|
1254
|
+
st.markdown(
|
|
1255
|
+
'<div class="privacy-notice">'
|
|
1256
|
+
"<strong>Privacy:</strong> Models and API keys are processed in memory only. "
|
|
1257
|
+
"Nothing is stored. For sensitive work, self-host with <code>pip install haoline[web]</code> "
|
|
1258
|
+
"and run <code>streamlit run streamlit_app.py</code> locally."
|
|
1259
|
+
"</div>",
|
|
1260
|
+
unsafe_allow_html=True,
|
|
1261
|
+
)
|
|
1262
|
+
|
|
1263
|
+
st.markdown(f"---\n*HaoLine v{__version__}*")
|
|
1264
|
+
|
|
1265
|
+
# Main content - different views based on mode
|
|
1266
|
+
if st.session_state.current_mode == "compare":
|
|
1267
|
+
render_compare_mode()
|
|
1268
|
+
return
|
|
1269
|
+
|
|
1270
|
+
# Analyze mode
|
|
1271
|
+
col1, col2, col3 = st.columns([1, 2, 1])
|
|
1272
|
+
|
|
1273
|
+
with col2:
|
|
1274
|
+
# File upload - support multiple formats
|
|
1275
|
+
uploaded_file = st.file_uploader(
|
|
1276
|
+
"Upload your model",
|
|
1277
|
+
type=["onnx", "pt", "pth", "safetensors"],
|
|
1278
|
+
help="ONNX (recommended), PyTorch (.pt/.pth), or SafeTensors",
|
|
1279
|
+
)
|
|
1280
|
+
|
|
1281
|
+
if uploaded_file is None:
|
|
1282
|
+
st.markdown(
|
|
1283
|
+
"""
|
|
1284
|
+
<div style="text-align: center; padding: 1rem 2rem; margin-top: -0.5rem;">
|
|
1285
|
+
<p style="font-size: 0.9rem; margin-bottom: 0.75rem; color: #a3a3a3;">
|
|
1286
|
+
<span style="color: #10b981; font-weight: 600;">ONNX</span> โ
|
|
1287
|
+
<span style="color: #a3a3a3;">PyTorch</span> โป
|
|
1288
|
+
<span style="color: #a3a3a3;">SafeTensors</span> โป
|
|
1289
|
+
</p>
|
|
1290
|
+
</div>
|
|
1291
|
+
""",
|
|
1292
|
+
unsafe_allow_html=True,
|
|
1293
|
+
)
|
|
1294
|
+
|
|
1295
|
+
# Demo model options
|
|
1296
|
+
st.markdown(
|
|
1297
|
+
"""<div style="text-align: center; margin: 1rem 0 0.5rem 0;">
|
|
1298
|
+
<span style="font-size: 0.9rem; color: #a3a3a3; font-weight: 500;">
|
|
1299
|
+
No model handy? Try a demo:
|
|
1300
|
+
</span>
|
|
1301
|
+
</div>""",
|
|
1302
|
+
unsafe_allow_html=True,
|
|
1303
|
+
)
|
|
1304
|
+
|
|
1305
|
+
# Demo model buttons in a row
|
|
1306
|
+
demo_cols = st.columns(len(DEMO_MODELS))
|
|
1307
|
+
for i, (key, info) in enumerate(DEMO_MODELS.items()):
|
|
1308
|
+
with demo_cols[i]:
|
|
1309
|
+
if st.button(
|
|
1310
|
+
f"{info['name']}\n({info['size']})",
|
|
1311
|
+
key=f"demo_{key}",
|
|
1312
|
+
use_container_width=True,
|
|
1313
|
+
help=info["description"],
|
|
1314
|
+
):
|
|
1315
|
+
with st.spinner(f"Downloading {info['name']}..."):
|
|
1316
|
+
try:
|
|
1317
|
+
demo_bytes, demo_name = download_demo_model(key)
|
|
1318
|
+
st.session_state.demo_model = (demo_bytes, demo_name)
|
|
1319
|
+
st.rerun()
|
|
1320
|
+
except Exception as e:
|
|
1321
|
+
st.error(f"Failed to download: {e}")
|
|
1322
|
+
|
|
1323
|
+
st.markdown(
|
|
1324
|
+
"""<div style="text-align: center; margin-top: 1rem;">
|
|
1325
|
+
<p style="font-size: 0.8rem; color: #737373;">
|
|
1326
|
+
Or browse the
|
|
1327
|
+
<a href="https://huggingface.co/models?library=onnx" target="_blank"
|
|
1328
|
+
style="color: #10b981; text-decoration: none;">HuggingFace ONNX Hub</a>
|
|
1329
|
+
</p>
|
|
1330
|
+
</div>""",
|
|
1331
|
+
unsafe_allow_html=True,
|
|
1332
|
+
)
|
|
1333
|
+
|
|
1334
|
+
# Handle demo model if requested
|
|
1335
|
+
demo_model_bytes = None
|
|
1336
|
+
demo_model_name = None
|
|
1337
|
+
if st.session_state.demo_model is not None:
|
|
1338
|
+
demo_model_bytes, demo_model_name = st.session_state.demo_model
|
|
1339
|
+
st.session_state.demo_model = None # Clear after use
|
|
1340
|
+
|
|
1341
|
+
# Analysis - either uploaded file or demo model
|
|
1342
|
+
if uploaded_file is not None or demo_model_bytes is not None:
|
|
1343
|
+
if demo_model_bytes is not None:
|
|
1344
|
+
# Use demo model
|
|
1345
|
+
file_ext = ".onnx"
|
|
1346
|
+
file_name = demo_model_name
|
|
1347
|
+
file_bytes = demo_model_bytes
|
|
1348
|
+
else:
|
|
1349
|
+
# Use uploaded file
|
|
1350
|
+
file_ext = Path(uploaded_file.name).suffix.lower()
|
|
1351
|
+
file_name = uploaded_file.name
|
|
1352
|
+
file_bytes = uploaded_file.getvalue()
|
|
1353
|
+
|
|
1354
|
+
tmp_path = None
|
|
1355
|
+
|
|
1356
|
+
# Check if format needs conversion
|
|
1357
|
+
if file_ext in [".pt", ".pth"]:
|
|
1358
|
+
# Check if PyTorch is available
|
|
1359
|
+
try:
|
|
1360
|
+
import torch
|
|
1361
|
+
|
|
1362
|
+
pytorch_available = True
|
|
1363
|
+
except ImportError:
|
|
1364
|
+
pytorch_available = False
|
|
1365
|
+
|
|
1366
|
+
if pytorch_available:
|
|
1367
|
+
st.info(
|
|
1368
|
+
"**PyTorch model detected** โ We'll try to convert it to ONNX for analysis."
|
|
1369
|
+
)
|
|
1370
|
+
|
|
1371
|
+
# Input shape is required for conversion
|
|
1372
|
+
input_shape_str = st.text_input(
|
|
1373
|
+
"Input Shape (required)",
|
|
1374
|
+
placeholder="1,3,224,224",
|
|
1375
|
+
help="Batch, Channels, Height, Width for image models. E.g., 1,3,224,224",
|
|
1376
|
+
)
|
|
1377
|
+
|
|
1378
|
+
if not input_shape_str:
|
|
1379
|
+
st.warning("โ ๏ธ Please enter the input shape to convert and analyze this model.")
|
|
1380
|
+
st.caption(
|
|
1381
|
+
"**Common shapes:** `1,3,224,224` (ResNet), `1,3,384,384` (ViT-Large), `1,768` (BERT tokens)"
|
|
1382
|
+
)
|
|
1383
|
+
st.stop()
|
|
1384
|
+
|
|
1385
|
+
# Try conversion
|
|
1386
|
+
try:
|
|
1387
|
+
input_shape = tuple(int(x.strip()) for x in input_shape_str.split(","))
|
|
1388
|
+
except ValueError:
|
|
1389
|
+
st.error(
|
|
1390
|
+
f"Invalid input shape: `{input_shape_str}`. Use comma-separated integers like `1,3,224,224`"
|
|
1391
|
+
)
|
|
1392
|
+
st.stop()
|
|
1393
|
+
|
|
1394
|
+
# Save uploaded file
|
|
1395
|
+
with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as pt_tmp:
|
|
1396
|
+
pt_tmp.write(file_bytes)
|
|
1397
|
+
pt_path = pt_tmp.name
|
|
1398
|
+
|
|
1399
|
+
# Attempt conversion
|
|
1400
|
+
with st.spinner("Converting PyTorch โ ONNX..."):
|
|
1401
|
+
try:
|
|
1402
|
+
# Try TorchScript first
|
|
1403
|
+
try:
|
|
1404
|
+
model = torch.jit.load(pt_path, map_location="cpu")
|
|
1405
|
+
is_ultralytics = False
|
|
1406
|
+
except Exception:
|
|
1407
|
+
loaded = torch.load(pt_path, map_location="cpu", weights_only=False)
|
|
1408
|
+
is_ultralytics = False
|
|
1409
|
+
|
|
1410
|
+
if isinstance(loaded, dict):
|
|
1411
|
+
# Check for Ultralytics YOLO format
|
|
1412
|
+
if "model" in loaded and hasattr(loaded.get("model"), "forward"):
|
|
1413
|
+
is_ultralytics = True
|
|
1414
|
+
else:
|
|
1415
|
+
st.error(
|
|
1416
|
+
"""
|
|
1417
|
+
**State dict detected** โ This file contains only weights, not the model architecture.
|
|
1418
|
+
|
|
1419
|
+
To analyze, you need the full model. Export to ONNX from your training code:
|
|
1420
|
+
```python
|
|
1421
|
+
torch.onnx.export(model, dummy_input, "model.onnx")
|
|
1422
|
+
```
|
|
1423
|
+
"""
|
|
1424
|
+
)
|
|
1425
|
+
st.stop()
|
|
1426
|
+
|
|
1427
|
+
if not is_ultralytics:
|
|
1428
|
+
model = loaded
|
|
1429
|
+
|
|
1430
|
+
# Handle Ultralytics models with their native export
|
|
1431
|
+
if is_ultralytics:
|
|
1432
|
+
try:
|
|
1433
|
+
from ultralytics import YOLO
|
|
1434
|
+
|
|
1435
|
+
st.info("๐ Ultralytics YOLO detected โ using native export...")
|
|
1436
|
+
yolo_model = YOLO(pt_path)
|
|
1437
|
+
onnx_tmp = tempfile.NamedTemporaryFile(suffix=".onnx", delete=False)
|
|
1438
|
+
yolo_model.export(
|
|
1439
|
+
format="onnx",
|
|
1440
|
+
imgsz=input_shape[2] if len(input_shape) >= 3 else 640,
|
|
1441
|
+
simplify=True,
|
|
1442
|
+
)
|
|
1443
|
+
# Ultralytics saves next to .pt, move to our temp
|
|
1444
|
+
import shutil
|
|
1445
|
+
|
|
1446
|
+
default_onnx = Path(pt_path).with_suffix(".onnx")
|
|
1447
|
+
if default_onnx.exists():
|
|
1448
|
+
shutil.move(str(default_onnx), onnx_tmp.name)
|
|
1449
|
+
tmp_path = onnx_tmp.name
|
|
1450
|
+
st.success("โ
YOLO conversion successful!")
|
|
1451
|
+
except ImportError:
|
|
1452
|
+
st.error(
|
|
1453
|
+
"**Ultralytics required** โ Install with: `pip install ultralytics`"
|
|
1454
|
+
)
|
|
1455
|
+
st.stop()
|
|
1456
|
+
else:
|
|
1457
|
+
model.eval()
|
|
1458
|
+
dummy_input = torch.randn(*input_shape)
|
|
1459
|
+
|
|
1460
|
+
# Convert to ONNX
|
|
1461
|
+
onnx_tmp = tempfile.NamedTemporaryFile(suffix=".onnx", delete=False)
|
|
1462
|
+
torch.onnx.export(
|
|
1463
|
+
model,
|
|
1464
|
+
dummy_input,
|
|
1465
|
+
onnx_tmp.name,
|
|
1466
|
+
opset_version=17,
|
|
1467
|
+
input_names=["input"],
|
|
1468
|
+
output_names=["output"],
|
|
1469
|
+
dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
|
|
1470
|
+
)
|
|
1471
|
+
tmp_path = onnx_tmp.name
|
|
1472
|
+
st.success("โ
Conversion successful!")
|
|
1473
|
+
|
|
1474
|
+
except Exception as e:
|
|
1475
|
+
st.error(
|
|
1476
|
+
f"""
|
|
1477
|
+
**Conversion failed:** {str(e)[:200]}
|
|
1478
|
+
|
|
1479
|
+
Try exporting to ONNX directly from your training code, or use the CLI:
|
|
1480
|
+
```bash
|
|
1481
|
+
haoline --from-pytorch model.pt --input-shape {input_shape_str} --html
|
|
1482
|
+
```
|
|
1483
|
+
"""
|
|
1484
|
+
)
|
|
1485
|
+
st.stop()
|
|
1486
|
+
else:
|
|
1487
|
+
st.warning(
|
|
1488
|
+
f"""
|
|
1489
|
+
**PyTorch model detected**, but PyTorch is not installed in this environment.
|
|
1490
|
+
|
|
1491
|
+
**Options:**
|
|
1492
|
+
1. Use the CLI locally (supports conversion):
|
|
1493
|
+
```bash
|
|
1494
|
+
pip install haoline torch
|
|
1495
|
+
haoline --from-pytorch {file_name} --input-shape 1,3,224,224 --html
|
|
1496
|
+
```
|
|
1497
|
+
|
|
1498
|
+
2. Convert to ONNX first in your code:
|
|
1499
|
+
```python
|
|
1500
|
+
torch.onnx.export(model, dummy_input, "model.onnx")
|
|
1501
|
+
```
|
|
1502
|
+
"""
|
|
1503
|
+
)
|
|
1504
|
+
st.stop()
|
|
1505
|
+
|
|
1506
|
+
elif file_ext == ".safetensors":
|
|
1507
|
+
st.warning(
|
|
1508
|
+
"""
|
|
1509
|
+
**SafeTensors format detected** โ This format contains only weights, not architecture.
|
|
1510
|
+
|
|
1511
|
+
To analyze, export to ONNX from your training code. If using HuggingFace:
|
|
1512
|
+
```python
|
|
1513
|
+
from optimum.exporters.onnx import main_export
|
|
1514
|
+
main_export("model-name", output="model.onnx")
|
|
1515
|
+
```
|
|
1516
|
+
"""
|
|
1517
|
+
)
|
|
1518
|
+
st.stop()
|
|
1519
|
+
|
|
1520
|
+
# Save ONNX to temp file (if not already set by conversion)
|
|
1521
|
+
if tmp_path is None:
|
|
1522
|
+
with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as tmp:
|
|
1523
|
+
tmp.write(file_bytes)
|
|
1524
|
+
tmp_path = tmp.name
|
|
1525
|
+
|
|
1526
|
+
try:
|
|
1527
|
+
with st.spinner("Analyzing model architecture..."):
|
|
1528
|
+
# Run analysis
|
|
1529
|
+
inspector = ModelInspector()
|
|
1530
|
+
report = inspector.inspect(tmp_path)
|
|
1531
|
+
|
|
1532
|
+
# Apply hardware estimates
|
|
1533
|
+
if selected_hardware == "auto":
|
|
1534
|
+
profile = detect_local_hardware()
|
|
1535
|
+
else:
|
|
1536
|
+
profile = get_profile(selected_hardware)
|
|
1537
|
+
|
|
1538
|
+
if (
|
|
1539
|
+
profile
|
|
1540
|
+
and report.param_counts
|
|
1541
|
+
and report.flop_counts
|
|
1542
|
+
and report.memory_estimates
|
|
1543
|
+
):
|
|
1544
|
+
estimator = HardwareEstimator()
|
|
1545
|
+
report.hardware_profile = profile
|
|
1546
|
+
report.hardware_estimates = estimator.estimate(
|
|
1547
|
+
model_params=report.param_counts.total,
|
|
1548
|
+
model_flops=report.flop_counts.total,
|
|
1549
|
+
peak_activation_bytes=report.memory_estimates.peak_activation_bytes,
|
|
1550
|
+
hardware=profile,
|
|
1551
|
+
)
|
|
1552
|
+
|
|
1553
|
+
# Save to session history
|
|
1554
|
+
add_to_history(file_name, report, len(file_bytes))
|
|
1555
|
+
|
|
1556
|
+
# Display results
|
|
1557
|
+
st.markdown("---")
|
|
1558
|
+
st.markdown("## Analysis Results")
|
|
1559
|
+
|
|
1560
|
+
# Metrics cards
|
|
1561
|
+
col1, col2, col3, col4 = st.columns(4)
|
|
1562
|
+
|
|
1563
|
+
with col1:
|
|
1564
|
+
params = report.param_counts.total if report.param_counts else 0
|
|
1565
|
+
st.metric("Parameters", format_number(params))
|
|
1566
|
+
|
|
1567
|
+
with col2:
|
|
1568
|
+
flops = report.flop_counts.total if report.flop_counts else 0
|
|
1569
|
+
st.metric("FLOPs", format_number(flops))
|
|
1570
|
+
|
|
1571
|
+
with col3:
|
|
1572
|
+
memory = (
|
|
1573
|
+
report.memory_estimates.peak_activation_bytes
|
|
1574
|
+
if report.memory_estimates
|
|
1575
|
+
else 0
|
|
1576
|
+
)
|
|
1577
|
+
st.metric("Memory", format_bytes(memory))
|
|
1578
|
+
|
|
1579
|
+
with col4:
|
|
1580
|
+
st.metric("Operators", str(report.graph_summary.num_nodes))
|
|
1581
|
+
|
|
1582
|
+
# Tabs for different views
|
|
1583
|
+
tab1, tab2, tab3, tab4 = st.tabs(
|
|
1584
|
+
["Overview", "Interactive Graph", "Details", "Export"]
|
|
1585
|
+
)
|
|
1586
|
+
|
|
1587
|
+
with tab1:
|
|
1588
|
+
st.markdown("### Model Information")
|
|
1589
|
+
|
|
1590
|
+
info_col1, info_col2 = st.columns(2)
|
|
1591
|
+
|
|
1592
|
+
with info_col1:
|
|
1593
|
+
st.markdown(
|
|
1594
|
+
f"""
|
|
1595
|
+
| Property | Value |
|
|
1596
|
+
|----------|-------|
|
|
1597
|
+
| **Model** | `{file_name}` |
|
|
1598
|
+
| **IR Version** | {report.metadata.ir_version} |
|
|
1599
|
+
| **Producer** | {report.metadata.producer_name or "Unknown"} |
|
|
1600
|
+
| **Opset** | {list(report.metadata.opsets.values())[0] if report.metadata.opsets else "Unknown"} |
|
|
1601
|
+
"""
|
|
1602
|
+
)
|
|
1603
|
+
|
|
1604
|
+
with info_col2:
|
|
1605
|
+
params_total = report.param_counts.total if report.param_counts else 0
|
|
1606
|
+
flops_total = report.flop_counts.total if report.flop_counts else 0
|
|
1607
|
+
peak_mem = (
|
|
1608
|
+
report.memory_estimates.peak_activation_bytes
|
|
1609
|
+
if report.memory_estimates
|
|
1610
|
+
else 0
|
|
1611
|
+
)
|
|
1612
|
+
model_size = (
|
|
1613
|
+
report.memory_estimates.model_size_bytes
|
|
1614
|
+
if report.memory_estimates
|
|
1615
|
+
else 0
|
|
1616
|
+
)
|
|
1617
|
+
|
|
1618
|
+
st.markdown(
|
|
1619
|
+
f"""
|
|
1620
|
+
| Metric | Value |
|
|
1621
|
+
|--------|-------|
|
|
1622
|
+
| **Total Parameters** | {params_total:,} |
|
|
1623
|
+
| **Total FLOPs** | {flops_total:,} |
|
|
1624
|
+
| **Peak Memory** | {format_bytes(peak_mem)} |
|
|
1625
|
+
| **Model Size** | {format_bytes(model_size)} |
|
|
1626
|
+
"""
|
|
1627
|
+
)
|
|
1628
|
+
|
|
1629
|
+
# AI Summary (if enabled and API key provided)
|
|
1630
|
+
llm_enabled = st.session_state.get("enable_llm", False)
|
|
1631
|
+
llm_api_key = st.session_state.get("openai_api_key_value", "")
|
|
1632
|
+
|
|
1633
|
+
if llm_enabled:
|
|
1634
|
+
st.markdown("### AI Analysis")
|
|
1635
|
+
|
|
1636
|
+
if not llm_api_key:
|
|
1637
|
+
st.warning(
|
|
1638
|
+
"AI Summary is enabled but no API key is set. "
|
|
1639
|
+
"Enter your OpenAI API key in the sidebar."
|
|
1640
|
+
)
|
|
1641
|
+
elif not llm_api_key.startswith("sk-"):
|
|
1642
|
+
st.error(
|
|
1643
|
+
f"Invalid API key format. Keys should start with 'sk-'. "
|
|
1644
|
+
f"Got: {llm_api_key[:10]}..."
|
|
1645
|
+
)
|
|
1646
|
+
else:
|
|
1647
|
+
with st.spinner("Generating AI summary..."):
|
|
1648
|
+
try:
|
|
1649
|
+
from haoline.llm_summarizer import LLMSummarizer
|
|
1650
|
+
|
|
1651
|
+
summarizer = LLMSummarizer(api_key=llm_api_key)
|
|
1652
|
+
llm_result = summarizer.summarize(report)
|
|
1653
|
+
|
|
1654
|
+
if llm_result and llm_result.success:
|
|
1655
|
+
# Short summary
|
|
1656
|
+
if llm_result.short_summary:
|
|
1657
|
+
st.markdown(
|
|
1658
|
+
f"""<div style="background: linear-gradient(135deg, rgba(16, 185, 129, 0.1) 0%, rgba(5, 150, 105, 0.05) 100%);
|
|
1659
|
+
border-left: 4px solid #10b981; border-radius: 8px; padding: 1rem; margin: 1rem 0;">
|
|
1660
|
+
<p style="font-weight: 600; color: #10b981; margin-bottom: 0.5rem;">AI Summary</p>
|
|
1661
|
+
<p style="color: #e5e5e5; line-height: 1.6;">{llm_result.short_summary}</p>
|
|
1662
|
+
</div>""",
|
|
1663
|
+
unsafe_allow_html=True,
|
|
1664
|
+
)
|
|
1665
|
+
|
|
1666
|
+
# Detailed analysis
|
|
1667
|
+
if llm_result.detailed_summary:
|
|
1668
|
+
with st.expander("Detailed Analysis", expanded=True):
|
|
1669
|
+
st.markdown(llm_result.detailed_summary)
|
|
1670
|
+
|
|
1671
|
+
# Show model/tokens info
|
|
1672
|
+
st.caption(
|
|
1673
|
+
f"Generated by {llm_result.model_used} "
|
|
1674
|
+
f"({llm_result.tokens_used} tokens)"
|
|
1675
|
+
)
|
|
1676
|
+
elif llm_result and llm_result.error_message:
|
|
1677
|
+
st.error(f"AI summary failed: {llm_result.error_message}")
|
|
1678
|
+
else:
|
|
1679
|
+
st.warning("AI summary generation returned empty result.")
|
|
1680
|
+
|
|
1681
|
+
except ImportError:
|
|
1682
|
+
st.error(
|
|
1683
|
+
"LLM module not available. Install with: `pip install haoline[llm]`"
|
|
1684
|
+
)
|
|
1685
|
+
except Exception as e:
|
|
1686
|
+
st.error(f"AI summary generation failed: {e}")
|
|
1687
|
+
|
|
1688
|
+
# Universal IR Summary (if available)
|
|
1689
|
+
if hasattr(report, "universal_graph") and report.universal_graph:
|
|
1690
|
+
with st.expander("Universal IR View", expanded=False):
|
|
1691
|
+
ir = report.universal_graph
|
|
1692
|
+
st.markdown(
|
|
1693
|
+
f"""
|
|
1694
|
+
<div style="background: linear-gradient(135deg, rgba(99, 102, 241, 0.1) 0%, rgba(79, 70, 229, 0.05) 100%);
|
|
1695
|
+
border-left: 4px solid #6366f1; border-radius: 8px; padding: 1rem; margin: 0.5rem 0;">
|
|
1696
|
+
<p style="font-weight: 600; color: #6366f1; margin-bottom: 0.5rem;">Format-Agnostic Graph</p>
|
|
1697
|
+
<p style="color: #e5e5e5; line-height: 1.6;">
|
|
1698
|
+
<strong>Source:</strong> {ir.metadata.source_format.value.upper()}<br>
|
|
1699
|
+
<strong>Nodes:</strong> {ir.num_nodes}<br>
|
|
1700
|
+
<strong>Tensors:</strong> {len(ir.tensors)}<br>
|
|
1701
|
+
<strong>Parameters:</strong> {ir.total_parameters:,}<br>
|
|
1702
|
+
<strong>Weight Size:</strong> {ir.total_weight_bytes / (1024 * 1024):.2f} MB
|
|
1703
|
+
</p>
|
|
1704
|
+
</div>
|
|
1705
|
+
""",
|
|
1706
|
+
unsafe_allow_html=True,
|
|
1707
|
+
)
|
|
1708
|
+
|
|
1709
|
+
# Op type distribution from IR
|
|
1710
|
+
op_counts = ir.op_type_counts
|
|
1711
|
+
if op_counts:
|
|
1712
|
+
st.markdown("**Operation Types (from IR):**")
|
|
1713
|
+
top_ops = sorted(
|
|
1714
|
+
op_counts.items(), key=lambda x: x[1], reverse=True
|
|
1715
|
+
)[:10]
|
|
1716
|
+
for op, count in top_ops:
|
|
1717
|
+
st.text(f" {op}: {count}")
|
|
1718
|
+
|
|
1719
|
+
# Operator distribution
|
|
1720
|
+
if report.graph_summary.op_type_counts:
|
|
1721
|
+
st.markdown("### Operator Distribution")
|
|
1722
|
+
|
|
1723
|
+
import pandas as pd
|
|
1724
|
+
|
|
1725
|
+
op_data = pd.DataFrame(
|
|
1726
|
+
[
|
|
1727
|
+
{"Operator": op, "Count": count}
|
|
1728
|
+
for op, count in sorted(
|
|
1729
|
+
report.graph_summary.op_type_counts.items(),
|
|
1730
|
+
key=lambda x: x[1],
|
|
1731
|
+
reverse=True,
|
|
1732
|
+
)
|
|
1733
|
+
]
|
|
1734
|
+
)
|
|
1735
|
+
st.bar_chart(op_data.set_index("Operator"))
|
|
1736
|
+
|
|
1737
|
+
# Hardware estimates
|
|
1738
|
+
if report.hardware_estimates:
|
|
1739
|
+
st.markdown("### Hardware Estimates")
|
|
1740
|
+
hw = report.hardware_estimates
|
|
1741
|
+
|
|
1742
|
+
hw_col1, hw_col2, hw_col3 = st.columns(3)
|
|
1743
|
+
|
|
1744
|
+
with hw_col1:
|
|
1745
|
+
st.metric("VRAM Required", format_bytes(hw.vram_required_bytes))
|
|
1746
|
+
|
|
1747
|
+
with hw_col2:
|
|
1748
|
+
fits = "Yes" if hw.fits_in_vram else "No"
|
|
1749
|
+
st.metric("Fits in VRAM", fits)
|
|
1750
|
+
|
|
1751
|
+
with hw_col3:
|
|
1752
|
+
st.metric("Theoretical Latency", f"{hw.theoretical_latency_ms:.2f} ms")
|
|
1753
|
+
|
|
1754
|
+
with tab2:
|
|
1755
|
+
if include_graph:
|
|
1756
|
+
st.markdown("### Interactive Architecture Graph")
|
|
1757
|
+
st.caption(
|
|
1758
|
+
"๐ฑ๏ธ Scroll to zoom | Drag to pan | Click nodes to expand/collapse | Use sidebar controls"
|
|
1759
|
+
)
|
|
1760
|
+
|
|
1761
|
+
try:
|
|
1762
|
+
# Build the full interactive D3.js graph
|
|
1763
|
+
import logging
|
|
1764
|
+
|
|
1765
|
+
graph_logger = logging.getLogger("haoline.graph")
|
|
1766
|
+
|
|
1767
|
+
# Load graph info
|
|
1768
|
+
loader = ONNXGraphLoader(logger=graph_logger)
|
|
1769
|
+
_, graph_info = loader.load(tmp_path)
|
|
1770
|
+
|
|
1771
|
+
# Detect patterns/blocks
|
|
1772
|
+
pattern_analyzer = PatternAnalyzer(logger=graph_logger)
|
|
1773
|
+
blocks = pattern_analyzer.group_into_blocks(graph_info)
|
|
1774
|
+
|
|
1775
|
+
# Analyze edges
|
|
1776
|
+
edge_analyzer = EdgeAnalyzer(logger=graph_logger)
|
|
1777
|
+
edge_result = edge_analyzer.analyze(graph_info)
|
|
1778
|
+
|
|
1779
|
+
# Build hierarchical graph
|
|
1780
|
+
builder = HierarchicalGraphBuilder(logger=graph_logger)
|
|
1781
|
+
model_name = Path(file_name).stem
|
|
1782
|
+
hier_graph = builder.build(graph_info, blocks, model_name)
|
|
1783
|
+
|
|
1784
|
+
# Generate the full D3.js HTML
|
|
1785
|
+
# The HTML template auto-detects embedded mode (iframe) and:
|
|
1786
|
+
# - Collapses sidebar for more graph space
|
|
1787
|
+
# - Auto-fits the view
|
|
1788
|
+
graph_html = generate_graph_html(
|
|
1789
|
+
hier_graph,
|
|
1790
|
+
edge_result,
|
|
1791
|
+
title=model_name,
|
|
1792
|
+
model_size_bytes=len(file_bytes),
|
|
1793
|
+
)
|
|
1794
|
+
|
|
1795
|
+
# Embed with generous height for comfortable viewing
|
|
1796
|
+
components.html(graph_html, height=800, scrolling=False)
|
|
1797
|
+
|
|
1798
|
+
except Exception as e:
|
|
1799
|
+
st.warning(f"Could not generate interactive graph: {e}")
|
|
1800
|
+
# Fallback to block list
|
|
1801
|
+
if report.detected_blocks:
|
|
1802
|
+
st.markdown("#### Detected Architecture Blocks")
|
|
1803
|
+
for i, block in enumerate(report.detected_blocks[:15]):
|
|
1804
|
+
with st.expander(
|
|
1805
|
+
f"{block.block_type}: {block.name}", expanded=(i < 3)
|
|
1806
|
+
):
|
|
1807
|
+
st.write(f"**Type:** {block.block_type}")
|
|
1808
|
+
st.write(f"**Nodes:** {len(block.nodes)}")
|
|
1809
|
+
else:
|
|
1810
|
+
st.info(
|
|
1811
|
+
"Enable 'Interactive Graph' in the sidebar to see the architecture visualization."
|
|
1812
|
+
)
|
|
1813
|
+
|
|
1814
|
+
with tab3:
|
|
1815
|
+
st.markdown("### Detected Patterns")
|
|
1816
|
+
|
|
1817
|
+
if report.detected_blocks:
|
|
1818
|
+
for block in report.detected_blocks[:10]: # Limit to first 10
|
|
1819
|
+
with st.expander(f"{block.block_type}: {block.name}"):
|
|
1820
|
+
st.write(
|
|
1821
|
+
f"**Nodes:** {', '.join(block.nodes[:5])}{'...' if len(block.nodes) > 5 else ''}"
|
|
1822
|
+
)
|
|
1823
|
+
else:
|
|
1824
|
+
st.info("No architectural patterns detected.")
|
|
1825
|
+
|
|
1826
|
+
st.markdown("### Risk Signals")
|
|
1827
|
+
|
|
1828
|
+
if report.risk_signals:
|
|
1829
|
+
for risk in report.risk_signals:
|
|
1830
|
+
severity_color = {"high": "๐ด", "medium": "๐ก", "low": "๐ข"}.get(
|
|
1831
|
+
risk.severity, "โช"
|
|
1832
|
+
)
|
|
1833
|
+
|
|
1834
|
+
st.markdown(f"{severity_color} **{risk.id}** ({risk.severity})")
|
|
1835
|
+
st.caption(risk.description)
|
|
1836
|
+
else:
|
|
1837
|
+
st.success("No risk signals detected!")
|
|
1838
|
+
|
|
1839
|
+
with tab4:
|
|
1840
|
+
model_name = file_name.replace(".onnx", "")
|
|
1841
|
+
|
|
1842
|
+
st.markdown(
|
|
1843
|
+
"""
|
|
1844
|
+
<div style="margin-bottom: 1.5rem;">
|
|
1845
|
+
<h3 style="color: #f5f5f5; margin-bottom: 0.25rem;">Export Reports</h3>
|
|
1846
|
+
<p style="color: #737373; font-size: 0.9rem; margin: 0;">
|
|
1847
|
+
Download your analysis in various formats
|
|
1848
|
+
</p>
|
|
1849
|
+
</div>
|
|
1850
|
+
""",
|
|
1851
|
+
unsafe_allow_html=True,
|
|
1852
|
+
)
|
|
1853
|
+
|
|
1854
|
+
# Generate all export data
|
|
1855
|
+
json_data = report.to_json()
|
|
1856
|
+
md_data = report.to_markdown()
|
|
1857
|
+
html_data = report.to_html()
|
|
1858
|
+
|
|
1859
|
+
# Try to generate PDF
|
|
1860
|
+
pdf_data = None
|
|
1861
|
+
try:
|
|
1862
|
+
from haoline.pdf_generator import (
|
|
1863
|
+
PDFGenerator,
|
|
1864
|
+
)
|
|
1865
|
+
from haoline.pdf_generator import (
|
|
1866
|
+
is_available as pdf_available,
|
|
1867
|
+
)
|
|
1868
|
+
|
|
1869
|
+
if pdf_available():
|
|
1870
|
+
import tempfile as tf_pdf
|
|
1871
|
+
|
|
1872
|
+
pdf_gen = PDFGenerator()
|
|
1873
|
+
with tf_pdf.NamedTemporaryFile(suffix=".pdf", delete=False) as pdf_tmp:
|
|
1874
|
+
if pdf_gen.generate_from_html(html_data, pdf_tmp.name):
|
|
1875
|
+
with open(pdf_tmp.name, "rb") as f:
|
|
1876
|
+
pdf_data = f.read()
|
|
1877
|
+
except Exception:
|
|
1878
|
+
pass
|
|
1879
|
+
|
|
1880
|
+
# Custom styled export grid
|
|
1881
|
+
st.markdown(
|
|
1882
|
+
"""
|
|
1883
|
+
<style>
|
|
1884
|
+
.export-grid {
|
|
1885
|
+
display: grid;
|
|
1886
|
+
grid-template-columns: repeat(2, 1fr);
|
|
1887
|
+
gap: 1rem;
|
|
1888
|
+
margin-top: 1rem;
|
|
1889
|
+
}
|
|
1890
|
+
.export-card {
|
|
1891
|
+
background: #1a1a1a;
|
|
1892
|
+
border: 1px solid rgba(255,255,255,0.1);
|
|
1893
|
+
border-radius: 12px;
|
|
1894
|
+
padding: 1.25rem;
|
|
1895
|
+
transition: all 0.2s ease;
|
|
1896
|
+
}
|
|
1897
|
+
.export-card:hover {
|
|
1898
|
+
border-color: #10b981;
|
|
1899
|
+
background: #1f1f1f;
|
|
1900
|
+
}
|
|
1901
|
+
.export-icon {
|
|
1902
|
+
font-size: 1.5rem;
|
|
1903
|
+
margin-bottom: 0.5rem;
|
|
1904
|
+
}
|
|
1905
|
+
.export-title {
|
|
1906
|
+
color: #f5f5f5;
|
|
1907
|
+
font-weight: 600;
|
|
1908
|
+
font-size: 1rem;
|
|
1909
|
+
margin-bottom: 0.25rem;
|
|
1910
|
+
}
|
|
1911
|
+
.export-desc {
|
|
1912
|
+
color: #737373;
|
|
1913
|
+
font-size: 0.8rem;
|
|
1914
|
+
line-height: 1.4;
|
|
1915
|
+
}
|
|
1916
|
+
</style>
|
|
1917
|
+
""",
|
|
1918
|
+
unsafe_allow_html=True,
|
|
1919
|
+
)
|
|
1920
|
+
|
|
1921
|
+
col1, col2 = st.columns(2)
|
|
1922
|
+
|
|
1923
|
+
with col1:
|
|
1924
|
+
st.markdown(
|
|
1925
|
+
"""
|
|
1926
|
+
<div class="export-card">
|
|
1927
|
+
<div class="export-icon">๐</div>
|
|
1928
|
+
<div class="export-title">HTML Report</div>
|
|
1929
|
+
<div class="export-desc">Interactive report with D3.js graph visualization</div>
|
|
1930
|
+
</div>
|
|
1931
|
+
""",
|
|
1932
|
+
unsafe_allow_html=True,
|
|
1933
|
+
)
|
|
1934
|
+
st.download_button(
|
|
1935
|
+
label="Download HTML",
|
|
1936
|
+
data=html_data,
|
|
1937
|
+
file_name=f"{model_name}_report.html",
|
|
1938
|
+
mime="text/html",
|
|
1939
|
+
use_container_width=True,
|
|
1940
|
+
)
|
|
1941
|
+
|
|
1942
|
+
with col2:
|
|
1943
|
+
st.markdown(
|
|
1944
|
+
"""
|
|
1945
|
+
<div class="export-card">
|
|
1946
|
+
<div class="export-icon">๐</div>
|
|
1947
|
+
<div class="export-title">JSON Data</div>
|
|
1948
|
+
<div class="export-desc">Raw analysis data for programmatic use</div>
|
|
1949
|
+
</div>
|
|
1950
|
+
""",
|
|
1951
|
+
unsafe_allow_html=True,
|
|
1952
|
+
)
|
|
1953
|
+
st.download_button(
|
|
1954
|
+
label="Download JSON",
|
|
1955
|
+
data=json_data,
|
|
1956
|
+
file_name=f"{model_name}_report.json",
|
|
1957
|
+
mime="application/json",
|
|
1958
|
+
use_container_width=True,
|
|
1959
|
+
)
|
|
1960
|
+
|
|
1961
|
+
col3, col4 = st.columns(2)
|
|
1962
|
+
|
|
1963
|
+
with col3:
|
|
1964
|
+
st.markdown(
|
|
1965
|
+
"""
|
|
1966
|
+
<div class="export-card">
|
|
1967
|
+
<div class="export-icon">๐</div>
|
|
1968
|
+
<div class="export-title">Markdown</div>
|
|
1969
|
+
<div class="export-desc">Text report for docs, READMEs, or wikis</div>
|
|
1970
|
+
</div>
|
|
1971
|
+
""",
|
|
1972
|
+
unsafe_allow_html=True,
|
|
1973
|
+
)
|
|
1974
|
+
st.download_button(
|
|
1975
|
+
label="Download Markdown",
|
|
1976
|
+
data=md_data,
|
|
1977
|
+
file_name=f"{model_name}_report.md",
|
|
1978
|
+
mime="text/markdown",
|
|
1979
|
+
use_container_width=True,
|
|
1980
|
+
)
|
|
1981
|
+
|
|
1982
|
+
with col4:
|
|
1983
|
+
if pdf_data:
|
|
1984
|
+
st.markdown(
|
|
1985
|
+
"""
|
|
1986
|
+
<div class="export-card">
|
|
1987
|
+
<div class="export-icon">๐</div>
|
|
1988
|
+
<div class="export-title">PDF Report</div>
|
|
1989
|
+
<div class="export-desc">Print-ready document for sharing</div>
|
|
1990
|
+
</div>
|
|
1991
|
+
""",
|
|
1992
|
+
unsafe_allow_html=True,
|
|
1993
|
+
)
|
|
1994
|
+
st.download_button(
|
|
1995
|
+
label="Download PDF",
|
|
1996
|
+
data=pdf_data,
|
|
1997
|
+
file_name=f"{model_name}_report.pdf",
|
|
1998
|
+
mime="application/pdf",
|
|
1999
|
+
use_container_width=True,
|
|
2000
|
+
)
|
|
2001
|
+
else:
|
|
2002
|
+
st.markdown(
|
|
2003
|
+
"""
|
|
2004
|
+
<div class="export-card" style="opacity: 0.5;">
|
|
2005
|
+
<div class="export-icon">๐</div>
|
|
2006
|
+
<div class="export-title">PDF Report</div>
|
|
2007
|
+
<div class="export-desc">Requires Playwright ยท Use CLI for PDF export</div>
|
|
2008
|
+
</div>
|
|
2009
|
+
""",
|
|
2010
|
+
unsafe_allow_html=True,
|
|
2011
|
+
)
|
|
2012
|
+
st.button("PDF unavailable", disabled=True, use_container_width=True)
|
|
2013
|
+
|
|
2014
|
+
except Exception as e:
|
|
2015
|
+
st.error(f"Error analyzing model: {e}")
|
|
2016
|
+
st.exception(e)
|
|
2017
|
+
|
|
2018
|
+
finally:
|
|
2019
|
+
# Clean up temp file
|
|
2020
|
+
Path(tmp_path).unlink(missing_ok=True)
|
|
2021
|
+
|
|
2022
|
+
|
|
2023
|
+
# Script entry point: launch the Streamlit app when this module is executed
# directly (e.g. `python streamlit_app.py` or via `streamlit run`).
if __name__ == "__main__":
    main()
|