haoline-0.3.0-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- haoline/.streamlit/config.toml +10 -0
- haoline/__init__.py +248 -0
- haoline/analyzer.py +935 -0
- haoline/cli.py +2712 -0
- haoline/compare.py +811 -0
- haoline/compare_visualizations.py +1564 -0
- haoline/edge_analysis.py +525 -0
- haoline/eval/__init__.py +131 -0
- haoline/eval/adapters.py +844 -0
- haoline/eval/cli.py +390 -0
- haoline/eval/comparison.py +542 -0
- haoline/eval/deployment.py +633 -0
- haoline/eval/schemas.py +833 -0
- haoline/examples/__init__.py +15 -0
- haoline/examples/basic_inspection.py +74 -0
- haoline/examples/compare_models.py +117 -0
- haoline/examples/hardware_estimation.py +78 -0
- haoline/format_adapters.py +1001 -0
- haoline/formats/__init__.py +123 -0
- haoline/formats/coreml.py +250 -0
- haoline/formats/gguf.py +483 -0
- haoline/formats/openvino.py +255 -0
- haoline/formats/safetensors.py +273 -0
- haoline/formats/tflite.py +369 -0
- haoline/hardware.py +2307 -0
- haoline/hierarchical_graph.py +462 -0
- haoline/html_export.py +1573 -0
- haoline/layer_summary.py +769 -0
- haoline/llm_summarizer.py +465 -0
- haoline/op_icons.py +618 -0
- haoline/operational_profiling.py +1492 -0
- haoline/patterns.py +1116 -0
- haoline/pdf_generator.py +265 -0
- haoline/privacy.py +250 -0
- haoline/pydantic_models.py +241 -0
- haoline/report.py +1923 -0
- haoline/report_sections.py +539 -0
- haoline/risks.py +521 -0
- haoline/schema.py +523 -0
- haoline/streamlit_app.py +2024 -0
- haoline/tests/__init__.py +4 -0
- haoline/tests/conftest.py +123 -0
- haoline/tests/test_analyzer.py +868 -0
- haoline/tests/test_compare_visualizations.py +293 -0
- haoline/tests/test_edge_analysis.py +243 -0
- haoline/tests/test_eval.py +604 -0
- haoline/tests/test_format_adapters.py +460 -0
- haoline/tests/test_hardware.py +237 -0
- haoline/tests/test_hardware_recommender.py +90 -0
- haoline/tests/test_hierarchical_graph.py +326 -0
- haoline/tests/test_html_export.py +180 -0
- haoline/tests/test_layer_summary.py +428 -0
- haoline/tests/test_llm_patterns.py +540 -0
- haoline/tests/test_llm_summarizer.py +339 -0
- haoline/tests/test_patterns.py +774 -0
- haoline/tests/test_pytorch.py +327 -0
- haoline/tests/test_report.py +383 -0
- haoline/tests/test_risks.py +398 -0
- haoline/tests/test_schema.py +417 -0
- haoline/tests/test_tensorflow.py +380 -0
- haoline/tests/test_visualizations.py +316 -0
- haoline/universal_ir.py +856 -0
- haoline/visualizations.py +1086 -0
- haoline/visualize_yolo.py +44 -0
- haoline/web.py +110 -0
- haoline-0.3.0.dist-info/METADATA +471 -0
- haoline-0.3.0.dist-info/RECORD +70 -0
- haoline-0.3.0.dist-info/WHEEL +4 -0
- haoline-0.3.0.dist-info/entry_points.txt +5 -0
- haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
haoline/formats/tflite.py
@@ -0,0 +1,369 @@
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+TFLite format reader.
+
+TensorFlow Lite models use FlatBuffer format. This reader extracts
+basic metadata without requiring TensorFlow dependencies.
+
+For full analysis, use tflite-runtime or convert to ONNX first.
+
+Reference: https://www.tensorflow.org/lite/guide
+"""
+
+from __future__ import annotations
+
+import struct
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+# TFLite FlatBuffer identifier
+TFLITE_IDENTIFIER = b"TFL3"
+
+# TFLite tensor types
+TFLITE_TYPES: dict[int, tuple[str, int]] = {
+    0: ("FLOAT32", 4),
+    1: ("FLOAT16", 2),
+    2: ("INT32", 4),
+    3: ("UINT8", 1),
+    4: ("INT64", 8),
+    5: ("STRING", 0),  # Variable
+    6: ("BOOL", 1),
+    7: ("INT16", 2),
+    8: ("COMPLEX64", 8),
+    9: ("INT8", 1),
+    10: ("FLOAT64", 8),
+    11: ("COMPLEX128", 16),
+    12: ("UINT64", 8),
+    13: ("RESOURCE", 0),
+    14: ("VARIANT", 0),
+    15: ("UINT32", 4),
+    16: ("UINT16", 2),
+    17: ("INT4", 0),  # Packed
+}
+
+# TFLite builtin operators
+TFLITE_BUILTINS: dict[int, str] = {
+    0: "ADD",
+    1: "AVERAGE_POOL_2D",
+    2: "CONCATENATION",
+    3: "CONV_2D",
+    4: "DEPTHWISE_CONV_2D",
+    5: "DEPTH_TO_SPACE",
+    6: "DEQUANTIZE",
+    7: "EMBEDDING_LOOKUP",
+    8: "FLOOR",
+    9: "FULLY_CONNECTED",
+    10: "HASHTABLE_LOOKUP",
+    11: "L2_NORMALIZATION",
+    12: "L2_POOL_2D",
+    13: "LOCAL_RESPONSE_NORMALIZATION",
+    14: "LOGISTIC",
+    15: "LSH_PROJECTION",
+    16: "LSTM",
+    17: "MAX_POOL_2D",
+    18: "MUL",
+    19: "RELU",
+    20: "RELU_N1_TO_1",
+    21: "RELU6",
+    22: "RESHAPE",
+    23: "RESIZE_BILINEAR",
+    24: "RNN",
+    25: "SOFTMAX",
+    26: "SPACE_TO_DEPTH",
+    27: "SVDF",
+    28: "TANH",
+    29: "CONCAT_EMBEDDINGS",
+    30: "SKIP_GRAM",
+    31: "CALL",
+    32: "CUSTOM",
+    33: "EMBEDDING_LOOKUP_SPARSE",
+    34: "PAD",
+    35: "UNIDIRECTIONAL_SEQUENCE_RNN",
+    36: "GATHER",
+    37: "BATCH_TO_SPACE_ND",
+    38: "SPACE_TO_BATCH_ND",
+    39: "TRANSPOSE",
+    40: "MEAN",
+    41: "SUB",
+    42: "DIV",
+    43: "SQUEEZE",
+    44: "UNIDIRECTIONAL_SEQUENCE_LSTM",
+    45: "STRIDED_SLICE",
+    46: "BIDIRECTIONAL_SEQUENCE_RNN",
+    47: "EXP",
+    48: "TOPK_V2",
+    49: "SPLIT",
+    50: "LOG_SOFTMAX",
+    # ... many more, but these are the common ones
+}
+
+
+@dataclass
+class TFLiteTensorInfo:
+    """Information about a TFLite tensor."""
+
+    name: str
+    shape: tuple[int, ...]
+    type_id: int
+    buffer_idx: int
+    quantization: dict[str, Any] | None = None
+
+    @property
+    def type_name(self) -> str:
+        """Human-readable type name."""
+        return TFLITE_TYPES.get(self.type_id, ("UNKNOWN", 4))[0]
+
+    @property
+    def bytes_per_element(self) -> int:
+        """Bytes per element."""
+        return TFLITE_TYPES.get(self.type_id, ("UNKNOWN", 4))[1]
+
+    @property
+    def n_elements(self) -> int:
+        """Total number of elements."""
+        result = 1
+        for d in self.shape:
+            result *= d
+        return result
+
+    @property
+    def size_bytes(self) -> int:
+        """Estimated size in bytes."""
+        bpe = self.bytes_per_element
+        if bpe == 0:
+            return 0  # Variable size types
+        return self.n_elements * bpe
+
+
+@dataclass
+class TFLiteOperatorInfo:
+    """Information about a TFLite operator."""
+
+    opcode_index: int
+    builtin_code: int
+    inputs: list[int]
+    outputs: list[int]
+
+    @property
+    def op_name(self) -> str:
+        """Human-readable operator name."""
+        return TFLITE_BUILTINS.get(self.builtin_code, f"CUSTOM_{self.builtin_code}")
+
+
+@dataclass
+class TFLiteInfo:
+    """Parsed TFLite file information."""
+
+    path: Path
+    version: int
+    description: str
+    tensors: list[TFLiteTensorInfo] = field(default_factory=list)
+    operators: list[TFLiteOperatorInfo] = field(default_factory=list)
+    inputs: list[int] = field(default_factory=list)
+    outputs: list[int] = field(default_factory=list)
+    metadata: dict[str, Any] = field(default_factory=dict)
+
+    @property
+    def total_params(self) -> int:
+        """Total parameter count (non-input tensors)."""
+        input_set = set(self.inputs)
+        return sum(t.n_elements for i, t in enumerate(self.tensors) if i not in input_set)
+
+    @property
+    def total_size_bytes(self) -> int:
+        """Total model size in bytes."""
+        return sum(t.size_bytes for t in self.tensors)
+
+    @property
+    def op_counts(self) -> dict[str, int]:
+        """Count of operators by type."""
+        counts: dict[str, int] = {}
+        for op in self.operators:
+            name = op.op_name
+            counts[name] = counts.get(name, 0) + 1
+        return counts
+
+    @property
+    def type_breakdown(self) -> dict[str, int]:
+        """Count of tensors by type."""
+        breakdown: dict[str, int] = {}
+        for t in self.tensors:
+            type_name = t.type_name
+            breakdown[type_name] = breakdown.get(type_name, 0) + 1
+        return breakdown
+
+    @property
+    def is_quantized(self) -> bool:
+        """Check if model uses quantized types."""
+        quant_types = {"INT8", "UINT8", "INT16", "INT4"}
+        return any(t.type_name in quant_types for t in self.tensors)
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "path": str(self.path),
+            "version": self.version,
+            "description": self.description,
+            "tensor_count": len(self.tensors),
+            "operator_count": len(self.operators),
+            "total_params": self.total_params,
+            "total_size_bytes": self.total_size_bytes,
+            "is_quantized": self.is_quantized,
+            "op_counts": self.op_counts,
+            "type_breakdown": self.type_breakdown,
+            "inputs": self.inputs,
+            "outputs": self.outputs,
+        }
+
+
+class TFLiteReader:
+    """
+    Reader for TFLite format files.
+
+    This provides basic metadata extraction using pure Python.
+    For full tensor access, use tflite-runtime.
+    """
+
+    def __init__(self, path: str | Path):
+        """
+        Initialize reader with file path.
+
+        Args:
+            path: Path to the TFLite file.
+        """
+        self.path = Path(path)
+        if not self.path.exists():
+            raise FileNotFoundError(f"TFLite file not found: {self.path}")
+
+    def read(self) -> TFLiteInfo:
+        """
+        Read and parse the TFLite file.
+
+        This uses tflite-runtime if available, otherwise falls back
+        to basic FlatBuffer parsing.
+
+        Returns:
+            TFLiteInfo with parsed metadata.
+        """
+        # Try tflite-runtime first
+        try:
+            return self._read_with_interpreter()
+        except ImportError:
+            pass
+
+        # Fall back to basic parsing
+        return self._read_basic()
+
+    def _read_with_interpreter(self) -> TFLiteInfo:
+        """Read using TFLite Interpreter."""
+        from tflite_runtime.interpreter import Interpreter
+
+        interpreter = Interpreter(model_path=str(self.path))
+        interpreter.allocate_tensors()
+
+        # Get tensor details
+        tensor_details = interpreter.get_tensor_details()
+        tensors = []
+        for td in tensor_details:
+            quant = None
+            if "quantization" in td:
+                quant = {
+                    "scale": td["quantization"][0],
+                    "zero_point": td["quantization"][1],
+                }
+            tensors.append(
+                TFLiteTensorInfo(
+                    name=td["name"],
+                    shape=tuple(td["shape"]),
+                    type_id=td["dtype"],
+                    buffer_idx=td.get("buffer_idx", 0),
+                    quantization=quant,
+                )
+            )
+
+        # Get input/output indices
+        input_details = interpreter.get_input_details()
+        output_details = interpreter.get_output_details()
+        inputs = [d["index"] for d in input_details]
+        outputs = [d["index"] for d in output_details]
+
+        return TFLiteInfo(
+            path=self.path,
+            version=3,  # TFL3
+            description="",
+            tensors=tensors,
+            operators=[],  # Not easily accessible via interpreter
+            inputs=inputs,
+            outputs=outputs,
+        )
+
+    def _read_basic(self) -> TFLiteInfo:
+        """Basic FlatBuffer parsing without tflite-runtime."""
+        with open(self.path, "rb") as f:
+            data = f.read()
+
+        # Verify file identifier (at offset 4-8)
+        if len(data) < 8:
+            raise ValueError("File too small to be a valid TFLite model")
+
+        identifier = data[4:8]
+        if identifier != TFLITE_IDENTIFIER:
+            raise ValueError(
+                f"Not a TFLite file: expected {TFLITE_IDENTIFIER!r}, got {identifier!r}"
+            )
+
+        # FlatBuffer root table offset
+        root_offset = struct.unpack("<I", data[0:4])[0]
+
+        # This is a simplified parser - full FlatBuffer parsing is complex
+        # We extract what we can from the structure
+
+        return TFLiteInfo(
+            path=self.path,
+            version=3,
+            description="TFLite model (basic parsing - install tflite-runtime for full details)",
+            tensors=[],
+            operators=[],
+            inputs=[],
+            outputs=[],
+            metadata={"file_size": len(data), "root_offset": root_offset},
+        )
+
+
+def is_tflite_file(path: str | Path) -> bool:
+    """
+    Check if a file is a valid TFLite file.
+
+    Args:
+        path: Path to check.
+
+    Returns:
+        True if the file is a TFLite model.
+    """
+    path = Path(path)
+    if not path.exists() or not path.is_file():
+        return False
+
+    try:
+        with open(path, "rb") as f:
+            # Read enough for identifier check
+            data = f.read(8)
+            if len(data) < 8:
+                return False
+            identifier = data[4:8]
+            return identifier == TFLITE_IDENTIFIER
+    except Exception:
+        return False
+
+
+def is_available() -> bool:
+    """Check if tflite-runtime is available."""
+    try:
+        from tflite_runtime.interpreter import Interpreter  # noqa: F401
+
+        return True
+    except ImportError:
+        return False
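The new module's public surface is small: is_tflite_file() sniffs the TFL3 FlatBuffer identifier, is_available() probes for tflite-runtime, and TFLiteReader.read() returns a TFLiteInfo whose to_dict() output is ready for JSON export. A minimal usage sketch based on the functions added in this file, assuming a hypothetical local file model.tflite (without tflite-runtime installed, only file-level metadata such as file_size and root_offset is populated):

    from haoline.formats.tflite import TFLiteReader, is_available, is_tflite_file

    path = "model.tflite"  # hypothetical path; point this at a real TFLite model
    if is_tflite_file(path):
        info = TFLiteReader(path).read()
        summary = info.to_dict()
        # With tflite-runtime installed, tensor/input/output details are filled in.
        print(summary["total_params"], summary["is_quantized"], summary["type_breakdown"])
        if not is_available():
            print("Install tflite-runtime for full details.")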