haoline-0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- haoline/.streamlit/config.toml +10 -0
- haoline/__init__.py +248 -0
- haoline/analyzer.py +935 -0
- haoline/cli.py +2712 -0
- haoline/compare.py +811 -0
- haoline/compare_visualizations.py +1564 -0
- haoline/edge_analysis.py +525 -0
- haoline/eval/__init__.py +131 -0
- haoline/eval/adapters.py +844 -0
- haoline/eval/cli.py +390 -0
- haoline/eval/comparison.py +542 -0
- haoline/eval/deployment.py +633 -0
- haoline/eval/schemas.py +833 -0
- haoline/examples/__init__.py +15 -0
- haoline/examples/basic_inspection.py +74 -0
- haoline/examples/compare_models.py +117 -0
- haoline/examples/hardware_estimation.py +78 -0
- haoline/format_adapters.py +1001 -0
- haoline/formats/__init__.py +123 -0
- haoline/formats/coreml.py +250 -0
- haoline/formats/gguf.py +483 -0
- haoline/formats/openvino.py +255 -0
- haoline/formats/safetensors.py +273 -0
- haoline/formats/tflite.py +369 -0
- haoline/hardware.py +2307 -0
- haoline/hierarchical_graph.py +462 -0
- haoline/html_export.py +1573 -0
- haoline/layer_summary.py +769 -0
- haoline/llm_summarizer.py +465 -0
- haoline/op_icons.py +618 -0
- haoline/operational_profiling.py +1492 -0
- haoline/patterns.py +1116 -0
- haoline/pdf_generator.py +265 -0
- haoline/privacy.py +250 -0
- haoline/pydantic_models.py +241 -0
- haoline/report.py +1923 -0
- haoline/report_sections.py +539 -0
- haoline/risks.py +521 -0
- haoline/schema.py +523 -0
- haoline/streamlit_app.py +2024 -0
- haoline/tests/__init__.py +4 -0
- haoline/tests/conftest.py +123 -0
- haoline/tests/test_analyzer.py +868 -0
- haoline/tests/test_compare_visualizations.py +293 -0
- haoline/tests/test_edge_analysis.py +243 -0
- haoline/tests/test_eval.py +604 -0
- haoline/tests/test_format_adapters.py +460 -0
- haoline/tests/test_hardware.py +237 -0
- haoline/tests/test_hardware_recommender.py +90 -0
- haoline/tests/test_hierarchical_graph.py +326 -0
- haoline/tests/test_html_export.py +180 -0
- haoline/tests/test_layer_summary.py +428 -0
- haoline/tests/test_llm_patterns.py +540 -0
- haoline/tests/test_llm_summarizer.py +339 -0
- haoline/tests/test_patterns.py +774 -0
- haoline/tests/test_pytorch.py +327 -0
- haoline/tests/test_report.py +383 -0
- haoline/tests/test_risks.py +398 -0
- haoline/tests/test_schema.py +417 -0
- haoline/tests/test_tensorflow.py +380 -0
- haoline/tests/test_visualizations.py +316 -0
- haoline/universal_ir.py +856 -0
- haoline/visualizations.py +1086 -0
- haoline/visualize_yolo.py +44 -0
- haoline/web.py +110 -0
- haoline-0.3.0.dist-info/METADATA +471 -0
- haoline-0.3.0.dist-info/RECORD +70 -0
- haoline-0.3.0.dist-info/WHEEL +4 -0
- haoline-0.3.0.dist-info/entry_points.txt +5 -0
- haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
haoline/examples/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+HaoLine Example Scripts
+
+This package contains example scripts demonstrating common use cases:
+
+- basic_inspection.py: Analyze a single model and export JSON
+- compare_models.py: Compare multiple model variants (FP32/FP16/INT8)
+- hardware_estimation.py: Estimate performance on different GPUs
+
+Run from the tools/python directory:
+    python -m util.haoline.examples.basic_inspection model.onnx
+"""
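A note on the run command above: the `util.haoline.examples` module path reflects the source tree (`tools/python`); in this wheel the package installs as `haoline.examples` (see the RECORD entry in the manifest), so the examples should be runnable from any install. A minimal sketch, assuming a `model.onnx` in the working directory:

    # Hypothetical driver: runs the packaged example as if invoked from the CLI.
    # Module path taken from the wheel layout above, not from the docstring.
    import runpy
    import sys

    sys.argv = ["basic_inspection", "model.onnx"]  # substitute your model path
    runpy.run_module("haoline.examples.basic_inspection", run_name="__main__")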
haoline/examples/basic_inspection.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+Example: Basic Model Inspection
+
+This script demonstrates how to use HaoLine to inspect a model
+and generate reports programmatically (without CLI).
+
+Usage:
+    python basic_inspection.py model.onnx
+"""
+
+import sys
+from pathlib import Path
+
+from haoline import ModelInspector
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: python basic_inspection.py <model.onnx>")
+        sys.exit(1)
+
+    model_path = Path(sys.argv[1])
+    if not model_path.exists():
+        print(f"Error: Model not found: {model_path}")
+        sys.exit(1)
+
+    # Create inspector and analyze model
+    inspector = ModelInspector()
+    report = inspector.inspect(model_path)
+
+    # Print summary
+    print(f"\n{'=' * 60}")
+    print(f"Model: {model_path.name}")
+    print(f"{'=' * 60}")
+
+    # Basic stats
+    if report.graph_summary:
+        print("\nGraph Structure:")
+        print(f"  Nodes: {report.graph_summary.num_nodes}")
+        print(f"  Inputs: {report.graph_summary.num_inputs}")
+        print(f"  Outputs: {report.graph_summary.num_outputs}")
+
+    # Parameters
+    if report.param_counts:
+        total_params = report.param_counts.total
+        print(f"\nParameters: {total_params:,}")
+        if total_params > 1_000_000:
+            print(f"  ({total_params / 1_000_000:.1f}M)")
+
+    # FLOPs
+    if report.flop_counts:
+        total_flops = report.flop_counts.total
+        print(f"\nFLOPs: {total_flops:,}")
+        if total_flops > 1_000_000_000:
+            print(f"  ({total_flops / 1_000_000_000:.1f}G)")
+
+    # Memory
+    if report.memory_estimates:
+        print("\nMemory:")
+        print(f"  Model size: {report.memory_estimates.model_size_mb:.1f} MB")
+        print(f"  Peak activation: {report.memory_estimates.peak_activation_mb:.1f} MB")
+
+    # Export to JSON
+    json_path = model_path.with_suffix(".json")
+    json_path.write_text(report.to_json(), encoding="utf-8")
+    print(f"\nJSON report saved to: {json_path}")
+
+
+if __name__ == "__main__":
+    main()
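The script writes the full report next to the model as `model.json`. For downstream tooling, a sketch of reading it back, assuming the JSON keys mirror the attribute names used above (`param_counts.total` etc.); the real layout is defined in `haoline/schema.py`:

    # Assumed key names; verify against the actual schema before relying on them.
    import json
    from pathlib import Path

    data = json.loads(Path("model.json").read_text(encoding="utf-8"))
    params = (data.get("param_counts") or {}).get("total")  # hypothetical key
    if params is not None:
        print(f"Parameters: {params:,}")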
haoline/examples/compare_models.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+Example: Compare Multiple Models
+
+This script demonstrates how to compare multiple model variants
+(e.g., FP32 vs FP16 vs INT8) programmatically.
+
+Usage:
+    python compare_models.py model_fp32.onnx model_fp16.onnx model_int8.onnx
+"""
+
+import sys
+from pathlib import Path
+
+from haoline import ModelInspector
+
+
+def format_size(bytes_val: int) -> str:
+    """Format bytes as human-readable string."""
+    if bytes_val >= 1_000_000_000:
+        return f"{bytes_val / 1_000_000_000:.1f} GB"
+    elif bytes_val >= 1_000_000:
+        return f"{bytes_val / 1_000_000:.1f} MB"
+    elif bytes_val >= 1_000:
+        return f"{bytes_val / 1_000:.1f} KB"
+    return f"{bytes_val} B"
+
+
+def main():
+    if len(sys.argv) < 3:
+        print("Usage: python compare_models.py <model1.onnx> <model2.onnx> [model3.onnx ...]")
+        sys.exit(1)
+
+    model_paths = [Path(p) for p in sys.argv[1:]]
+
+    # Validate all models exist
+    for path in model_paths:
+        if not path.exists():
+            print(f"Error: Model not found: {path}")
+            sys.exit(1)
+
+    # Inspect all models
+    inspector = ModelInspector()
+    reports = []
+
+    print("Analyzing models...")
+    for path in model_paths:
+        print(f"  - {path.name}")
+        report = inspector.inspect(path)
+        reports.append((path.name, report))
+
+    # Print comparison table
+    print(f"\n{'=' * 80}")
+    print("MODEL COMPARISON")
+    print(f"{'=' * 80}")
+
+    # Header
+    print(f"\n{'Metric':<25}", end="")
+    for name, _ in reports:
+        print(f"{name:<20}", end="")
+    print()
+    print("-" * (25 + 20 * len(reports)))
+
+    # File size
+    print(f"{'File Size':<25}", end="")
+    for path, _report in zip(model_paths, reports, strict=True):
+        size = path.stat().st_size
+        print(f"{format_size(size):<20}", end="")
+    print()
+
+    # Parameters
+    print(f"{'Parameters':<25}", end="")
+    for _, report in reports:
+        params = report.param_counts.total if report.param_counts else 0
+        print(f"{params:,}".ljust(20), end="")
+    print()
+
+    # FLOPs
+    print(f"{'FLOPs':<25}", end="")
+    for _, report in reports:
+        flops = report.flop_counts.total if report.flop_counts else 0
+        print(f"{flops:,}".ljust(20), end="")
+    print()
+
+    # Memory
+    print(f"{'Peak Activation (MB)':<25}", end="")
+    for _, report in reports:
+        mem = report.memory_estimates.peak_activation_mb if report.memory_estimates else 0
+        print(f"{mem:.1f}".ljust(20), end="")
+    print()
+
+    # Compute deltas vs first model (baseline)
+    baseline_name, baseline_report = reports[0]
+    baseline_size = model_paths[0].stat().st_size
+
+    print(f"\n{'=' * 80}")
+    print(f"DELTAS vs {baseline_name} (baseline)")
+    print(f"{'=' * 80}")
+
+    for path, (name, report) in zip(model_paths[1:], reports[1:], strict=True):
+        size = path.stat().st_size
+        size_delta = (size - baseline_size) / baseline_size * 100
+
+        params = report.param_counts.total if report.param_counts else 0
+        baseline_params = baseline_report.param_counts.total if baseline_report.param_counts else 0
+        params_delta = (params - baseline_params) / baseline_params * 100 if baseline_params else 0
+
+        print(f"\n{name}:")
+        print(f"  Size: {size_delta:+.1f}%")
+        print(f"  Parameters: {params_delta:+.1f}%")
+
+
+if __name__ == "__main__":
+    main()
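The delta section is plain percent change against the first model passed on the command line. A quick worked check with made-up numbers (a 100 MB FP32 baseline against a 26 MB INT8 variant, not measurements from any real model):

    # Illustrative values only.
    baseline_size = 100_000_000  # bytes, FP32 baseline
    size = 26_000_000            # bytes, INT8 variant
    size_delta = (size - baseline_size) / baseline_size * 100
    print(f"  Size: {size_delta:+.1f}%")  # prints "  Size: -74.0%"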
haoline/examples/hardware_estimation.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Copyright (c) 2025 HaoLine Contributors
+# SPDX-License-Identifier: MIT
+
+"""
+Example: Hardware Performance Estimation
+
+This script demonstrates how to estimate model performance
+on different hardware targets.
+
+Usage:
+    python hardware_estimation.py model.onnx
+"""
+
+import sys
+from pathlib import Path
+
+from haoline import (
+    HardwareEstimator,
+    ModelInspector,
+    get_profile,
+    list_available_profiles,
+)
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: python hardware_estimation.py <model.onnx>")
+        print("\nAvailable hardware profiles:")
+        for name in list_available_profiles()[:10]:
+            print(f"  - {name}")
+        print("  ... and more")
+        sys.exit(1)
+
+    model_path = Path(sys.argv[1])
+    if not model_path.exists():
+        print(f"Error: Model not found: {model_path}")
+        sys.exit(1)
+
+    # Inspect model
+    inspector = ModelInspector()
+    report = inspector.inspect(model_path)
+
+    print(f"\n{'=' * 60}")
+    print(f"Hardware Performance Estimates: {model_path.name}")
+    print(f"{'=' * 60}")
+
+    # Test on a few hardware profiles
+    test_profiles = ["rtx4090", "rtx3080", "a100_40gb", "t4", "jetson_orin"]
+
+    estimator = HardwareEstimator()
+
+    for profile_name in test_profiles:
+        profile = get_profile(profile_name)
+        if profile is None:
+            continue
+
+        estimates = estimator.estimate(
+            flops=report.flop_counts.total if report.flop_counts else 0,
+            params=report.param_counts.total if report.param_counts else 0,
+            activation_bytes=(
+                report.memory_estimates.peak_activation_bytes if report.memory_estimates else 0
+            ),
+            hardware=profile,
+            precision="fp16",
+            batch_size=1,
+        )
+
+        print(f"\n{profile.name}:")
+        print(f"  VRAM Required: {estimates.vram_required_gb:.1f} GB")
+        print(f"  Fits in VRAM: {'Yes' if estimates.fits_in_vram else 'No'}")
+        print(f"  Estimated Latency: {estimates.latency_ms:.2f} ms")
+        print(f"  Estimated Throughput: {estimates.throughput_fps:.0f} fps")
+        print(f"  Bottleneck: {estimates.bottleneck}")
+
+
+if __name__ == "__main__":
+    main()
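The example hard-codes five profiles, but `list_available_profiles()` exposes the whole catalog, so the same loop generalizes to a "where does this model fit" scan. A sketch under the same assumptions as the example (same `estimate()` kwargs for every profile, with `report` and `estimator` as defined in `main()`):

    # Rank every known profile the model fits on, fastest first.
    candidates = []
    for name in list_available_profiles():
        profile = get_profile(name)
        if profile is None:
            continue
        est = estimator.estimate(
            flops=report.flop_counts.total if report.flop_counts else 0,
            params=report.param_counts.total if report.param_counts else 0,
            activation_bytes=(
                report.memory_estimates.peak_activation_bytes if report.memory_estimates else 0
            ),
            hardware=profile,
            precision="fp16",
            batch_size=1,
        )
        if est.fits_in_vram:
            candidates.append((est.latency_ms, profile.name))

    for latency_ms, name in sorted(candidates):
        print(f"{name}: ~{latency_ms:.2f} ms")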