haoline-0.3.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (70)
  1. haoline/.streamlit/config.toml +10 -0
  2. haoline/__init__.py +248 -0
  3. haoline/analyzer.py +935 -0
  4. haoline/cli.py +2712 -0
  5. haoline/compare.py +811 -0
  6. haoline/compare_visualizations.py +1564 -0
  7. haoline/edge_analysis.py +525 -0
  8. haoline/eval/__init__.py +131 -0
  9. haoline/eval/adapters.py +844 -0
  10. haoline/eval/cli.py +390 -0
  11. haoline/eval/comparison.py +542 -0
  12. haoline/eval/deployment.py +633 -0
  13. haoline/eval/schemas.py +833 -0
  14. haoline/examples/__init__.py +15 -0
  15. haoline/examples/basic_inspection.py +74 -0
  16. haoline/examples/compare_models.py +117 -0
  17. haoline/examples/hardware_estimation.py +78 -0
  18. haoline/format_adapters.py +1001 -0
  19. haoline/formats/__init__.py +123 -0
  20. haoline/formats/coreml.py +250 -0
  21. haoline/formats/gguf.py +483 -0
  22. haoline/formats/openvino.py +255 -0
  23. haoline/formats/safetensors.py +273 -0
  24. haoline/formats/tflite.py +369 -0
  25. haoline/hardware.py +2307 -0
  26. haoline/hierarchical_graph.py +462 -0
  27. haoline/html_export.py +1573 -0
  28. haoline/layer_summary.py +769 -0
  29. haoline/llm_summarizer.py +465 -0
  30. haoline/op_icons.py +618 -0
  31. haoline/operational_profiling.py +1492 -0
  32. haoline/patterns.py +1116 -0
  33. haoline/pdf_generator.py +265 -0
  34. haoline/privacy.py +250 -0
  35. haoline/pydantic_models.py +241 -0
  36. haoline/report.py +1923 -0
  37. haoline/report_sections.py +539 -0
  38. haoline/risks.py +521 -0
  39. haoline/schema.py +523 -0
  40. haoline/streamlit_app.py +2024 -0
  41. haoline/tests/__init__.py +4 -0
  42. haoline/tests/conftest.py +123 -0
  43. haoline/tests/test_analyzer.py +868 -0
  44. haoline/tests/test_compare_visualizations.py +293 -0
  45. haoline/tests/test_edge_analysis.py +243 -0
  46. haoline/tests/test_eval.py +604 -0
  47. haoline/tests/test_format_adapters.py +460 -0
  48. haoline/tests/test_hardware.py +237 -0
  49. haoline/tests/test_hardware_recommender.py +90 -0
  50. haoline/tests/test_hierarchical_graph.py +326 -0
  51. haoline/tests/test_html_export.py +180 -0
  52. haoline/tests/test_layer_summary.py +428 -0
  53. haoline/tests/test_llm_patterns.py +540 -0
  54. haoline/tests/test_llm_summarizer.py +339 -0
  55. haoline/tests/test_patterns.py +774 -0
  56. haoline/tests/test_pytorch.py +327 -0
  57. haoline/tests/test_report.py +383 -0
  58. haoline/tests/test_risks.py +398 -0
  59. haoline/tests/test_schema.py +417 -0
  60. haoline/tests/test_tensorflow.py +380 -0
  61. haoline/tests/test_visualizations.py +316 -0
  62. haoline/universal_ir.py +856 -0
  63. haoline/visualizations.py +1086 -0
  64. haoline/visualize_yolo.py +44 -0
  65. haoline/web.py +110 -0
  66. haoline-0.3.0.dist-info/METADATA +471 -0
  67. haoline-0.3.0.dist-info/RECORD +70 -0
  68. haoline-0.3.0.dist-info/WHEEL +4 -0
  69. haoline-0.3.0.dist-info/entry_points.txt +5 -0
  70. haoline-0.3.0.dist-info/licenses/LICENSE +22 -0
haoline/eval/cli.py ADDED
@@ -0,0 +1,390 @@
+"""
+HaoLine Eval Import CLI.
+
+Import evaluation results from external tools and combine with architecture analysis.
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import sys
+from pathlib import Path
+
+from .schemas import (
+    CombinedReport,
+    EvalResult,
+    create_combined_report,
+    link_eval_to_model,
+    validate_eval_result,
+)
+
+
+def create_parser() -> argparse.ArgumentParser:
+    """Create argument parser for import-eval command."""
+    parser = argparse.ArgumentParser(
+        prog="haoline-import-eval",
+        description="Import evaluation results from external tools.",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  # Import Ultralytics YOLO validation results
+  haoline-import-eval --from-ultralytics results.json --model yolo.onnx
+
+  # Import HuggingFace evaluate results (classification)
+  haoline-import-eval --from-hf-evaluate eval_results.json --task classification
+
+  # Import HuggingFace evaluate results (NLP task like QA)
+  haoline-import-eval --from-hf-evaluate qa_results.json --task nlp
+
+  # Import lm-eval-harness LLM benchmark results
+  haoline-import-eval --from-lm-eval lm_eval_output.json --model llama.onnx
+
+  # Import timm benchmark results
+  haoline-import-eval --from-timm validate_results.json --model resnet50.onnx
+
+  # Auto-detect format and import
+  haoline-import-eval --auto results.json --out-json standardized.json
+
+  # Import generic CSV with custom column mapping
+  haoline-import-eval --from-csv results.csv --model model.onnx \\
+      --map-column accuracy=top1_acc --map-column f1=f1_score
+
+  # Validate an eval results file
+  haoline-import-eval --validate results.json
+
+  # Combine eval results with model architecture analysis
+  haoline-import-eval --from-ultralytics results.json --model yolo.onnx --combine
+
+  # Combine and output to file
+  haoline-import-eval --auto results.json --model model.onnx --combine --out-json combined.json
+""",
+    )
+
+    # Input sources
+    input_group = parser.add_argument_group("Input Sources")
+    input_group.add_argument(
+        "--from-ultralytics",
+        type=Path,
+        metavar="PATH",
+        help="Import from Ultralytics YOLO validation output (JSON).",
+    )
+    input_group.add_argument(
+        "--from-hf-evaluate",
+        type=Path,
+        metavar="PATH",
+        help="Import from HuggingFace evaluate output (JSON).",
+    )
+    input_group.add_argument(
+        "--from-lm-eval",
+        type=Path,
+        metavar="PATH",
+        help="Import from lm-eval-harness output (JSON).",
+    )
+    input_group.add_argument(
+        "--from-csv",
+        type=Path,
+        metavar="PATH",
+        help="Import from generic CSV file.",
+    )
+    input_group.add_argument(
+        "--from-timm",
+        type=Path,
+        metavar="PATH",
+        help="Import from timm benchmark output (JSON).",
+    )
+    input_group.add_argument(
+        "--from-json",
+        type=Path,
+        metavar="PATH",
+        help="Import from generic JSON file (auto-detected or schema-compliant).",
+    )
+    input_group.add_argument(
+        "--auto",
+        type=Path,
+        metavar="PATH",
+        help="Auto-detect format and import (tries all adapters).",
+    )
+
+    # Model linking
+    link_group = parser.add_argument_group("Model Linking")
+    link_group.add_argument(
+        "--model",
+        type=Path,
+        metavar="PATH",
+        help="Path to the model file to link eval results to.",
+    )
+    link_group.add_argument(
+        "--combine",
+        action="store_true",
+        help="Combine eval results with model architecture analysis (requires --model).",
+    )
+    link_group.add_argument(
+        "--use-hash",
+        action="store_true",
+        help="Use file hash instead of filename as model identifier.",
+    )
+
+    # Task type
+    parser.add_argument(
+        "--task",
+        choices=["detection", "classification", "nlp", "llm", "segmentation"],
+        default=None,
+        help="Override task type (auto-detected from adapter if not specified).",
+    )
+
+    # Output
+    parser.add_argument(
+        "--out-json",
+        type=Path,
+        metavar="PATH",
+        help="Output path for standardized eval results JSON.",
+    )
+
+    # CSV column mapping
+    parser.add_argument(
+        "--map-column",
+        action="append",
+        metavar="METRIC=COLUMN",
+        help="Map a metric name to a CSV column (can be repeated).",
+    )
+
+    # Validation
+    parser.add_argument(
+        "--validate",
+        type=Path,
+        metavar="PATH",
+        help="Validate an eval results file against the schema.",
+    )
+
+    # Verbosity
+    parser.add_argument(
+        "--quiet",
+        action="store_true",
+        help="Suppress output except errors.",
+    )
+
+    return parser
+
+
+def import_from_ultralytics(path: Path) -> EvalResult | None:
+    """Import eval results from Ultralytics YOLO validation output."""
+    try:
+        from .adapters import load_ultralytics_json
+
+        return load_ultralytics_json(path)
+    except Exception as e:
+        print(f"Error parsing Ultralytics results from {path}: {e}")
+        return None
+
+
+def import_from_hf_evaluate(path: Path, task_type: str = "classification") -> EvalResult | None:
+    """Import eval results from HuggingFace evaluate output."""
+    try:
+        from .adapters import load_hf_evaluate
+
+        return load_hf_evaluate(path, task_type=task_type)
+    except Exception as e:
+        print(f"Error parsing HuggingFace evaluate results from {path}: {e}")
+        return None
+
+
+def import_from_lm_eval(path: Path) -> EvalResult | None:
+    """Import eval results from lm-eval-harness output."""
+    try:
+        from .adapters import load_lm_eval
+
+        return load_lm_eval(path)
+    except Exception as e:
+        print(f"Error parsing lm-eval-harness results from {path}: {e}")
+        return None
+
+
+def import_from_json(path: Path) -> EvalResult | None:
+    """Import eval results from generic JSON (auto-detect or schema-compliant)."""
+    try:
+        from .adapters import detect_and_parse
+
+        # Try auto-detect first
+        result = detect_and_parse(path)
+        if result:
+            return result
+
+        # Fall back to schema-compliant parsing
+        with open(path, encoding="utf-8") as f:
+            data = json.load(f)
+
+        if not validate_eval_result(data):
+            print(f"Error: Invalid eval result schema in {path}")
+            return None
+
+        validated: EvalResult = EvalResult.model_validate(data)
+        return validated
+    except Exception as e:
+        print(f"Error reading {path}: {e}")
+        return None
+
+
+def validate_file(path: Path) -> bool:
+    """Validate an eval results file against the schema."""
+    try:
+        with open(path, encoding="utf-8") as f:
+            data = json.load(f)
+
+        if validate_eval_result(data):
+            print(f"Valid eval result: {path}")
+            print(f"  model_id: {data.get('model_id')}")
+            print(f"  task_type: {data.get('task_type')}")
+            print(f"  metrics: {len(data.get('metrics', []))} metrics")
+            return True
+        else:
+            print(f"Invalid eval result: {path}")
+            print("  Missing required fields: model_id and/or task_type")
+            return False
+    except json.JSONDecodeError as e:
+        print(f"Invalid JSON in {path}: {e}")
+        return False
+    except Exception as e:
+        print(f"Error reading {path}: {e}")
+        return False
+
+
+def main() -> int:
+    """Main entry point for haoline-import-eval command."""
+    parser = create_parser()
+    args = parser.parse_args()
+
+    # Validation mode
+    if args.validate:
+        return 0 if validate_file(args.validate) else 1
+
+    # Check for input source
+    input_sources = [
+        args.from_ultralytics,
+        args.from_hf_evaluate,
+        args.from_lm_eval,
+        args.from_timm,
+        args.from_csv,
+        args.from_json,
+        args.auto,
+    ]
+    active_sources = [s for s in input_sources if s is not None]
+
+    if len(active_sources) == 0:
+        parser.print_help()
+        print("\nError: No input source specified.")
+        return 1
+
+    if len(active_sources) > 1:
+        print("Error: Only one input source can be specified at a time.")
+        return 1
+
+    # Import based on source
+    result: EvalResult | None = None
+
+    if args.from_ultralytics:
+        result = import_from_ultralytics(args.from_ultralytics)
+    elif args.from_hf_evaluate:
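+        # --task is honored only on this HF-evaluate path; the other
+        # adapters infer the task type themselves.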
+        task = args.task if args.task in ("classification", "nlp") else "classification"
+        result = import_from_hf_evaluate(args.from_hf_evaluate, task_type=task)
+    elif args.from_lm_eval:
+        result = import_from_lm_eval(args.from_lm_eval)
+    elif args.from_timm:
+        try:
+            from .adapters import load_timm_benchmark
+
+            result = load_timm_benchmark(args.from_timm)
+        except Exception as e:
+            print(f"Error parsing timm results: {e}")
+            return 1
+    elif args.auto:
+        result = import_from_json(args.auto)  # Uses detect_and_parse
+    elif args.from_json:
+        result = import_from_json(args.from_json)
+    elif args.from_csv:
+        try:
+            from .adapters import load_generic_csv
+
+            # Parse column mappings if provided
+            column_mapping: dict[str, str] = {}
+            if args.map_column:
+                for mapping in args.map_column:
+                    if "=" in mapping:
+                        metric, column = mapping.split("=", 1)
+                        column_mapping[column] = metric
+
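+            # Note: column_mapping is collected above but never passed to
+            # load_generic_csv, so --map-column currently has no effect here.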
+            results = load_generic_csv(args.from_csv)
+            if results:
+                result = results[0]  # Return first row for single result
+                if not args.quiet and len(results) > 1:
+                    print(f"Note: CSV contains {len(results)} rows, returning first.")
+            else:
+                print(f"No valid rows found in {args.from_csv}")
+                return 1
+        except Exception as e:
+            print(f"Error parsing CSV: {e}")
+            return 1
+
+    if result is None:
+        print("Failed to import eval results.")
+        return 1
+
+    # Link to model if specified
+    if args.model:
+        result = link_eval_to_model(
+            str(args.model),
+            result,
+            use_hash=args.use_hash,
+        )
+        if not args.quiet:
+            print(f"Linked eval to model: {result.model_id}")
+
+    # Combine with architecture analysis if requested
+    output_data: EvalResult | CombinedReport = result
+    if args.combine:
+        if not args.model:
+            print("Error: --combine requires --model to be specified.")
+            return 1
+
+        if not args.model.exists():
+            print(f"Error: Model file not found: {args.model}")
+            return 1
+
+        if not args.quiet:
+            print(f"Running architecture analysis on {args.model}...")
+
+        combined = create_combined_report(
+            str(args.model),
+            eval_results=[result],
+            run_inspection=True,
+        )
+
+        if not args.quiet:
+            arch = combined.architecture
+            print(f"  Parameters: {arch.get('params_total', 0):,}")
+            print(f"  FLOPs: {arch.get('flops_total', 0):,}")
+            print(f"  Memory: {arch.get('model_size_bytes', 0) / 1024 / 1024:.1f} MB")
+            if combined.latency_ms > 0:
+                print(f"  Latency: {combined.latency_ms:.2f} ms")
+            if combined.throughput_fps > 0:
+                print(f"  Throughput: {combined.throughput_fps:.1f} fps")
+
+        output_data = combined
+
+    # Output
+    if args.out_json:
+        args.out_json.parent.mkdir(parents=True, exist_ok=True)
+        args.out_json.write_text(output_data.to_json(), encoding="utf-8")
+        if not args.quiet:
+            print(
+                f"{'Combined report' if args.combine else 'Eval results'} written to: {args.out_json}"
+            )
+
+    if not args.quiet and not args.out_json:
+        print(output_data.to_json())
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
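
A minimal sketch of driving the new module programmatically rather than through the console script (file names are placeholders; assumes this wheel is installed and an Ultralytics validation JSON is on disk):

from pathlib import Path

from haoline.eval.cli import import_from_ultralytics, validate_file

# Placeholder input: any Ultralytics val-results JSON will do.
result = import_from_ultralytics(Path("results.json"))
if result is not None:
    out = Path("standardized.json")
    out.write_text(result.to_json(), encoding="utf-8")  # standardized schema JSON
    validate_file(out)  # prints model_id / task_type / metric count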