mlenvdoctor 0.1.0-py3-none-any.whl → 0.1.2-py3-none-any.whl
This diff compares publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- mlenvdoctor/__init__.py +15 -1
- mlenvdoctor/cli.py +80 -30
- mlenvdoctor/config.py +169 -0
- mlenvdoctor/constants.py +63 -0
- mlenvdoctor/diagnose.py +146 -46
- mlenvdoctor/dockerize.py +3 -6
- mlenvdoctor/exceptions.py +51 -0
- mlenvdoctor/export.py +290 -0
- mlenvdoctor/fix.py +19 -13
- mlenvdoctor/gpu.py +15 -9
- mlenvdoctor/icons.py +100 -0
- mlenvdoctor/logger.py +81 -0
- mlenvdoctor/parallel.py +115 -0
- mlenvdoctor/retry.py +92 -0
- mlenvdoctor/utils.py +79 -22
- mlenvdoctor/validators.py +217 -0
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.2.dist-info}/METADATA +3 -2
- mlenvdoctor-0.1.2.dist-info/RECORD +21 -0
- mlenvdoctor-0.1.0.dist-info/RECORD +0 -12
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.2.dist-info}/WHEEL +0 -0
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.2.dist-info}/entry_points.txt +0 -0
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.2.dist-info}/licenses/LICENSE +0 -0
mlenvdoctor/diagnose.py
CHANGED
@@ -2,8 +2,7 @@
 
 import importlib
 import re
-import
-from typing import Dict, List, Optional, Tuple
+from typing import List, Optional, Tuple
 
 try:
     import torch
@@ -12,16 +11,8 @@ except ImportError:
 
 from rich.table import Table
 
-from .
-
-    console,
-    format_size,
-    get_home_config_dir,
-    print_error,
-    print_info,
-    print_warning,
-    run_command,
-)
+from .icons import icon_check, icon_cross, icon_info, icon_search, icon_warning
+from .utils import check_command_exists, console, format_size, get_home_config_dir, run_command
 
 
 class DiagnosticIssue:
@@ -43,12 +34,13 @@ class DiagnosticIssue:
 
     def to_row(self) -> Tuple[str, str, str, str]:
         """Convert to table row."""
-
-            "PASS":
-            "FAIL":
-            "WARN":
-            "INFO":
-        }
+        status_icon_map = {
+            "PASS": icon_check(),
+            "FAIL": icon_cross(),
+            "WARN": icon_warning(),
+            "INFO": icon_info(),
+        }
+        status_icon = status_icon_map.get(self.status.split()[0], "?")
         return (
             self.name,
             f"{status_icon} {self.status}",
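
Note: the icon_check(), icon_cross(), icon_warning(), icon_info(), and icon_search() helpers come from the new icons.py module (+100 lines, not included in this diff). Purely as a hypothetical sketch of what such helpers could look like, assuming they fall back to plain ASCII when the output stream is not UTF-8 capable (the shipped module may well differ):

# Hypothetical sketch of icons.py helpers -- the real module is not shown in this diff.
import sys

def _supports_unicode() -> bool:
    # Assume emoji output is safe only when stdout reports a UTF encoding.
    return (sys.stdout.encoding or "").lower().startswith("utf")

def icon_check() -> str:
    return "✅" if _supports_unicode() else "[OK]"

def icon_cross() -> str:
    return "❌" if _supports_unicode() else "[X]"

def icon_warning() -> str:
    return "⚠️" if _supports_unicode() else "[!]"

def icon_info() -> str:
    return "ℹ️" if _supports_unicode() else "[i]"

def icon_search() -> str:
    return "🔍" if _supports_unicode() else "[?]"
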
@@ -164,7 +156,7 @@ def check_pytorch_cuda() -> List[DiagnosticIssue]:
                     name="PyTorch Version",
                     status="WARN - Old version",
                     severity="warning",
-                    fix=
+                    fix="Upgrade: pip install torch>=2.4.0 --index-url https://download.pytorch.org/whl/cu124",
                     details=f"Current: {torch_version}, Recommended: >=2.4.0",
                 )
             )
@@ -237,7 +229,7 @@ def check_ml_libraries() -> List[DiagnosticIssue]:
             issues.append(
                 DiagnosticIssue(
                     name=f"{lib_name}",
-                    status=
+                    status="PASS - Installed",
                     severity="info",
                     fix="",
                     details=f"Version: {version}",
@@ -275,7 +267,6 @@ def check_gpu_memory() -> List[DiagnosticIssue]:
         free_mem, total_mem = torch.cuda.mem_get_info(0)
         free_gb = free_mem / (1024**3)
         total_gb = total_mem / (1024**3)
-        used_gb = total_gb - free_gb
 
         if free_gb < 8:
             issues.append(
@@ -377,7 +368,18 @@ def check_docker_gpu() -> List[DiagnosticIssue]:
         return issues
 
     try:
-        result = run_command(
+        result = run_command(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "--gpus",
+                "all",
+                "nvidia/cuda:12.4.0-base-ubuntu22.04",
+                "nvidia-smi",
+            ],
+            timeout=30,
+        )
         if result.returncode == 0:
             issues.append(
                 DiagnosticIssue(
@@ -411,11 +413,19 @@ def check_docker_gpu() -> List[DiagnosticIssue]:
 
 def check_internet_connectivity() -> List[DiagnosticIssue]:
     """Check internet connectivity for HF Hub."""
+    from .retry import retry_network
+
     issues = []
-    try:
+
+    @retry_network
+    def _check_connectivity() -> bool:
         import urllib.request
 
         urllib.request.urlopen("https://huggingface.co", timeout=5)
+        return True
+
+    try:
+        _check_connectivity()
         issues.append(
             DiagnosticIssue(
                 name="Internet Connectivity",
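
The connectivity probe is now wrapped in retry_network from the new retry.py module (+92 lines, not shown in this diff). As a minimal, hypothetical sketch only, assuming a fixed three-attempt retry with exponential backoff; the shipped decorator's policy may differ:

# Hypothetical sketch of retry.py's retry_network -- the real decorator is not shown here.
import functools
import time
from typing import Any, Callable

def retry_network(func: Callable[..., Any]) -> Callable[..., Any]:
    """Retry a flaky network call up to three times with exponential backoff."""
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        delay = 1.0
        for attempt in range(3):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == 2:
                    raise  # give up after the final attempt
                time.sleep(delay)
                delay *= 2
    return wrapper
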
@@ -424,43 +434,128 @@ def check_internet_connectivity() -> List[DiagnosticIssue]:
                 fix="",
             )
         )
-    except Exception:
+    except Exception as e:
         issues.append(
             DiagnosticIssue(
                 name="Internet Connectivity",
                 status="WARN - Cannot reach HF Hub",
                 severity="warning",
                 fix="Check internet connection and firewall settings",
+                details=str(e),
             )
         )
 
     return issues
 
 
-def diagnose_env(full: bool = False) -> List[DiagnosticIssue]:
-    """
-
+def diagnose_env(full: bool = False, parallel: bool = True) -> List[DiagnosticIssue]:
+    """
+    Run all diagnostic checks.
 
-
+    Args:
+        full: Whether to run full diagnostics including extended checks
+        parallel: Whether to run independent checks in parallel
 
-
-
-
-
+    Returns:
+        List of diagnostic issues found
+    """
+    from .parallel import run_parallel_with_results
+
+    all_issues: List[DiagnosticIssue] = []
 
-
+    console.print(f"[bold blue]{icon_search()} Running ML Environment Diagnostics...[/bold blue]\n")
+
+    # Core checks (always run) - these can run in parallel
+    core_checks = [
+        check_cuda_driver,
+        check_pytorch_cuda,
+        check_ml_libraries,
+    ]
+
+    if parallel:
+        # Run core checks in parallel
+        results = run_parallel_with_results(
+            lambda check_func: check_func(),
+            core_checks,
+            max_workers=3,
+            timeout=60.0,
+        )
+        for check_func, result in results:
+            if isinstance(result, Exception):
+                # Log error but continue with other checks
+                from .logger import logger
+                logger.error(f"Check {check_func.__name__} failed: {result}")
+                # Add a diagnostic issue for the failure
+                all_issues.append(
+                    DiagnosticIssue(
+                        name=check_func.__name__.replace("check_", "").replace("_", " ").title(),
+                        status="FAIL - Check error",
+                        severity="critical",
+                        fix="Run diagnostics again or check logs",
+                        details=str(result),
+                    )
+                )
+            else:
+                all_issues.extend(result)
+    else:
+        # Sequential execution (fallback)
+        for check_func in core_checks:
+            try:
+                all_issues.extend(check_func())
+            except Exception as e:
+                from .logger import logger
+                logger.error(f"Check {check_func.__name__} failed: {e}")
+                all_issues.append(
+                    DiagnosticIssue(
+                        name=check_func.__name__.replace("check_", "").replace("_", " ").title(),
+                        status="FAIL - Check error",
+                        severity="critical",
+                        fix="Run diagnostics again or check logs",
+                        details=str(e),
+                    )
+                )
+
+    # Extended checks (if --full) - can also run in parallel
     if full:
-
-
-
-
+        extended_checks = [
+            check_gpu_memory,
+            check_disk_space,
+            check_docker_gpu,
+            check_internet_connectivity,
+        ]
+
+        if parallel:
+            results = run_parallel_with_results(
+                lambda check_func: check_func(),
+                extended_checks,
+                max_workers=4,
+                timeout=120.0,
+            )
+            for check_func, result in results:
+                if isinstance(result, Exception):
+                    from .logger import logger
+                    logger.warning(f"Extended check {check_func.__name__} failed: {result}")
+                    # Extended checks are less critical, so we log but don't fail
+                else:
+                    all_issues.extend(result)
+        else:
+            for check_func in extended_checks:
+                try:
+                    all_issues.extend(check_func())
+                except Exception as e:
+                    from .logger import logger
+                    logger.warning(f"Extended check {check_func.__name__} failed: {e}")
 
     return all_issues
 
 
 def print_diagnostic_table(issues: List[DiagnosticIssue]) -> None:
     """Print diagnostic results as a Rich table."""
-    table = Table(
+    table = Table(
+        title="ML Environment Doctor - Diagnostic Results",
+        show_header=True,
+        header_style="bold magenta",
+    )
     table.add_column("Issue", style="cyan", no_wrap=False)
     table.add_column("Status", style="bold")
     table.add_column("Severity", style="yellow")
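
diagnose_env now dispatches the independent checks through run_parallel_with_results from the new parallel.py module (+115 lines, not shown in this diff). Judging only from the call sites above, it takes a callable, a sequence of items, a max_workers count, and a timeout, and returns (item, result-or-exception) pairs. A minimal sketch under those assumptions, not the actual implementation:

# Hypothetical sketch of parallel.py's run_parallel_with_results, inferred from its call sites.
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, List, Sequence, Tuple

def run_parallel_with_results(
    func: Callable[[Any], Any],
    items: Sequence[Any],
    max_workers: int = 4,
    timeout: float = 60.0,
) -> List[Tuple[Any, Any]]:
    """Run func over items in a thread pool, pairing each item with its result or raised exception."""
    results: List[Tuple[Any, Any]] = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [(item, pool.submit(func, item)) for item in items]
        for item, future in futures:
            try:
                results.append((item, future.result(timeout=timeout)))
            except Exception as exc:  # the caller decides how to report failures
                results.append((item, exc))
    return results
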
@@ -474,20 +569,25 @@ def print_diagnostic_table(issues: List[DiagnosticIssue]) -> None:
 
     # Summary
     critical_count = sum(1 for i in issues if i.severity == "critical" and "FAIL" in i.status)
-    warning_count = sum(
+    warning_count = sum(
+        1 for i in issues if i.severity == "warning" and ("WARN" in i.status or "FAIL" in i.status)
+    )
     pass_count = sum(1 for i in issues if "PASS" in i.status)
 
     console.print()
-    console.print(f"[green]
+    console.print(f"[green]{icon_check()} Passed: {pass_count}[/green]")
     if warning_count > 0:
-        console.print(f"[yellow]
+        console.print(f"[yellow]{icon_warning()} Warnings: {warning_count}[/yellow]")
     if critical_count > 0:
-        console.print(f"[red]
+        console.print(f"[red]{icon_cross()} Critical Issues: {critical_count}[/red]")
 
     if critical_count == 0 and warning_count == 0:
-        console.print(
+        console.print(
+            "\n[bold green]🎉 Your ML environment looks ready for fine-tuning![/bold green]"
+        )
     elif critical_count > 0:
-        console.print("\n[bold red]
+        console.print(f"\n[bold red]{icon_warning()} Please fix critical issues before proceeding.[/bold red]")
     else:
-        console.print(
-
+        console.print(
+            "\n[bold yellow]💡 Consider addressing warnings for optimal performance.[/bold yellow]"
+        )
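
Callers that used diagnose_env(full=...) in 0.1.0 keep working; the new parallel flag only changes how the checks are scheduled. For example, using just the functions shown in this diff:

from mlenvdoctor.diagnose import diagnose_env, print_diagnostic_table

issues = diagnose_env(full=True, parallel=True)  # core + extended checks, run concurrently
print_diagnostic_table(issues)                   # Rich table plus the pass/warn/critical summary
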
mlenvdoctor/dockerize.py
CHANGED
@@ -3,9 +3,7 @@
 from pathlib import Path
 from typing import Optional
 
-from
-
-from .utils import console, print_error, print_info, print_success
+from .utils import console, print_info, print_success
 
 # Model-specific templates
 MODEL_TEMPLATES = {
@@ -149,9 +147,9 @@ CMD ["python", "train.py"]
     console.print("[bold]Build and run:[/bold]")
     console.print(f"[cyan] docker build -f {output_file} -t mlenvdoctor .[/cyan]")
     if service:
-        console.print(
+        console.print("[cyan] docker run --gpus all -p 8000:8000 mlenvdoctor[/cyan]")
     else:
-        console.print(
+        console.print("[cyan] docker run --gpus all -v $(pwd)/data:/app/data mlenvdoctor[/cyan]")
 
     return output_path
 
@@ -201,4 +199,3 @@ if __name__ == "__main__":
     output_path.write_text(service_content, encoding="utf-8")
     print_success(f"Generated service template: {output_file}")
     return output_path
-
mlenvdoctor/exceptions.py
ADDED
@@ -0,0 +1,51 @@
+"""Custom exceptions for ML Environment Doctor."""
+
+
+class MLEnvDoctorError(Exception):
+    """Base exception for ML Environment Doctor."""
+
+    def __init__(self, message: str, suggestion: str = ""):
+        super().__init__(message)
+        self.message = message
+        self.suggestion = suggestion
+
+    def __str__(self) -> str:
+        if self.suggestion:
+            return f"{self.message}\n💡 Suggestion: {self.suggestion}"
+        return self.message
+
+
+class DiagnosticError(MLEnvDoctorError):
+    """Error during diagnostic checks."""
+
+    pass
+
+
+class FixError(MLEnvDoctorError):
+    """Error during auto-fix operations."""
+
+    pass
+
+
+class DockerError(MLEnvDoctorError):
+    """Error during Docker operations."""
+
+    pass
+
+
+class GPUError(MLEnvDoctorError):
+    """Error related to GPU operations."""
+
+    pass
+
+
+class ConfigurationError(MLEnvDoctorError):
+    """Error in configuration."""
+
+    pass
+
+
+class InstallationError(MLEnvDoctorError):
+    """Error during package installation."""
+
+    pass
mlenvdoctor/export.py
ADDED
@@ -0,0 +1,290 @@
+"""Export functionality for diagnostic results."""
+
+import json
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from .diagnose import DiagnosticIssue
+
+
+def issue_to_dict(issue: DiagnosticIssue) -> Dict[str, Any]:
+    """Convert DiagnosticIssue to dictionary."""
+    return {
+        "name": issue.name,
+        "status": issue.status,
+        "severity": issue.severity,
+        "fix": issue.fix,
+        "details": issue.details,
+    }
+
+
+def export_json(
+    issues: List[DiagnosticIssue],
+    output_file: Optional[Path] = None,
+    include_metadata: bool = True,
+) -> Path:
+    """
+    Export diagnostic results to JSON.
+
+    Args:
+        issues: List of diagnostic issues
+        output_file: Output file path (default: diagnostic-results.json)
+        include_metadata: Include metadata (timestamp, version, etc.)
+
+    Returns:
+        Path to exported file
+    """
+    if output_file is None:
+        output_file = Path("diagnostic-results.json")
+
+    # Convert issues to dictionaries
+    issues_data = [issue_to_dict(issue) for issue in issues]
+
+    # Calculate summary
+    critical_count = sum(
+        1 for i in issues if i.severity == "critical" and "FAIL" in i.status
+    )
+    warning_count = sum(
+        1 for i in issues if i.severity == "warning" and ("WARN" in i.status or "FAIL" in i.status)
+    )
+    pass_count = sum(1 for i in issues if "PASS" in i.status)
+
+    # Build export data
+    export_data: Dict[str, Any] = {
+        "issues": issues_data,
+        "summary": {
+            "total": len(issues),
+            "passed": pass_count,
+            "warnings": warning_count,
+            "critical": critical_count,
+        },
+    }
+
+    # Add metadata if requested
+    if include_metadata:
+        from . import __version__
+
+        export_data["metadata"] = {
+            "version": __version__,
+            "timestamp": datetime.now().isoformat(),
+            "tool": "mlenvdoctor",
+        }
+
+    # Write to file
+    output_file.write_text(json.dumps(export_data, indent=2, ensure_ascii=False), encoding="utf-8")
+
+    return output_file
+
+
+def export_csv(issues: List[DiagnosticIssue], output_file: Optional[Path] = None) -> Path:
+    """
+    Export diagnostic results to CSV.
+
+    Args:
+        issues: List of diagnostic issues
+        output_file: Output file path (default: diagnostic-results.csv)
+
+    Returns:
+        Path to exported file
+    """
+    import csv
+
+    if output_file is None:
+        output_file = Path("diagnostic-results.csv")
+
+    with output_file.open("w", newline="", encoding="utf-8") as f:
+        writer = csv.writer(f)
+        writer.writerow(["Issue", "Status", "Severity", "Fix", "Details"])
+
+        for issue in issues:
+            writer.writerow(
+                [
+                    issue.name,
+                    issue.status,
+                    issue.severity,
+                    issue.fix,
+                    issue.details or "",
+                ]
+            )
+
+    return output_file
+
+
+def export_html(issues: List[DiagnosticIssue], output_file: Optional[Path] = None) -> Path:
+    """
+    Export diagnostic results to HTML report.
+
+    Args:
+        issues: List of diagnostic issues
+        output_file: Output file path (default: diagnostic-results.html)
+
+    Returns:
+        Path to exported file
+    """
+    if output_file is None:
+        output_file = Path("diagnostic-results.html")
+
+    # Calculate summary
+    critical_count = sum(
+        1 for i in issues if i.severity == "critical" and "FAIL" in i.status
+    )
+    warning_count = sum(
+        1 for i in issues if i.severity == "warning" and ("WARN" in i.status or "FAIL" in i.status)
+    )
+    pass_count = sum(1 for i in issues if "PASS" in i.status)
+
+    from . import __version__
+
+    html_content = f"""<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>ML Environment Doctor - Diagnostic Report</title>
+    <style>
+        body {{
+            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
+            max-width: 1200px;
+            margin: 0 auto;
+            padding: 20px;
+            background-color: #f5f5f5;
+        }}
+        .header {{
+            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+            color: white;
+            padding: 30px;
+            border-radius: 10px;
+            margin-bottom: 20px;
+        }}
+        .summary {{
+            display: grid;
+            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+            gap: 15px;
+            margin-bottom: 30px;
+        }}
+        .summary-card {{
+            background: white;
+            padding: 20px;
+            border-radius: 8px;
+            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+        }}
+        .summary-card h3 {{
+            margin: 0 0 10px 0;
+            font-size: 14px;
+            color: #666;
+        }}
+        .summary-card .value {{
+            font-size: 32px;
+            font-weight: bold;
+        }}
+        .passed {{ color: #10b981; }}
+        .warning {{ color: #f59e0b; }}
+        .critical {{ color: #ef4444; }}
+        table {{
+            width: 100%;
+            background: white;
+            border-collapse: collapse;
+            border-radius: 8px;
+            overflow: hidden;
+            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+        }}
+        th {{
+            background-color: #667eea;
+            color: white;
+            padding: 15px;
+            text-align: left;
+            font-weight: 600;
+        }}
+        td {{
+            padding: 12px 15px;
+            border-bottom: 1px solid #e5e7eb;
+        }}
+        tr:hover {{
+            background-color: #f9fafb;
+        }}
+        .status-pass {{ color: #10b981; font-weight: 600; }}
+        .status-fail {{ color: #ef4444; font-weight: 600; }}
+        .status-warn {{ color: #f59e0b; font-weight: 600; }}
+        .severity-critical {{ background-color: #fee2e2; }}
+        .severity-warning {{ background-color: #fef3c7; }}
+        .severity-info {{ background-color: #dbeafe; }}
+        .footer {{
+            margin-top: 30px;
+            text-align: center;
+            color: #666;
+            font-size: 12px;
+        }}
+    </style>
+</head>
+<body>
+    <div class="header">
+        <h1>ML Environment Doctor</h1>
+        <p>Diagnostic Report - Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
+        <p style="font-size: 14px; opacity: 0.9;">Version {__version__}</p>
+    </div>
+
+    <div class="summary">
+        <div class="summary-card">
+            <h3>Total Checks</h3>
+            <div class="value">{len(issues)}</div>
+        </div>
+        <div class="summary-card">
+            <h3>Passed</h3>
+            <div class="value passed">{pass_count}</div>
+        </div>
+        <div class="summary-card">
+            <h3>Warnings</h3>
+            <div class="value warning">{warning_count}</div>
+        </div>
+        <div class="summary-card">
+            <h3>Critical Issues</h3>
+            <div class="value critical">{critical_count}</div>
+        </div>
+    </div>
+
+    <table>
+        <thead>
+            <tr>
+                <th>Issue</th>
+                <th>Status</th>
+                <th>Severity</th>
+                <th>Fix</th>
+                <th>Details</th>
+            </tr>
+        </thead>
+        <tbody>
+"""
+
+    for issue in issues:
+        status_class = "status-pass"
+        if "FAIL" in issue.status:
+            status_class = "status-fail"
+        elif "WARN" in issue.status:
+            status_class = "status-warn"
+
+        severity_class = f"severity-{issue.severity}"
+
+        html_content += f"""
+        <tr class="{severity_class}">
+            <td><strong>{issue.name}</strong></td>
+            <td class="{status_class}">{issue.status}</td>
+            <td>{issue.severity.upper()}</td>
+            <td>{issue.fix or '-'}</td>
+            <td>{issue.details or '-'}</td>
+        </tr>
+"""
+
+    html_content += """
+        </tbody>
+    </table>
+
+    <div class="footer">
+        <p>Generated by ML Environment Doctor | <a href="https://github.com/dheena731/ml_env_doctor">GitHub</a></p>
+    </div>
+</body>
+</html>
+"""
+
+    output_file.write_text(html_content, encoding="utf-8")
+    return output_file
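
A short end-to-end example of the new export path, using only the functions added above (the file names mirror the documented defaults):

from pathlib import Path

from mlenvdoctor.diagnose import diagnose_env
from mlenvdoctor.export import export_csv, export_html, export_json

issues = diagnose_env(full=True)

export_json(issues, Path("diagnostic-results.json"))  # results + summary + version/timestamp metadata
export_csv(issues, Path("diagnostic-results.csv"))    # flat rows: Issue, Status, Severity, Fix, Details
export_html(issues, Path("diagnostic-results.html"))  # self-contained styled report
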