mlenvdoctor 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- mlenvdoctor/__init__.py +0 -1
- mlenvdoctor/cli.py +25 -11
- mlenvdoctor/diagnose.py +30 -21
- mlenvdoctor/dockerize.py +3 -6
- mlenvdoctor/fix.py +17 -12
- mlenvdoctor/gpu.py +8 -8
- mlenvdoctor/utils.py +0 -3
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/METADATA +2 -2
- mlenvdoctor-0.1.1.dist-info/RECORD +12 -0
- mlenvdoctor-0.1.0.dist-info/RECORD +0 -12
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/WHEEL +0 -0
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/entry_points.txt +0 -0
- {mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/licenses/LICENSE +0 -0
mlenvdoctor/__init__.py
CHANGED
mlenvdoctor/cli.py
CHANGED
@@ -8,7 +8,7 @@ from . import __version__
 from .diagnose import diagnose_env, print_diagnostic_table
 from .dockerize import generate_dockerfile, generate_service_template
 from .fix import auto_fix
-from .gpu import benchmark_gpu_ops, smoke_test_lora, test_model
+from .gpu import benchmark_gpu_ops, smoke_test_lora, test_model as gpu_test_model
 from .utils import console
 
 app = typer.Typer(
@@ -21,14 +21,21 @@ app = typer.Typer(
 def version_callback(value: bool):
     """Print version and exit."""
     if value:
-        console.print(
+        console.print(
+            f"[bold blue]ML Environment Doctor[/bold blue] version [cyan]{__version__}[/cyan]"
+        )
         raise typer.Exit()
 
 
 @app.callback()
 def main(
     version: Optional[bool] = typer.Option(
-        None,
+        None,
+        "--version",
+        "-v",
+        callback=version_callback,
+        is_eager=True,
+        help="Show version and exit",
     ),
 ):
     """ML Environment Doctor - Diagnose & fix ML environments for LLM fine-tuning."""
@@ -37,7 +44,9 @@ def main(
 
 @app.command()
 def diagnose(
-    full: bool = typer.Option(
+    full: bool = typer.Option(
+        False, "--full", "-f", help="Run full diagnostics including GPU benchmarks"
+    ),
 ):
     """
     🔍 Diagnose your ML environment.
@@ -85,8 +94,12 @@ def fix(
 @app.command()
 def dockerize(
     model: Optional[str] = typer.Argument(None, help="Model name (mistral-7b, tinyllama, gpt2)"),
-    service: bool = typer.Option(
-
+    service: bool = typer.Option(
+        False, "--service", "-s", help="Generate FastAPI service template"
+    ),
+    output: str = typer.Option(
+        "Dockerfile.mlenvdoctor", "--output", "-o", help="Output Dockerfile name"
+    ),
 ):
     """
     🐳 Generate Dockerfile for ML fine-tuning.
@@ -105,8 +118,8 @@ def dockerize(
     console.print("[bold green]✅ Dockerfile generated![/bold green]")
 
 
-@app.command()
-def test_model(
+@app.command(name="test-model")
+def test_model_cmd(
     model: str = typer.Argument("tinyllama", help="Model to test (tinyllama, gpt2, mistral-7b)"),
 ):
     """
@@ -115,7 +128,7 @@ def test_model(
     Tests model loading and forward pass to verify fine-tuning readiness.
     """
     console.print(f"[bold blue]🧪 Testing model: {model}[/bold blue]\n")
-    success =
+    success = gpu_test_model(model_name=model)
     if success:
         console.print()
         console.print("[bold green]✅ Model test passed! Ready for fine-tuning.[/bold green]")
@@ -139,7 +152,9 @@ def smoke_test():
         console.print("[bold green]✅ Smoke test passed! Environment is ready.[/bold green]")
     else:
         console.print()
-        console.print(
+        console.print(
+            "[bold red]❌ Smoke test failed. Run 'mlenvdoctor diagnose' for details.[/bold red]"
+        )
         raise typer.Exit(1)
 
 
@@ -150,4 +165,3 @@ def main_cli():
 
 if __name__ == "__main__":
     main_cli()
-
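Note on the cli.py hunks above: in 0.1.0 the module imported test_model from .gpu and then defined a CLI command with the same name, so the command shadowed the import it was meant to call (the truncated `success =` line sits where that call should be). 0.1.1 aliases the import as gpu_test_model and registers the command under an explicit name so the CLI keeps the `test-model` spelling. A minimal sketch of this typer pattern; the stub body and exit handling here are illustrative, not the package's actual implementation:

    import typer

    app = typer.Typer()

    def gpu_test_model(model_name: str = "tinyllama") -> bool:
        """Stand-in for mlenvdoctor.gpu.test_model."""
        return True

    @app.command(name="test-model")  # CLI command name stays `test-model`
    def test_model_cmd(model: str = typer.Argument("tinyllama")):
        # Calling the aliased import avoids the self-shadowing that broke 0.1.0.
        success = gpu_test_model(model_name=model)
        raise typer.Exit(0 if success else 1)

    if __name__ == "__main__":
        app()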
mlenvdoctor/diagnose.py
CHANGED
@@ -2,8 +2,7 @@
 
 import importlib
 import re
-import
-from typing import Dict, List, Optional, Tuple
+from typing import List, Optional, Tuple
 
 try:
     import torch
@@ -12,16 +11,7 @@ except ImportError:
 
 from rich.table import Table
 
-from .utils import (
-    check_command_exists,
-    console,
-    format_size,
-    get_home_config_dir,
-    print_error,
-    print_info,
-    print_warning,
-    run_command,
-)
+from .utils import check_command_exists, console, format_size, get_home_config_dir, run_command
 
 
 class DiagnosticIssue:
@@ -164,7 +154,7 @@ def check_pytorch_cuda() -> List[DiagnosticIssue]:
                 name="PyTorch Version",
                 status="WARN - Old version",
                 severity="warning",
-                fix=
+                fix="Upgrade: pip install torch>=2.4.0 --index-url https://download.pytorch.org/whl/cu124",
                 details=f"Current: {torch_version}, Recommended: >=2.4.0",
             )
         )
@@ -237,7 +227,7 @@ def check_ml_libraries() -> List[DiagnosticIssue]:
         issues.append(
             DiagnosticIssue(
                 name=f"{lib_name}",
-                status=
+                status="PASS - Installed",
                 severity="info",
                 fix="",
                 details=f"Version: {version}",
@@ -275,7 +265,6 @@ def check_gpu_memory() -> List[DiagnosticIssue]:
         free_mem, total_mem = torch.cuda.mem_get_info(0)
         free_gb = free_mem / (1024**3)
         total_gb = total_mem / (1024**3)
-        used_gb = total_gb - free_gb
 
         if free_gb < 8:
             issues.append(
@@ -377,7 +366,18 @@ def check_docker_gpu() -> List[DiagnosticIssue]:
         return issues
 
     try:
-        result = run_command(
+        result = run_command(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "--gpus",
+                "all",
+                "nvidia/cuda:12.4.0-base-ubuntu22.04",
+                "nvidia-smi",
+            ],
+            timeout=30,
+        )
         if result.returncode == 0:
             issues.append(
                 DiagnosticIssue(
@@ -460,7 +460,11 @@ def diagnose_env(full: bool = False) -> List[DiagnosticIssue]:
 
 def print_diagnostic_table(issues: List[DiagnosticIssue]) -> None:
     """Print diagnostic results as a Rich table."""
-    table = Table(
+    table = Table(
+        title="ML Environment Doctor - Diagnostic Results",
+        show_header=True,
+        header_style="bold magenta",
+    )
     table.add_column("Issue", style="cyan", no_wrap=False)
     table.add_column("Status", style="bold")
     table.add_column("Severity", style="yellow")
@@ -474,7 +478,9 @@ def print_diagnostic_table(issues: List[DiagnosticIssue]) -> None:
 
     # Summary
     critical_count = sum(1 for i in issues if i.severity == "critical" and "FAIL" in i.status)
-    warning_count = sum(
+    warning_count = sum(
+        1 for i in issues if i.severity == "warning" and ("WARN" in i.status or "FAIL" in i.status)
+    )
     pass_count = sum(1 for i in issues if "PASS" in i.status)
 
     console.print()
@@ -485,9 +491,12 @@ def print_diagnostic_table(issues: List[DiagnosticIssue]) -> None:
     console.print(f"[red]❌ Critical Issues: {critical_count}[/red]")
 
     if critical_count == 0 and warning_count == 0:
-        console.print(
+        console.print(
+            "\n[bold green]🎉 Your ML environment looks ready for fine-tuning![/bold green]"
+        )
     elif critical_count > 0:
         console.print("\n[bold red]⚠️ Please fix critical issues before proceeding.[/bold red]")
     else:
-        console.print(
-
+        console.print(
+            "\n[bold yellow]💡 Consider addressing warnings for optimal performance.[/bold yellow]"
+        )
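The check_docker_gpu hunk above spells out the probe 0.1.1 runs: start a CUDA base image with `--gpus all` and see whether `nvidia-smi` exits cleanly. A self-contained sketch of the same probe using plain subprocess in place of the package's run_command helper (the error handling here is our assumption, not taken from the package):

    import subprocess

    def docker_can_see_gpu(timeout: int = 30) -> bool:
        """Return True if a CUDA container can reach the host GPU via --gpus all."""
        cmd = [
            "docker", "run", "--rm", "--gpus", "all",
            "nvidia/cuda:12.4.0-base-ubuntu22.04", "nvidia-smi",
        ]
        try:
            # A zero exit code means the NVIDIA Container Toolkit is wired up.
            return subprocess.run(cmd, capture_output=True, timeout=timeout).returncode == 0
        except (FileNotFoundError, subprocess.TimeoutExpired):
            # docker is missing, or the image pull/run exceeded the timeout
            return False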
mlenvdoctor/dockerize.py
CHANGED
@@ -3,9 +3,7 @@
 from pathlib import Path
 from typing import Optional
 
-from
-
-from .utils import console, print_error, print_info, print_success
+from .utils import console, print_info, print_success
 
 # Model-specific templates
 MODEL_TEMPLATES = {
@@ -149,9 +147,9 @@ CMD ["python", "train.py"]
     console.print("[bold]Build and run:[/bold]")
     console.print(f"[cyan]  docker build -f {output_file} -t mlenvdoctor .[/cyan]")
     if service:
-        console.print(
+        console.print("[cyan]  docker run --gpus all -p 8000:8000 mlenvdoctor[/cyan]")
     else:
-        console.print(
+        console.print("[cyan]  docker run --gpus all -v $(pwd)/data:/app/data mlenvdoctor[/cyan]")
 
     return output_path
 
@@ -201,4 +199,3 @@ if __name__ == "__main__":
     output_path.write_text(service_content, encoding="utf-8")
     print_success(f"Generated service template: {output_file}")
     return output_path
-
mlenvdoctor/fix.py
CHANGED
@@ -1,14 +1,10 @@
 """Auto-fix and requirements generation for ML Environment Doctor."""
 
-import subprocess
 import sys
 from pathlib import Path
-from typing import
+from typing import Optional
 
-from
-from rich.progress import Progress, SpinnerColumn, TextColumn
-
-from .diagnose import DiagnosticIssue, diagnose_env
+from .diagnose import diagnose_env
 from .utils import (
     check_command_exists,
     console,
@@ -39,7 +35,9 @@ ML_STACKS = {
 }
 
 
-def generate_requirements_txt(
+def generate_requirements_txt(
+    stack: str = "trl-peft", output_file: str = "requirements-mlenvdoctor.txt"
+) -> Path:
     """Generate requirements.txt file."""
     if stack not in ML_STACKS:
         print_error(f"Unknown stack: {stack}. Available: {list(ML_STACKS.keys())}")
@@ -61,7 +59,9 @@ def generate_requirements_txt(stack: str = "trl-peft", output_file: str = "requi
         content = "# Standard PyTorch (CPU or CUDA)\n\n"
     except ImportError:
         content = "# PyTorch installation\n"
-        content +=
+        content += (
+            "# For CUDA: pip install torch --index-url https://download.pytorch.org/whl/cu124\n"
+        )
         content += "# For CPU: pip install torch\n\n"
 
     content += "\n".join(requirements)
@@ -72,7 +72,9 @@ def generate_requirements_txt(stack: str = "trl-peft", output_file: str = "requi
     return output_path
 
 
-def generate_conda_env(
+def generate_conda_env(
+    stack: str = "trl-peft", output_file: str = "environment-mlenvdoctor.yml"
+) -> Path:
     """Generate conda environment file."""
     if stack not in ML_STACKS:
         print_error(f"Unknown stack: {stack}. Available: {list(ML_STACKS.keys())}")
@@ -197,7 +199,11 @@ def create_virtualenv(env_name: str = ".venv") -> Optional[Path]:
 
         venv.create(env_path, with_pip=True)
         print_success(f"Virtual environment created: {env_name}")
-
+        if sys.platform == "win32":
+            activate_cmd = r".venv\Scripts\activate"
+        else:
+            activate_cmd = "source .venv/bin/activate"
+        print_info(f"Activate with: {activate_cmd}")
         return env_path
     except Exception as e:
         print_error(f"Failed to create virtual environment: {e}")
@@ -243,7 +249,6 @@ def auto_fix(use_conda: bool = False, create_venv: bool = False, stack: str = "t
     if install.lower() in ["y", "yes"]:
         return install_requirements(str(req_file), use_conda=use_conda)
     else:
-        print_info(
+        print_info("Requirements file generated. Install manually with:")
         console.print(f"[cyan]  pip install -r {req_file}[/cyan]")
     return True
-
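The create_virtualenv hunk above adds a platform-aware activation hint after the environment is created. A standalone sketch of that flow using only the standard library, with print standing in for the package's print_info/print_success helpers:

    import sys
    import venv
    from pathlib import Path

    def create_virtualenv(env_name: str = ".venv") -> Path:
        env_path = Path(env_name)
        venv.create(env_path, with_pip=True)  # stdlib venv with pip bootstrapped
        # Windows puts activation scripts under Scripts\, POSIX shells under bin/
        if sys.platform == "win32":
            activate_cmd = rf"{env_name}\Scripts\activate"
        else:
            activate_cmd = f"source {env_name}/bin/activate"
        print(f"Virtual environment created: {env_name}")
        print(f"Activate with: {activate_cmd}")
        return env_path

    if __name__ == "__main__":
        create_virtualenv()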
mlenvdoctor/gpu.py
CHANGED
@@ -1,7 +1,7 @@
 """GPU benchmarks and smoke tests for ML Environment Doctor."""
 
 import time
-from typing import Dict
+from typing import Dict
 
 try:
     import torch
@@ -9,7 +9,6 @@ except ImportError:
     torch = None  # type: ignore
 
 from rich.console import Console
-from rich.progress import Progress, SpinnerColumn, TextColumn
 
 from .utils import print_error, print_info, print_success
 
@@ -78,7 +77,8 @@ def smoke_test_lora() -> bool:
         if tokenizer.pad_token is None:
             tokenizer.pad_token = tokenizer.eos_token
         model = AutoModelForCausalLM.from_pretrained(
-            model_name,
+            model_name,
+            torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
        ).to(device)
 
         # Configure LoRA
@@ -101,8 +101,7 @@ def smoke_test_lora() -> bool:
         # Forward pass
         with console.status("[bold green]Running forward pass..."):
             with torch.no_grad():
-
-                loss = outputs.loss if hasattr(outputs, "loss") else None
+                _ = model(**inputs)
 
         print_success("LoRA smoke test passed!")
         return True
@@ -146,7 +145,9 @@ def test_model(model_name: str = "tinyllama") -> bool:
         # Estimate memory requirements (rough)
         if "7b" in actual_model_name.lower() or "7B" in actual_model_name:
             if free_gb < 16:
-                print_error(
+                print_error(
+                    f"Insufficient GPU memory: {free_gb:.1f}GB free, need ~16GB for 7B model"
+                )
                 return False
 
         with console.status(f"[bold green]Loading {actual_model_name}..."):
@@ -166,7 +167,7 @@ def test_model(model_name: str = "tinyllama") -> bool:
         inputs = tokenizer(dummy_text, return_tensors="pt").to(device)
 
         with torch.no_grad():
-
+            _ = model(**inputs)
 
         print_success(f"Model {actual_model_name} loaded and tested successfully!")
         return True
@@ -181,4 +182,3 @@ def test_model(model_name: str = "tinyllama") -> bool:
     except Exception as e:
         print_error(f"Model test error: {e}")
         return False
-
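The smoke_test_lora hunk above threads a dtype through from_pretrained: float16 on CUDA to roughly halve weight memory, float32 on CPU where half precision is slow or unsupported. A sketch of the same selection; the checkpoint name here is an arbitrary tiny model chosen for illustration, not the one the package loads:

    import torch
    from transformers import AutoModelForCausalLM

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # fp16 halves weight memory on GPU; fp32 remains the safe CPU default.
    model = AutoModelForCausalLM.from_pretrained(
        "sshleifer/tiny-gpt2",  # hypothetical stand-in checkpoint
        torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
    ).to(device)
    print(next(model.parameters()).dtype)  # torch.float16 on CUDA, torch.float32 on CPU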
mlenvdoctor/utils.py
CHANGED
@@ -7,7 +7,6 @@ from typing import List, Optional, Tuple
 
 from rich.console import Console
 from rich.progress import Progress, SpinnerColumn, TextColumn
-from rich.text import Text
 
 console = Console()
 
@@ -103,5 +102,3 @@ def format_size(size_bytes: int) -> str:
 def get_python_version() -> Tuple[int, int, int]:
     """Get Python version as tuple."""
     return sys.version_info[:3]
-
-
{mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mlenvdoctor
-Version: 0.1.0
+Version: 0.1.1
 Summary: Diagnose & fix ML environments for LLM fine-tuning
 Author: ML Environment Doctor Contributors
 License: MIT
@@ -34,7 +34,7 @@ Description-Content-Type: text/markdown
 
 [](https://www.python.org/downloads/)
 [](https://opensource.org/licenses/MIT)
-[](https://pypi.org/project/mlenvdoctor/)
+[](https://pypi.org/project/mlenvdoctor/)
 
 > **Single command fixes 90% of "my torch.cuda.is_available() is False" issues.**
 
mlenvdoctor-0.1.1.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
+mlenvdoctor/__init__.py,sha256=vYK9Wp5kAcHKL7njV76xMfPA24ILdxJvIhoumF6-Sz4,110
+mlenvdoctor/cli.py,sha256=aQ2rpjxsfMDwYAVKsn7cHSar42AZJTrr1tSTl-iM0L4,5488
+mlenvdoctor/diagnose.py,sha256=xa3aqCornGApMJkEWQNIGHwNBRhGA3ud1hBQ6wIVhVQ,17099
+mlenvdoctor/dockerize.py,sha256=AC8HX5sRkSFAM0O0caBnKW4HAdS49MVmMcsplKEDXI4,5562
+mlenvdoctor/fix.py,sha256=P4Qce41LLgjaHugbMFFSg7ldfsSSNDFBz5_T_YA9mig,8945
+mlenvdoctor/gpu.py,sha256=iuiLAW8lZLBpuUL1yapOkx5VYLtY_i1SwK9cE5koZTE,6129
+mlenvdoctor/utils.py,sha256=2gtbiJogEI33IpOLHGEfks6b7Jd1Y7pyfojW9wpYsjU,2893
+mlenvdoctor-0.1.1.dist-info/METADATA,sha256=NQHbKeu7KnZfHWPJcqQEYUXZqIRL7ORCesdMyyqFKU8,8887
+mlenvdoctor-0.1.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+mlenvdoctor-0.1.1.dist-info/entry_points.txt,sha256=Y-WH-ANeiTdECIaqi_EB3ZEf_kACkvsYBHnNhXsCI4k,52
+mlenvdoctor-0.1.1.dist-info/licenses/LICENSE,sha256=rGHdyWGvGWYnEFlthqtB-RtRCTa7WaAOElom5qD-nHw,1114
+mlenvdoctor-0.1.1.dist-info/RECORD,,
mlenvdoctor-0.1.0.dist-info/RECORD
DELETED
@@ -1,12 +0,0 @@
-mlenvdoctor/__init__.py,sha256=hPyn5R1E9SDxyqhMb9AUsDw-TRxj9K2AWcadl5LmvqY,112
-mlenvdoctor/cli.py,sha256=DjF6UYC4QlrGSvbmG3wokX0urAWls9hu0FSIh-jZyYA,5305
-mlenvdoctor/diagnose.py,sha256=zk_E3UF2OlJr6ZFx3OjejyswgcZWX7qz1BHDXCB-vmk,16980
-mlenvdoctor/dockerize.py,sha256=q7afAUSkpL3RHSmbMZy2G9VZ7yRbus4xDADbSjIIuJM,5615
-mlenvdoctor/fix.py,sha256=V-mK30r2xSk-_3uuEHqH01iA9Vt8mjjlCisb4kL_A_Q,8959
-mlenvdoctor/gpu.py,sha256=PTS_dj6JaAoXKHQNzqgh8xOuZHomPoQxuLQMGWQXHqQ,6239
-mlenvdoctor/utils.py,sha256=fAZHgVX6iyOpcd4NU-oSyLjFMO1AXnfgOL5_KL_-0Po,2925
-mlenvdoctor-0.1.0.dist-info/METADATA,sha256=mZnlvzRuaaW9ydTZf5HsP4-wtDs7LUKUEde2xu_STmo,8889
-mlenvdoctor-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-mlenvdoctor-0.1.0.dist-info/entry_points.txt,sha256=Y-WH-ANeiTdECIaqi_EB3ZEf_kACkvsYBHnNhXsCI4k,52
-mlenvdoctor-0.1.0.dist-info/licenses/LICENSE,sha256=rGHdyWGvGWYnEFlthqtB-RtRCTa7WaAOElom5qD-nHw,1114
-mlenvdoctor-0.1.0.dist-info/RECORD,,
{mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/WHEEL
File without changes
{mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/entry_points.txt
File without changes
{mlenvdoctor-0.1.0.dist-info → mlenvdoctor-0.1.1.dist-info}/licenses/LICENSE
File without changes