mlx-stack 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlx_stack/__init__.py +5 -0
- mlx_stack/_version.py +24 -0
- mlx_stack/cli/__init__.py +5 -0
- mlx_stack/cli/bench.py +221 -0
- mlx_stack/cli/config.py +166 -0
- mlx_stack/cli/down.py +109 -0
- mlx_stack/cli/init.py +180 -0
- mlx_stack/cli/install.py +165 -0
- mlx_stack/cli/logs.py +234 -0
- mlx_stack/cli/main.py +187 -0
- mlx_stack/cli/models.py +304 -0
- mlx_stack/cli/profile.py +65 -0
- mlx_stack/cli/pull.py +134 -0
- mlx_stack/cli/recommend.py +397 -0
- mlx_stack/cli/status.py +111 -0
- mlx_stack/cli/up.py +163 -0
- mlx_stack/cli/watch.py +252 -0
- mlx_stack/core/__init__.py +1 -0
- mlx_stack/core/benchmark.py +1182 -0
- mlx_stack/core/catalog.py +560 -0
- mlx_stack/core/config.py +471 -0
- mlx_stack/core/deps.py +323 -0
- mlx_stack/core/hardware.py +304 -0
- mlx_stack/core/launchd.py +531 -0
- mlx_stack/core/litellm_gen.py +188 -0
- mlx_stack/core/log_rotation.py +231 -0
- mlx_stack/core/log_viewer.py +386 -0
- mlx_stack/core/models.py +639 -0
- mlx_stack/core/paths.py +79 -0
- mlx_stack/core/process.py +887 -0
- mlx_stack/core/pull.py +815 -0
- mlx_stack/core/scoring.py +611 -0
- mlx_stack/core/stack_down.py +317 -0
- mlx_stack/core/stack_init.py +524 -0
- mlx_stack/core/stack_status.py +229 -0
- mlx_stack/core/stack_up.py +856 -0
- mlx_stack/core/watchdog.py +744 -0
- mlx_stack/data/__init__.py +1 -0
- mlx_stack/data/catalog/__init__.py +1 -0
- mlx_stack/data/catalog/deepseek-r1-32b.yaml +46 -0
- mlx_stack/data/catalog/deepseek-r1-8b.yaml +45 -0
- mlx_stack/data/catalog/gemma3-12b.yaml +45 -0
- mlx_stack/data/catalog/gemma3-27b.yaml +45 -0
- mlx_stack/data/catalog/gemma3-4b.yaml +45 -0
- mlx_stack/data/catalog/llama3.3-8b.yaml +44 -0
- mlx_stack/data/catalog/nemotron-49b.yaml +41 -0
- mlx_stack/data/catalog/nemotron-8b.yaml +44 -0
- mlx_stack/data/catalog/qwen3-8b.yaml +45 -0
- mlx_stack/data/catalog/qwen3.5-0.8b.yaml +45 -0
- mlx_stack/data/catalog/qwen3.5-14b.yaml +46 -0
- mlx_stack/data/catalog/qwen3.5-32b.yaml +45 -0
- mlx_stack/data/catalog/qwen3.5-3b.yaml +44 -0
- mlx_stack/data/catalog/qwen3.5-72b.yaml +42 -0
- mlx_stack/data/catalog/qwen3.5-8b.yaml +45 -0
- mlx_stack/py.typed +1 -0
- mlx_stack/utils/__init__.py +1 -0
- mlx_stack-0.1.0.dist-info/METADATA +397 -0
- mlx_stack-0.1.0.dist-info/RECORD +61 -0
- mlx_stack-0.1.0.dist-info/WHEEL +4 -0
- mlx_stack-0.1.0.dist-info/entry_points.txt +2 -0
- mlx_stack-0.1.0.dist-info/licenses/LICENSE +21 -0
mlx_stack/__init__.py
ADDED
mlx_stack/_version.py
ADDED
|
# file generated by vcs-versioning
# don't change, don't track in version control
from __future__ import annotations

__all__ = [
    "__version__",
    "__version_tuple__",
    "version",
    "version_tuple",
    "__commit_id__",
    "commit_id",
]

# Dunder and plain names are intentionally aliased pairs so both
# `mlx_stack._version.__version__` and `.version` resolve.
version: str
__version__: str
__version_tuple__: tuple[int | str, ...]
version_tuple: tuple[int | str, ...]
commit_id: str | None
__commit_id__: str | None

__version__ = version = '0.1.0'
__version_tuple__ = version_tuple = (0, 1, 0)

__commit_id__ = commit_id = None
mlx_stack/cli/bench.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
"""CLI command for benchmarking — ``mlx-stack bench``.
|
|
2
|
+
|
|
3
|
+
Runs 3 iterations of 1024-token prompt + 100-token generation against
|
|
4
|
+
a running tier's vllm-mlx instance or a temporary instance for a local
|
|
5
|
+
model. Reports mean ± std dev for prompt_tps and gen_tps. Compares
|
|
6
|
+
against catalog thresholds: PASS (<15%), WARN (15-30%), FAIL (>30%).
|
|
7
|
+
|
|
8
|
+
Supports --save to persist results to ~/.mlx-stack/benchmarks/<profile_id>.json.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import click
|
|
14
|
+
from rich.console import Console
|
|
15
|
+
from rich.table import Table
|
|
16
|
+
from rich.text import Text
|
|
17
|
+
|
|
18
|
+
from mlx_stack.core.benchmark import (
|
|
19
|
+
CLASSIFICATION_PASS,
|
|
20
|
+
CLASSIFICATION_WARN,
|
|
21
|
+
BenchmarkError,
|
|
22
|
+
BenchmarkResult_,
|
|
23
|
+
BenchmarkRunError,
|
|
24
|
+
BenchmarkTargetError,
|
|
25
|
+
)
|
|
26
|
+
from mlx_stack.core.catalog import CatalogError
|
|
27
|
+
from mlx_stack.core.deps import DependencyError, DependencyInstallError
|
|
28
|
+
|
|
29
|
+
console = Console(stderr=True)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@click.command()
@click.argument("target", required=True)
@click.option("--save", is_flag=True, default=False, help="Save benchmark results.")
def bench(target: str, save: bool) -> None:
    """Benchmark a tier or model.

    TARGET is a running tier name (e.g., 'fast', 'standard') or a
    catalog model ID (e.g., 'qwen3.5-8b'). For running tiers, targets
    the existing vllm-mlx instance. For local models, starts a temporary
    instance with full cleanup.

    Runs 3 iterations of 1024-token prompt + 100-token generation and
    reports mean ± std dev for prompt_tps and gen_tps.

    Compares against catalog benchmarks: PASS (within 15%), WARN (15-30%
    below), FAIL (>30% below) per metric with delta percentage.

    Use --save to persist results to ~/.mlx-stack/benchmarks/<profile_id>.json.
    """
    out = Console()

    try:
        # Imported lazily so the CLI stays fast when bench isn't invoked.
        from mlx_stack.core.benchmark import run_benchmark

        _display_results(run_benchmark(target=target, save=save), out, save=save)
    except (
        BenchmarkTargetError,
        BenchmarkRunError,
        BenchmarkError,
        DependencyInstallError,
        DependencyError,
        CatalogError,
    ) as exc:
        # The isinstance chain mirrors the original per-exception handler
        # order, so subclass relationships resolve to the same prefix.
        if isinstance(exc, BenchmarkTargetError):
            label = "Error"
        elif isinstance(exc, BenchmarkRunError):
            label = "Benchmark error"
        elif isinstance(exc, BenchmarkError):
            label = "Error"
        elif isinstance(exc, (DependencyInstallError, DependencyError)):
            label = "Dependency error"
        else:
            label = "Catalog error"
        console.print(f"[bold red]{label}:[/bold red] {exc}")
        raise SystemExit(1) from None
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _display_results(result: BenchmarkResult_, out: Console, save: bool = False) -> None:
    """Display benchmark results with Rich formatting.

    Sections, in order: header, instance-type note, performance table,
    catalog comparison (or a hint when no catalog data exists), tool-call
    outcome, per-iteration details, and an optional save confirmation.

    Args:
        result: The benchmark result to display.
        out: Rich console for output.
        save: Whether results were saved.
    """
    out.print()

    # Header
    header = Text("Benchmark Results", style="bold cyan")
    header.append(f" — {result.model_id} ({result.quant})")
    out.print(header)
    out.print()

    # Instance type indicator
    if result.used_temporary_instance:
        out.print("[dim]Using temporary vllm-mlx instance[/dim]")
    else:
        out.print("[dim]Using running tier instance[/dim]")
    out.print()

    # Performance results table
    perf_table = Table(title="Performance", show_header=True, header_style="bold")
    perf_table.add_column("Metric", style="cyan")
    perf_table.add_column("Mean", justify="right")
    perf_table.add_column("Std Dev", justify="right")

    perf_table.add_row(
        "Prompt TPS",
        f"{result.prompt_tps_mean:.1f} tok/s",
        f"± {result.prompt_tps_std:.1f}",
    )
    perf_table.add_row(
        "Generation TPS",
        f"{result.gen_tps_mean:.1f} tok/s",
        f"± {result.gen_tps_std:.1f}",
    )

    out.print(perf_table)
    out.print()

    # Comparison against catalog
    if result.classifications:
        comp_table = Table(
            title="Catalog Comparison",
            show_header=True,
            header_style="bold",
        )
        comp_table.add_column("Metric", style="cyan")
        comp_table.add_column("Measured", justify="right")
        comp_table.add_column("Catalog", justify="right")
        comp_table.add_column("Delta", justify="right")
        comp_table.add_column("Result", justify="center")

        for cls in result.classifications:
            # Color the classification
            if cls.classification == CLASSIFICATION_PASS:
                result_style = "[bold green]PASS[/bold green]"
            elif cls.classification == CLASSIFICATION_WARN:
                result_style = "[bold yellow]WARN[/bold yellow]"
            else:
                result_style = "[bold red]FAIL[/bold red]"

            # delta_pct > 0 means measured fell BELOW the catalog value, so
            # it is rendered with a leading minus.  (A dead initial
            # f"{...:+.1f}%" assignment that both branches overwrote has
            # been removed.)
            if cls.delta_pct > 0:
                delta_str = f"-{cls.delta_pct:.1f}%"  # Below catalog
            else:
                delta_str = f"+{abs(cls.delta_pct):.1f}%"  # Above catalog

            metric_name = cls.metric.replace("_", " ").title().replace("Tps", "TPS")

            comp_table.add_row(
                metric_name,
                f"{cls.measured:.1f}",
                f"{cls.catalog:.1f}",
                delta_str,
                result_style,
            )

        out.print(comp_table)
        out.print()
    elif not result.catalog_data_available:
        out.print(
            "[yellow]No catalog benchmark data available for your hardware profile.[/yellow]\n"
            "Use [bold]--save[/bold] to record this benchmark for future comparisons."
        )
        out.print()

    # Tool-calling result
    if result.tool_call_result is not None:
        out.print(Text("Tool Calling", style="bold cyan"))
        tc = result.tool_call_result
        if tc.success:
            out.print(
                f" [green]✓ Valid tool call[/green] — "
                f"round-trip: {tc.round_trip_time:.2f}s"
            )
        else:
            out.print(
                f" [red]✗ Tool call failed[/red] — {tc.error}"
            )
        out.print()
    elif result.catalog_data_available:
        # tool_call_result is None here.  The original wrote this as
        # `elif not result.tool_call_result:` with a nested `if/pass/else`
        # — both reduce to: only mention the skip when catalog data exists.
        out.print(
            "[dim]Tool calling: skipped (model does not support tool calling)[/dim]"
        )
        out.print()

    # Iteration details
    if result.iterations:
        iter_table = Table(
            title="Iteration Details",
            show_header=True,
            header_style="bold",
        )
        iter_table.add_column("#", style="dim", justify="right")
        iter_table.add_column("Prompt TPS", justify="right")
        iter_table.add_column("Gen TPS", justify="right")
        iter_table.add_column("Time", justify="right")

        for i, it in enumerate(result.iterations, 1):
            iter_table.add_row(
                str(i),
                f"{it.prompt_tps:.1f}",
                f"{it.gen_tps:.1f}",
                f"{it.total_time:.1f}s",
            )

        out.print(iter_table)
        out.print()

    # Save confirmation
    if save:
        out.print("[green]✓ Results saved.[/green] "
                  "These will be used by 'recommend' and 'init' for scoring.")
        out.print()
|
mlx_stack/cli/config.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""CLI commands for configuration management — `mlx-stack config`.
|
|
2
|
+
|
|
3
|
+
Provides set, get, list, and reset subcommands for managing
|
|
4
|
+
persistent configuration in ~/.mlx-stack/config.yaml.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.table import Table
|
|
12
|
+
|
|
13
|
+
from mlx_stack.core.config import (
|
|
14
|
+
ConfigCorruptError,
|
|
15
|
+
ConfigError,
|
|
16
|
+
ConfigValidationError,
|
|
17
|
+
get_all_config,
|
|
18
|
+
get_value,
|
|
19
|
+
mask_value,
|
|
20
|
+
reset_config,
|
|
21
|
+
set_value,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
console = Console(stderr=True)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@click.group()
def config() -> None:
    """Manage mlx-stack configuration."""
    # Click group shell: no body needed — the set/get/list/reset
    # subcommands attach themselves via @config.command(...) below.
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@config.command("set")
@click.argument("key")
@click.argument("value")
def config_set(key: str, value: str) -> None:
    """Set a configuration value.

    KEY is the config key name (e.g., default-quant, litellm-port).
    VALUE is the value to set.

    Valid keys: openrouter-key, default-quant, memory-budget-pct,
    litellm-port, model-dir, auto-health-check.
    """
    # All three config error types print the same prefix, so a single
    # tuple-except replaces the original trio of identical handlers.
    try:
        stored = set_value(key, value)
        console.print(
            f"[green]✓[/green] Set [bold]{key}[/bold] = {mask_value(key, stored)}"
        )
    except (ConfigValidationError, ConfigCorruptError, ConfigError) as exc:
        console.print(f"[bold red]Error:[/bold red] {exc}")
        raise SystemExit(1) from None
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@config.command("get")
@click.argument("key")
def config_get(key: str) -> None:
    """Get a configuration value.

    KEY is the config key name to retrieve.

    Valid keys: openrouter-key, default-quant, memory-budget-pct,
    litellm-port, model-dir, auto-health-check.
    """
    # Secrets (openrouter-key) are masked by mask_value before display.
    try:
        console.print(mask_value(key, get_value(key)))
    except (ConfigCorruptError, ConfigError) as exc:
        console.print(f"[bold red]Error:[/bold red] {exc}")
        raise SystemExit(1) from None
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@config.command("list")
def config_list() -> None:
    """List all configuration values.

    Shows a table of all config keys with their current values,
    defaults, and whether each value is user-set or default.
    The openrouter-key is masked for security.
    """
    try:
        entries = get_all_config()
    except ConfigCorruptError as exc:
        console.print(f"[bold red]Error:[/bold red] {exc}")
        raise SystemExit(1) from None

    out = Console()
    table = Table(title="Configuration", show_header=True, header_style="bold cyan")
    for heading, opts in (
        ("Key", {"style": "bold"}),
        ("Value", {}),
        ("Default", {}),
        ("Source", {"style": "dim"}),
    ):
        table.add_column(heading, **opts)

    for item in entries:
        key_name = item["name"]
        is_secret = key_name == "openrouter-key"

        if item["is_default"]:
            source_cell = "[dim]default[/dim]"
        else:
            source_cell = "[green]user-set[/green]"

        # An unset secret shows as a placeholder rather than an empty cell.
        if is_secret and item["is_default"]:
            value_cell = "[dim](not set)[/dim]"
        else:
            value_cell = item["masked_value"]

        default_cell = "(not set)" if is_secret else str(item["default"])

        table.add_row(key_name, value_cell, default_cell, source_cell)

    out.print()
    out.print(table)
    out.print()
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
@config.command("reset")
@click.option("--yes", is_flag=True, help="Confirm reset without prompting.")
@click.option("--force", is_flag=True, help="Alias for --yes.")
def config_reset(yes: bool, force: bool) -> None:
    """Reset configuration to defaults.

    Removes all user-set values, restoring defaults for all keys.
    Requires --yes or --force confirmation in non-interactive mode.
    """
    def _flag_required() -> None:
        # Shared message for both the non-TTY and stdin-probe-failure paths.
        console.print(
            "[bold red]Error:[/bold red] Reset requires --yes or --force flag "
            "in non-interactive mode."
        )

    confirmed = yes or force

    if not confirmed:
        try:
            if click.get_text_stream("stdin").isatty():
                confirmed = click.confirm(
                    "Reset all configuration to defaults?", default=False
                )
            else:
                _flag_required()
                raise SystemExit(1)
        except (AttributeError, OSError):
            # stdin probe (or prompt) itself failed — treat as non-interactive.
            _flag_required()
            raise SystemExit(1) from None

    if not confirmed:
        console.print("[yellow]Reset cancelled.[/yellow]")
        return

    try:
        reset_config()
        console.print("[green]✓[/green] Configuration reset to defaults.")
    except ConfigError as exc:
        console.print(f"[bold red]Error:[/bold red] {exc}")
        raise SystemExit(1) from None
|
mlx_stack/cli/down.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
"""CLI command for stopping services — `mlx-stack down`.
|
|
2
|
+
|
|
3
|
+
Stops all managed services, or a single tier with --tier.
|
|
4
|
+
Reports per-service shutdown results including graceful vs forced.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.table import Table
|
|
12
|
+
from rich.text import Text
|
|
13
|
+
|
|
14
|
+
from mlx_stack.core.process import LockError
|
|
15
|
+
from mlx_stack.core.stack_down import DownError, DownResult, run_down
|
|
16
|
+
|
|
17
|
+
console = Console(stderr=True)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _display_results(result: DownResult) -> None:
    """Render the shutdown summary for a completed `down` run.

    Prints any warnings first, then a table listing each service with
    its PID, final status, and shutdown method (graceful vs forced).

    Args:
        result: The DownResult from shutdown.
    """
    out = Console()
    out.print()

    if result.warnings:
        for note in result.warnings:
            out.print(f"[yellow]⚠ {note}[/yellow]")
        out.print()

    table = Table(
        title="Shutdown Summary",
        show_header=True,
        header_style="bold cyan",
    )
    table.add_column("Service", style="bold", min_width=12)
    table.add_column("PID", justify="right", min_width=8)
    table.add_column("Status", min_width=14)
    table.add_column("Method", min_width=10)

    # Known statuses get styled cells; anything else falls through raw.
    status_cells = {
        "stopped": "[bold green]stopped[/bold green]",
        "stale": "[yellow]stale (cleaned)[/yellow]",
        "corrupt": "[yellow]corrupt (cleaned)[/yellow]",
        "not-running": "[dim]not running[/dim]",
    }

    for svc in result.services:
        pid_cell = "-" if svc.pid is None else str(svc.pid)
        status_cell = status_cells.get(svc.status, svc.status)

        # `graceful` is tri-state: True / False / None (never signalled).
        method_cell = "-"
        if svc.graceful is True:
            method_cell = "[green]graceful (SIGTERM)[/green]"
        elif svc.graceful is False:
            method_cell = "[red]forced (SIGKILL)[/red]"

        table.add_row(svc.name, pid_cell, status_cell, method_cell)

    out.print(table)
    out.print()
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
@click.command()
@click.option("--tier", "tier_filter", type=str, help="Stop only the specified tier.")
def down(tier_filter: str | None) -> None:
    """Stop all managed services.

    Terminates processes in correct order: LiteLLM first, then model
    servers in reverse startup order. Uses SIGTERM with a 10-second
    grace period, escalating to SIGKILL if needed.

    Use --tier to stop only a specific tier while leaving others running.
    """
    # Both error types print the same prefix — one tuple-except suffices.
    try:
        result = run_down(tier_filter=tier_filter)
    except (DownError, LockError) as exc:
        console.print(f"[bold red]Error:[/bold red] {exc}")
        raise SystemExit(1) from None

    if not result.nothing_to_stop:
        _display_results(result)
        return

    out = Console()
    out.print()
    out.print(Text("Nothing to stop.", style="bold yellow"))
    out.print("[dim]No PID files found — no managed services are running.[/dim]")
    out.print()
|