opencode-llmstack 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,365 @@
+ """``llmstack install`` -- regenerate ``opencode.json`` (and AGENTS.md copy).
+
+ Renders the opencode config atomically (tmp file in target dir, validate,
+ ``mv``), copies AGENTS.md alongside it, and pins the default channel for
+ ``start`` to pick up. ``llama-swap.yaml`` is *not* generated here -- it's
+ a runtime-only artifact owned by ``llmstack start`` (which knows the
+ chosen channel and regenerates the yaml on each launch).
+
+ This is also where the **channel** is decided -- everything downstream
+ (``start``, ``status``, the activate hook) reads the persisted choice
+ from ``.llmstack/default-channel`` and never re-derives it. Three
+ channels exist:
+
+ * ``current`` -- local stack, canonical channel (default)
+ * ``next`` -- local stack, queued-upgrade channel
+ * ``external`` -- thin client; no daemons launched. Opt in via
+   ``--external [URL]`` (URL defaults to the local
+   router, ``http://127.0.0.1:10101``, so two
+   projects on one host can share daemons without
+   fighting for ports). ``LLMSTACK_REMOTE_URL`` in the
+   environment is honoured as an alternative way in.
+
+ ``--print`` writes the opencode config to stdout instead of files.
+
+ When this command seeds a fresh ``models.ini`` from the bundled template
+ and the ``bedrock`` extra is installed (i.e. ``import boto3`` succeeds),
+ any block fenced with ``; >>> AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE >>>``
+ markers in the seeded file is uncommented in place. The auto-enable
+ runs only on the *initial* seed; subsequent ``install`` runs never
+ mutate the user's models.ini.
+ """
+
+ from __future__ import annotations
+
+ import os
+ import shutil
+ import urllib.error
+ import urllib.request
+ from pathlib import Path
+
+ from llmstack.generators import render_to
+ from llmstack.generators.opencode import render as render_opencode
+ from llmstack.generators.opencode import validate as validate_opencode
+ from llmstack.paths import (
+     AGENTS_TEMPLATE,
+     DEFAULT_REMOTE_URL,
+     ChannelMark,
+     ensure_models_ini,
+     ensure_state_dirs,
+     env_remote_url,
+     write_marker,
+ )
+
+ _BEDROCK_BEGIN = "; >>> AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE >>>"
+ _BEDROCK_END = "; <<< AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE <<<"
+
+
+ def _try_enable_bedrock_blocks(ini_path: Path) -> int:
+     """Activate any ``AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE`` block in
+     ``ini_path`` when ``boto3`` is importable.
+
+     For each fenced block we drop the BEGIN / END marker lines and
+     strip a single leading ``"; "`` (or ``";\\t"``) from every line in
+     between -- so a doubly-commented line like ``; ; aws_profile = ...``
+     becomes a still-commented ``; aws_profile = ...`` in the active
+     config (preserving the "uncomment to use" semantics of literal
+     in-file comments). Returns the number of blocks rewritten; ``0``
+     when boto3 is missing, no markers exist, or every block is already
+     expanded.
+     """
+     try:
+         import boto3  # noqa: F401 -- presence check only
+     except ImportError:
+         return 0
+
+     text = ini_path.read_text()
+     if _BEDROCK_BEGIN not in text or _BEDROCK_END not in text:
+         return 0
+
+     out: list[str] = []
+     inside = False
+     blocks = 0
+     for line in text.splitlines(keepends=True):
+         bare = line.rstrip("\r\n").rstrip()
+         if bare == _BEDROCK_BEGIN:
+             inside = True
+             blocks += 1
+             continue
+         if bare == _BEDROCK_END:
+             inside = False
+             continue
+         if inside:
+             if line.startswith("; ") or line.startswith(";\t"):
+                 out.append(line[2:])
+             elif bare == ";":
+                 out.append(line[1:])
+             else:
+                 out.append(line)
+         else:
+             out.append(line)
+
+     if blocks == 0:
+         return 0
+     ini_path.write_text("".join(out))
+     return blocks
+
+
+ def _print_help() -> None:
+     print(
+         "usage: llmstack install [--print] [--current | --next] "
+         "[--external [URL]]"
+     )
+
+
+ def _parse_args(args: list[str]) -> tuple[bool, str, str | None, bool]:
+     """Parse ``install``'s flags.
+
+     Returns ``(print_only, local_channel, external_url, want_external)``:
+
+     * ``local_channel`` is ``current`` or ``next`` -- ignored when
+       ``want_external`` is ``True``.
+     * ``external_url`` is the explicit URL given to ``--external <url>``,
+       if any. ``None`` when the flag was bare or absent.
+     * ``want_external`` is ``True`` iff the user passed ``--external``
+       (with or without a URL). The env-var fallback is layered in by
+       the caller, not here, so this stays a pure CLI parse.
+
+     ``--external`` accepts either ``--external <url>`` (separate arg) or
+     ``--external=<url>``. Mutually exclusive with ``--current`` /
+     ``--next`` -- mixing them raises ``SystemExit``.
+     """
+     print_only = False
+     local_channel = "current"
+     local_explicit = False
+     external_url: str | None = None
+     want_external = False
+
+     i = 0
+     while i < len(args):
+         arg = args[i]
+         if arg in ("--print", "-n"):
+             print_only = True
+         elif arg == "--next":
+             local_channel = "next"
+             local_explicit = True
+         elif arg == "--current":
+             local_channel = "current"
+             local_explicit = True
+         elif arg == "--external":
+             want_external = True
+             # Optional URL as next positional, but only if it looks like
+             # a URL (not the next flag).
+             if i + 1 < len(args) and not args[i + 1].startswith("-"):
+                 external_url = args[i + 1]
+                 i += 1
+         elif arg.startswith("--external="):
+             want_external = True
+             external_url = arg[len("--external="):]
+         elif arg in ("-h", "--help"):
+             _print_help()
+             raise SystemExit(0)
+         else:
+             print(
+                 f"[!] unknown arg to install: {arg} "
+                 "(try --print, --current, --next, --external, -h)"
+             )
+             raise SystemExit(2)
+         i += 1
+
+     if want_external and local_explicit:
+         print(
+             "[!] --external is mutually exclusive with --current / --next "
+             "(external installs don't run local daemons).",
+         )
+         raise SystemExit(2)
+
+     return print_only, local_channel, external_url, want_external
+
+
+ def _resolve_external_url(flag_url: str | None) -> str:
+     """Pick the URL to bake into opencode.json + the channel marker.
+
+     Precedence: explicit ``--external <url>`` arg > ``$LLMSTACK_REMOTE_URL``
+     env var > :data:`DEFAULT_REMOTE_URL` (the local router). The default
+     is what makes the "two projects on one host" workflow zero-config:
+     ``llmstack install --external`` with nothing else set wires this
+     project as a thin client of localhost so it can ride alongside
+     whichever project actually owns the daemons.
+
+     The ``$LLMSTACK_REMOTE_URL`` rung is what the activate hook
+     populates when the user ``cd``-s into a project pinned to
+     ``external`` -- so re-running ``llmstack install`` from an active
+     shell inside an external project doesn't require the URL again.
+     """
+     if flag_url:
+         return flag_url.rstrip("/")
+     env = env_remote_url()
+     if env:
+         return env.rstrip("/")
+     return DEFAULT_REMOTE_URL
+
+
+ def _fetch_remote_models_ini(url: str) -> str:
+     """Pull the live ``models.ini`` from a remote llmstack router.
+
+     External installs use the router as the source of truth for tier
+     inventory: the same file the router parsed at startup is what the
+     thin client renders ``opencode.json`` against, so tier names +
+     descriptions agree with what the router actually serves. The fetch
+     is also the canonical health check -- a 200 with parseable INI
+     content proves both that the router is reachable and that the
+     operator on the remote side has wired their config.
+
+     Raises ``SystemExit`` (with a user-facing message) on any failure
+     -- DNS, connection refused, non-2xx, empty body. The thin-client
+     install is meaningless without the file, so we refuse to write a
+     stale opencode.json from cached state. There is no client-side
+     cache: every ``install`` re-fetches.
+     """
+     fetch_url = f"{url.rstrip('/')}/models.ini"
+     req = urllib.request.Request(fetch_url, headers={"Accept": "text/plain"})
+     try:
+         with urllib.request.urlopen(req, timeout=15.0) as resp:
+             if resp.status != 200:
+                 raise SystemExit(
+                     f"[!] {fetch_url} returned HTTP {resp.status} -- "
+                     "the remote router is up but doesn't have a "
+                     "models.ini. Run `llmstack install` on the router "
+                     "host to seed one, then retry here."
+                 )
+             charset = resp.headers.get_content_charset() or "utf-8"
+             text = resp.read().decode(charset)
+     except urllib.error.HTTPError as e:
+         raise SystemExit(
+             f"[!] {fetch_url} returned HTTP {e.code} {e.reason}.\n"
+             "    is the remote running an llmstack version with "
+             "GET /models.ini? (added in v3.x)"
+         ) from e
+     except (urllib.error.URLError, TimeoutError, OSError) as e:
+         raise SystemExit(
+             f"[!] failed to reach {fetch_url}: {e}\n"
+             "    check the URL, the network path, and that the remote "
+             "router is up."
+         ) from e
+
+     if not text.strip():
+         raise SystemExit(
+             f"[!] {fetch_url} returned an empty body -- nothing to "
+             "render opencode.json from."
+         )
+     return text
+
+
+ def run(args: list[str]) -> int:
+     try:
+         print_only, local_channel, external_url_arg, want_external = _parse_args(args)
+     except SystemExit as e:
+         return int(e.code) if isinstance(e.code, int) else 0
+
+     # Env-var fallback: ``LLMSTACK_REMOTE_URL`` set without ``--external``
+     # still implies external mode. The activate hook re-exports this
+     # var from the channel marker when the user ``cd``-s into an
+     # external project, so re-running ``llmstack install`` from inside
+     # an active shell doesn't need the URL or the flag again.
+     if not want_external and env_remote_url() is not None:
+         want_external = True
+
+     if want_external:
+         remote = _resolve_external_url(external_url_arg)
+         channel: str = "external"
+     else:
+         remote = None
+         channel = local_channel
+
+     # Source of the INI is mode-dependent. Local mode reads (and
+     # seeds-if-missing) the per-project file. External mode pulls the
+     # router's live copy on every install -- the thin client never
+     # keeps a local models.ini, since that would just be a stale
+     # mirror of the router's truth.
+     ini_text: str | None = None
+     ini_source_label: str
+     if remote is not None:
+         # Flush so the "fetching" line lands before the network call;
+         # otherwise an error written to stderr from inside
+         # _fetch_remote_models_ini races ahead of buffered stdout and
+         # the user sees the failure message before the "what we're
+         # doing" message.
+         print(f"[*] fetching models.ini from {remote}/models.ini ...", flush=True)
+         ini_text = _fetch_remote_models_ini(remote)
+         print(f"[OK] {len(ini_text.splitlines())} lines from {remote}")
+         ini_source_label = f"{remote}/models.ini"
+     else:
+         ini_path, seeded = ensure_models_ini()
+         if seeded:
+             print(f"[*] no models.ini found -- seeded default at {ini_path}")
+             enabled = _try_enable_bedrock_blocks(ini_path)
+             if enabled:
+                 print(
+                     f"[*] boto3 detected -- enabled {enabled} bedrock-backed "
+                     f"tier block(s) in {ini_path}"
+                 )
+             print("    edit it to taste, then re-run `llmstack install`.")
+         ini_source_label = str(ini_path)
+
+     paths = ensure_state_dirs()
+
+     if print_only:
+         if remote is not None:
+             print(f"# external mode (channel: external, remote: {remote}); llama-swap.yaml not used.")
+             print()
+         print("----- opencode.json -----")
+         print(render_opencode(ini_text=ini_text, remote=remote))
+         return 0
+
+     print("[1/2] AGENTS.md")
+     if AGENTS_TEMPLATE.is_file():
+         shutil.copyfile(AGENTS_TEMPLATE, paths.agents_local)
+         os.chmod(paths.agents_local, 0o644)
+         print(f"[OK] copied AGENTS.md -> {paths.agents_local}")
+     else:
+         print(f"[!] AGENTS.md template not found at {AGENTS_TEMPLATE}; skipping copy")
+
+     print()
+     print("[2/2] opencode.json")
+     prev = os.environ.get("OPENCODE_INSTRUCTIONS")
+     os.environ["OPENCODE_INSTRUCTIONS"] = str(paths.agents_local)
+     try:
+         render_to(
+             paths.opencode_json,
+             render=lambda p: Path(p).write_text(
+                 render_opencode(ini_text=ini_text, remote=remote)
+             ),
+             validate=validate_opencode,
+         )
+     finally:
+         if prev is None:
+             os.environ.pop("OPENCODE_INSTRUCTIONS", None)
+         else:
+             os.environ["OPENCODE_INSTRUCTIONS"] = prev
+     print(f"[OK] installed {paths.opencode_json}")
+
+     if remote is not None:
+         write_marker(paths.default_marker, ChannelMark("external", remote))
+         print(f"[OK] default channel: external (remote: {remote})")
+     else:
+         write_marker(paths.default_marker, ChannelMark(channel))
+         print(f"[OK] default channel: {channel}")
+
+     print()
+     print(f"[OK] opencode config generated from {ini_source_label}.")
+     print()
+     print(f"  config:       {paths.opencode_json}")
+     print(f"  instructions: {paths.agents_local}")
+     if remote is not None:
+         print(f"  remote:       {remote}")
+     else:
+         print(f"  channel:      {channel}")
+     print()
+     print("Next:")
+     if remote is not None:
+         print("  llmstack start   # re-fetch /models.ini + drop into the client subshell")
+     else:
+         print("  llmstack start   # generate llama-swap.yaml + bring up the stack")
+     print("  llmstack check   # snapshot configured GGUFs + drift check")
+     return 0
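
The double-comment convention handled by `_try_enable_bedrock_blocks` above is easiest to see on a concrete file. A minimal sketch of the transformation, assuming the module is importable as `llmstack.commands.install` (the diff does not show file paths) and using illustrative tier names and keys rather than the bundled template's:

    import tempfile
    from pathlib import Path

    from llmstack.commands.install import _try_enable_bedrock_blocks  # assumed import path

    sample = (
        "; >>> AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE >>>\n"
        "; [tier.cloud]\n"
        "; backend = bedrock\n"
        "; ; aws_profile = my-profile\n"
        "; <<< AUTO-ENABLE-WHEN-BEDROCK-AVAILABLE <<<\n"
    )
    ini = Path(tempfile.mkdtemp()) / "models.ini"
    ini.write_text(sample)

    # With boto3 importable this returns 1 and rewrites the file to:
    #     [tier.cloud]
    #     backend = bedrock
    #     ; aws_profile = my-profile
    # Markers are dropped and one "; " is stripped per line, so the
    # doubly-commented aws_profile line survives as a literal
    # "uncomment to use" comment. Without boto3 it returns 0 and the
    # file is left untouched.
    print(_try_enable_bedrock_blocks(ini))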
@@ -0,0 +1,36 @@
+ """``llmstack install-llama-swap`` -- (re-)download the llama-swap binary only.
+
+ Exposes the previously-internal ``_install_llama_swap`` helper as a
+ first-class subcommand. Useful for ``--force`` upgrades without touching
+ the generated configs.
+ """
+
+ from __future__ import annotations
+
+ from llmstack.download.binary import install_llama_swap
+ from llmstack.paths import is_remote, remote_url
+
+
+ def _print_help() -> None:
+     print("usage: llmstack install-llama-swap [--force]")
+
+
+ def run(args: list[str]) -> int:
+     force = False
+     for arg in args:
+         if arg in ("-f", "--force"):
+             force = True
+         elif arg in ("-h", "--help"):
+             _print_help()
+             return 0
+         else:
+             print(f"[!] unknown arg to install-llama-swap: {arg}")
+             return 2
+
+     if is_remote():
+         print(f"[!] this project is wired as a thin client of {remote_url()} (channel: external);")
+         print("    the binary lives on the remote. `llmstack install-llama-swap` is a local-only command.")
+         return 1
+
+     install_llama_swap(force=force)
+     return 0
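
The "tmp file in target dir, validate, mv" contract that install.py's docstring attributes to `render_to` is a standard atomic-write pattern. `render_to` itself is not part of this diff, so the following is a generic sketch of how such a helper typically works (the name `render_atomically` is hypothetical, not the package's API):

    import os
    import tempfile
    from pathlib import Path
    from typing import Callable

    def render_atomically(
        target: Path,
        render: Callable[[Path], None],
        validate: Callable[[Path], None],
    ) -> None:
        # Temp file in the *target's* directory, so the final rename is
        # an atomic same-filesystem move rather than a cross-device copy.
        fd, tmp_name = tempfile.mkstemp(dir=target.parent, suffix=".tmp")
        os.close(fd)
        tmp = Path(tmp_name)
        try:
            render(tmp)                  # write the candidate config
            validate(tmp)                # raises -> target stays untouched
            os.replace(tmp, target)      # the atomic "mv"
        finally:
            tmp.unlink(missing_ok=True)  # no-op after a successful replace

A failed `validate` raises out of the helper before the rename, so any previously installed config is left exactly as it was.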
@@ -0,0 +1,59 @@
+ """``llmstack reload`` -- refresh env + prompt of the current shell.
+
+ The activate hook normally re-evaluates env and ``PROMPT`` / ``PS1`` on
+ every chpwd, so cd-ing into a project (or switching between projects)
+ keeps things current. But mid-session events -- ``llmstack start --next``
+ inside an already-active shell, the channel marker getting rewritten by
+ another process, etc. -- don't trigger a chpwd, so the prompt would stay
+ stale until the next directory change.
+
+ Pipe this command's output through your shell's eval to apply the
+ current channel's env + prompt in-place::
+
+     # zsh / bash
+     eval "$(llmstack reload)"
+
+     # powershell
+     Invoke-Expression (& llmstack reload | Out-String)
+
+ We resolve the channel from ``.llmstack/active-channel`` (live, written
+ by ``start``), falling back to ``.llmstack/default-channel`` (intent,
+ written by ``install``), and finally to ``current``. All informational
+ output goes to stderr so stdout stays eval-safe.
+ """
+
+ from __future__ import annotations
+
+ import sys
+
+ from llmstack.paths import read_marker, resolve
+ from llmstack.shell_env import emit_shell_refresh
+
+
+ def _print_help() -> None:
+     print('usage: eval "$(llmstack reload)"', file=sys.stderr)
+
+
+ def run(args: list[str]) -> int:
+     for a in args:
+         if a in ("-h", "--help"):
+             _print_help()
+             return 0
+         print(f"[!] unknown arg to reload: {a}", file=sys.stderr)
+         return 2
+
+     paths = resolve()
+     if not paths.opencode_json.is_file():
+         # No project -- emit nothing on stdout (eval no-op) and a hint
+         # on stderr. We don't fail hard because users may have their
+         # rc wired to call reload defensively.
+         print(
+             f"[!] no .llmstack/opencode.json under {paths.work_dir} -- nothing to reload.",
+             file=sys.stderr,
+         )
+         return 0
+
+     mark = read_marker(paths.active_marker) or read_marker(paths.default_marker)
+     channel = mark.channel if mark else "current"
+     emit_shell_refresh(channel)
+     return 0
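
The split reload.py relies on -- shell code on stdout, diagnostics on stderr -- is what keeps `eval "$(llmstack reload)"` safe. `emit_shell_refresh` is not shown in this diff, so the following illustrates only the discipline, not its real output; the exported variable name is hypothetical:

    import sys

    def emit_refresh_sketch(channel: str) -> None:
        # stdout carries nothing but shell code: this is what eval runs.
        print(f'export LLMSTACK_CHANNEL="{channel}"')  # hypothetical variable
        # Informational output goes to stderr, where eval never sees it.
        print(f"[i] refreshed env for channel: {channel}", file=sys.stderr)

    emit_refresh_sketch("current")

Run as `eval "$(python sketch.py)"`, this sets the variable in the calling shell while the notice still reaches the terminal.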
@@ -0,0 +1,12 @@
+ """``llmstack restart`` -- ``stop`` followed by ``start`` (flags pass through to ``start``)."""
+
+ from __future__ import annotations
+
+ from llmstack.commands import start, stop
+
+
+ def run(args: list[str]) -> int:
+     rc = stop.run([])
+     if rc not in (0, None):
+         return rc
+     return start.run(args)
@@ -0,0 +1,146 @@
+ """``llmstack setup`` -- first-time walkthrough.
+
+ Mirrors the shell ``cmd_setup``:
+
+ 1. Kick off GGUF downloads in the background (skip with ``--skip-download``).
+ 2. Wait for them to finish (skip with ``--skip-wait``).
+ 3. Install the ``llama-swap`` binary.
+ 4. Print the shell activation hook eval line + the auto-detected hook.
+ 5. Verify ``opencode`` is on PATH.
+
+ Does NOT run ``install`` or ``start`` -- those are separate steps once
+ downloads finish.
+ """
+
+ from __future__ import annotations
+
+ import shutil
+ import subprocess
+
+ from llmstack._platform import IS_WINDOWS, shell_family
+ from llmstack.commands.activate import write_hook
+ from llmstack.download.binary import install_llama_swap
+ from llmstack.download.ggufs import download_all, wait_for_downloads
+ from llmstack.paths import is_remote, remote_url
+ from llmstack.shell_env import _user_shell
+
+
+ def _print_help() -> None:
+     print("usage: llmstack setup [--skip-download] [--skip-wait]")
+
+
+ def run(args: list[str]) -> int:
+     skip_download = False
+     skip_wait = False
+     for arg in args:
+         if arg == "--skip-download":
+             skip_download = True
+         elif arg == "--skip-wait":
+             skip_wait = True
+         elif arg in ("-h", "--help"):
+             _print_help()
+             return 0
+         else:
+             print(f"[!] unknown arg to setup: {arg} (try --skip-download, --skip-wait, -h)")
+             return 2
+
+     if is_remote():
+         print(f"[!] external mode is in effect ({remote_url()}); setup is local-only.")
+         print("    in client mode you only need:")
+         print("      llmstack install --external   # generate .llmstack/opencode.json (points at remote)")
+         print("      llmstack start                # verify remote + enter the client subshell")
+         return 1
+
+     if not skip_download:
+         print("[1/5] downloading required GGUFs...")
+         download_all()
+         print()
+     else:
+         print("[1/5] (skipped) downloads")
+         print()
+
+     if not skip_download and not skip_wait:
+         print("[2/5] waiting for downloads to finish...")
+         wait_for_downloads()
+         print()
+     else:
+         print("[2/5] (skipped) wait")
+         print()
+
+     _, shell_name = _user_shell()
+     family = shell_family(shell_name)
+
+     print("[3/5] installing llama-swap binary...")
+     install_llama_swap()
+     print()
+
+     print("[4/5] wiring shell activation hook...")
+     print()
+     if family in ("bash", "zsh"):
+         rc_hint = f"~/.{shell_name}rc"
+         hook_arg = shell_name
+     elif family == "powershell":
+         rc_hint = "$PROFILE"
+         hook_arg = "powershell"
+     else:
+         rc_hint = "your shell rc file"
+         hook_arg = None
+
+     eval_line: str | None = None
+     if hook_arg is not None:
+         path, _src = write_hook(hook_arg)
+         eval_line = f'eval "$(llmstack activate {hook_arg})"'
+         print(f"[OK] hook installed: {path}")
+         print()
+         print("To turn it on in this shell now (to persist across new shells, paste")
+         print(f"the same line into {rc_hint}):")
+         print()
+         print(f"  {eval_line}")
+
+     print()
+     print("[5/5] checking opencode...")
+     if shutil.which("opencode"):
+         path = shutil.which("opencode") or "opencode"
+         print(f"[OK] opencode found: {path}")
+         try:
+             ver = subprocess.run(
+                 ["opencode", "--version"],
+                 check=False,
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.STDOUT,
+                 text=True,
+                 timeout=5,
+             ).stdout.strip()
+         except (OSError, subprocess.SubprocessError):
+             ver = "(unknown)"
+         print(f"    version: {ver or '(unknown)'}")
+     else:
+         print("[!] opencode not found in PATH.")
+         print()
+         print("Install it with:")
+         if IS_WINDOWS:
+             print("  irm https://opencode.ai/install.ps1 | iex")
+         else:
+             print("  curl -fsSL https://opencode.ai/install | sh")
+         print("  # or via npm:")
+         print("  npm install -g opencode-ai")
+         print()
+         print("After installing, run:")
+         print("  llmstack install   # generate configs for this project")
+         print("  llmstack start     # bring up the stack")
+         return 0
+
+     print()
+     print("[OK] setup complete.")
+     print()
+     print("Next steps:")
+     if eval_line is not None:
+         print(f"  1. Run (and paste into {rc_hint} for persistence): {eval_line}")
+     else:
+         print("  1. Source the generated hook in your shell rc (see above)")
+     print("  2. llmstack install   # generate .llmstack/ configs for this project")
+     print("  3. llmstack start     # bring up the stack")
+     print()
+     print("To check configured GGUFs + drift vs models.ini:")
+     print("  llmstack check")
+     return 0