opencode-llama-cpp-launcher 0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. opencode_llama_cpp_launcher-0.1.0/.github/workflows/release.yml +80 -0
  2. opencode_llama_cpp_launcher-0.1.0/.gitignore +13 -0
  3. opencode_llama_cpp_launcher-0.1.0/LICENSE +21 -0
  4. opencode_llama_cpp_launcher-0.1.0/PKG-INFO +127 -0
  5. opencode_llama_cpp_launcher-0.1.0/README.md +105 -0
  6. opencode_llama_cpp_launcher-0.1.0/opencode-llama.example.yaml +15 -0
  7. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/__init__.py +3 -0
  8. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/cli/__init__.py +1 -0
  9. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/cli/entrypoint.py +114 -0
  10. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/models/__init__.py +1 -0
  11. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/models/launch_config.py +43 -0
  12. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/models/launch_status.py +14 -0
  13. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/__init__.py +1 -0
  14. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/binaries.py +12 -0
  15. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/errors.py +5 -0
  16. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/health.py +59 -0
  17. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/launch_config_loader.py +105 -0
  18. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/launch_preparer.py +130 -0
  19. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/launcher.py +197 -0
  20. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/opencode_config.py +34 -0
  21. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/services/ports.py +18 -0
  22. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/storage/__init__.py +1 -0
  23. opencode_llama_cpp_launcher-0.1.0/opencode_llama_cpp_launcher/storage/config_loader.py +134 -0
  24. opencode_llama_cpp_launcher-0.1.0/pyproject.toml +48 -0
  25. opencode_llama_cpp_launcher-0.1.0/tests/test_cli.py +120 -0
  26. opencode_llama_cpp_launcher-0.1.0/tests/test_config_loader.py +235 -0
  27. opencode_llama_cpp_launcher-0.1.0/tests/test_launcher.py +261 -0
  28. opencode_llama_cpp_launcher-0.1.0/tests/test_opencode_config.py +28 -0
  29. opencode_llama_cpp_launcher-0.1.0/tests/test_ports.py +23 -0
  30. opencode_llama_cpp_launcher-0.1.0/uv.lock +212 -0
@@ -0,0 +1,80 @@ .github/workflows/release.yml
+ name: Release
+
+ on:
+   push:
+     tags:
+       - "v*.*.*"
+
+ jobs:
+   build:
+     name: Test and build
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Check out repository
+         uses: actions/checkout@v6
+
+       - name: Set up Python
+         uses: actions/setup-python@v6
+         with:
+           python-version: "3.12"
+
+       - name: Install the latest version of uv
+         uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0
+
+       - name: Install dependencies
+         run: uv sync --frozen --dev
+
+       - name: Run tests
+         run: uv run pytest
+
+       - name: Build package
+         run: uv build
+
+       - name: Upload distributions
+         uses: actions/upload-artifact@v4
+         with:
+           name: python-package-distributions
+           path: dist/
+
+   github-release:
+     name: Create GitHub release
+     runs-on: ubuntu-latest
+     needs: build
+
+     permissions:
+       contents: write
+
+     steps:
+       - name: Download distributions
+         uses: actions/download-artifact@v4
+         with:
+           name: python-package-distributions
+           path: dist/
+
+       - name: Create GitHub release
+         run: gh release create "$GITHUB_REF_NAME" dist/* --generate-notes
+         env:
+           GH_TOKEN: ${{ github.token }}
+
+   pypi-publish:
+     name: Publish to PyPI
+     runs-on: ubuntu-latest
+     needs: build
+
+     environment:
+       name: pypi
+       url: https://pypi.org/project/opencode-llama-cpp-launcher/
+
+     permissions:
+       id-token: write
+
+     steps:
+       - name: Download distributions
+         uses: actions/download-artifact@v4
+         with:
+           name: python-package-distributions
+           path: dist/
+
+       - name: Publish distributions to PyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,13 @@ .gitignore
+ .venv/
+ __pycache__/
+ *.py[cod]
+ .pytest_cache/
+
+ opencode-llama.yaml
+ .opencode-llama.yaml
+ .opencode-llama.yml
+
+ # Build and packaging output.
+ build/
+ dist/
+ *.egg-info/
@@ -0,0 +1,21 @@ LICENSE
+ MIT License
+
+ Copyright (c) 2026 ribomo
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,127 @@ PKG-INFO
+ Metadata-Version: 2.4
+ Name: opencode-llama-cpp-launcher
+ Version: 0.1.0
+ Summary: One-command launcher for running OpenCode with a local llama.cpp model.
+ License-Expression: MIT
+ License-File: LICENSE
+ Keywords: agentic-coding,cli,llama-cpp,local-llm,opencode
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Environment :: Console
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development
+ Requires-Python: >=3.12
+ Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: typer>=0.12.0
+ Description-Content-Type: text/markdown
+
+ # OpenCode llama.cpp Launcher
+
+ A one-command solution for launching [OpenCode](https://opencode.ai/) with any
+ local LLM that `llama-server` can serve, including models like Qwen, DeepSeek,
+ and Gemma. This launcher starts `llama-server`, waits for it to become ready,
+ wires the OpenAI-compatible provider config into OpenCode, and cleans up when
+ the local agentic coding session ends.
+
+ ## Requirements
+
+ - Python 3.12+
+ - OpenCode
+ - llama.cpp's `llama-server`
+ - A local model supported by `llama-server`, for example Qwen, DeepSeek, or
+   Gemma
+
+ The launcher finds `llama-server` on `PATH`, or you can set `llama_server` in
+ your config.
+
+ ## Install
+
+ From this repository:
+
+ ```bash
+ uv sync --dev
+ ```
+
+ Check that the required external binaries are available:
+
+ ```bash
+ uv run opencode-llama doctor
+ ```
+
+ ## Configure
+
+ Create a project-local config in the project where you want OpenCode to run:
+
+ ```bash
+ cp opencode-llama.example.yaml opencode-llama.yaml
+ ```
+
+ Then edit `opencode-llama.yaml`:
+
+ ```yaml
+ model: /absolute/path/to/model.gguf
+ llama_server: /optional/path/to/llama-server
+ port: 8080
+ ctx_size: 8192
+ ```
+
+ Config lookup order:
+
+ 1. The path passed with `--config`
+ 2. `opencode-llama.yaml` or `opencode-llama.yml` in the project directory
+ 3. `~/.config/opencode-llama.yaml`
+
+ ## Usage
+
+ Run with an explicit config file:
+
+ ```bash
+ uv run opencode-llama --config opencode-llama.yaml
+ ```
+
+ Or pass the model directly:
+
+ ```bash
+ uv run opencode-llama --model /absolute/path/to/model.gguf
+ ```
+
+ Useful options:
+
+ ```bash
+ uv run opencode-llama --help
+ uv run opencode-llama --dry-run
+ uv run opencode-llama --config opencode-llama.yaml
+ uv run opencode-llama --port 9001
+ uv run opencode-llama --ctx-size 8192
+ uv run opencode-llama --llama-server /absolute/path/to/llama-server
+ ```
+
+ If `llama-server` fails before becoming healthy, the launcher includes a bounded
+ tail of the server's startup output in the error message. Successful runs stay
+ quiet.
+
+ ## Development
+
+ Run the test suite:
+
+ ```bash
+ uv run pytest
+ ```
+
+ Before publishing, check for local files:
+
+ ```bash
+ git status --short --ignored
+ ```
+
+ Do not commit local launcher configs, virtual environments, caches, build
+ artifacts, or model paths.
+
+ ## License
+
+ MIT
@@ -0,0 +1,105 @@ README.md
+ # OpenCode llama.cpp Launcher
+
+ A one-command solution for launching [OpenCode](https://opencode.ai/) with any
+ local LLM that `llama-server` can serve, including models like Qwen, DeepSeek,
+ and Gemma. This launcher starts `llama-server`, waits for it to become ready,
+ wires the OpenAI-compatible provider config into OpenCode, and cleans up when
+ the local agentic coding session ends.
+
+ ## Requirements
+
+ - Python 3.12+
+ - OpenCode
+ - llama.cpp's `llama-server`
+ - A local model supported by `llama-server`, for example Qwen, DeepSeek, or
+   Gemma
+
+ The launcher finds `llama-server` on `PATH`, or you can set `llama_server` in
+ your config.
+
+ ## Install
+
+ From this repository:
+
+ ```bash
+ uv sync --dev
+ ```
+
+ Check that the required external binaries are available:
+
+ ```bash
+ uv run opencode-llama doctor
+ ```
+
+ ## Configure
+
+ Create a project-local config in the project where you want OpenCode to run:
+
+ ```bash
+ cp opencode-llama.example.yaml opencode-llama.yaml
+ ```
+
+ Then edit `opencode-llama.yaml`:
+
+ ```yaml
+ model: /absolute/path/to/model.gguf
+ llama_server: /optional/path/to/llama-server
+ port: 8080
+ ctx_size: 8192
+ ```
+
+ Config lookup order:
+
+ 1. The path passed with `--config`
+ 2. `opencode-llama.yaml` or `opencode-llama.yml` in the project directory
+ 3. `~/.config/opencode-llama.yaml`
+
+ ## Usage
+
+ Run with an explicit config file:
+
+ ```bash
+ uv run opencode-llama --config opencode-llama.yaml
+ ```
+
+ Or pass the model directly:
+
+ ```bash
+ uv run opencode-llama --model /absolute/path/to/model.gguf
+ ```
+
+ Useful options:
+
+ ```bash
+ uv run opencode-llama --help
+ uv run opencode-llama --dry-run
+ uv run opencode-llama --config opencode-llama.yaml
+ uv run opencode-llama --port 9001
+ uv run opencode-llama --ctx-size 8192
+ uv run opencode-llama --llama-server /absolute/path/to/llama-server
+ ```
+
+ If `llama-server` fails before becoming healthy, the launcher includes a bounded
+ tail of the server's startup output in the error message. Successful runs stay
+ quiet.
+
+ ## Development
+
+ Run the test suite:
+
+ ```bash
+ uv run pytest
+ ```
+
+ Before publishing, check for local files:
+
+ ```bash
+ git status --short --ignored
+ ```
+
+ Do not commit local launcher configs, virtual environments, caches, build
+ artifacts, or model paths.
+
+ ## License
+
+ MIT
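
The config lookup order documented in the README above maps naturally onto a small path-resolution helper. The sketch below is illustrative only: it assumes the documented behaviour, the function name `resolve_config_path` is hypothetical, and the package's actual loader lives in `opencode_llama_cpp_launcher/storage/config_loader.py`, which is not reproduced in this excerpt.

```python
# Illustrative sketch of the documented lookup order; not the package's actual
# loader (storage/config_loader.py is not shown here), and resolve_config_path
# is a hypothetical name.
from pathlib import Path


def resolve_config_path(project: Path, explicit: Path | None = None) -> Path | None:
    if explicit is not None:
        return explicit  # 1. --config always wins
    for name in ("opencode-llama.yaml", "opencode-llama.yml"):
        candidate = project / name
        if candidate.is_file():
            return candidate  # 2. project-local config
    fallback = Path.home() / ".config" / "opencode-llama.yaml"
    return fallback if fallback.is_file() else None  # 3. user-level config
```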
@@ -0,0 +1,15 @@ opencode-llama.example.yaml
+ # Example launcher config.
+ # Rename to .opencode-llama.yaml in the project where you want OpenCode to run.
+
+ # Absolute path to your local GGUF model file.
+ model: /absolute/path/to/model.gguf
+
+ # Optional absolute path to llama-server. Omit this to use PATH lookup.
+ llama_server: /optional/path/to/llama-server
+
+ # Preferred llama-server port. If this port is busy, the launcher will choose
+ # an available port automatically.
+ port: 8080
+
+ # llama-server context size.
+ ctx_size: 8192
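
The comment above about falling back to an available port implies a free-port probe. The following is a minimal sketch of one common way to do that; it is an assumption for illustration, not necessarily how `services/ports.py` (not shown in this excerpt) implements it.

```python
# Sketch of a common free-port probe; services/ports.py is not shown in this
# excerpt, so this is an assumption, not the package's implementation.
import socket


def pick_port(preferred: int = 8080, host: str = "127.0.0.1") -> int:
    """Return the preferred port if it is free, otherwise an OS-assigned free port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.bind((host, preferred))
            return preferred
        except OSError:
            pass  # preferred port is busy; fall through to an ephemeral port
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((host, 0))  # port 0 asks the OS for any free port
        return sock.getsockname()[1]
```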
@@ -0,0 +1,3 @@ opencode_llama_cpp_launcher/__init__.py
+ """OpenCode llama.cpp launcher package."""
+
+ __version__ = "0.1.0"
@@ -0,0 +1,114 @@ opencode_llama_cpp_launcher/cli/entrypoint.py
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import typer
+
+ from opencode_llama_cpp_launcher.services.binaries import require_binary
+ from opencode_llama_cpp_launcher.services.errors import LauncherError
+ from opencode_llama_cpp_launcher.services.launch_config_loader import LaunchConfigLoader
+ from opencode_llama_cpp_launcher.services.launcher import Launcher
+
+
+ app = typer.Typer(
+     context_settings={"help_option_names": ["-h", "--help"]},
+     invoke_without_command=True,
+     no_args_is_help=False,
+     help="Launch OpenCode against a local llama.cpp GGUF model.",
+ )
+
+
+ @app.callback()
+ def launch_callback(
+     ctx: typer.Context,
+     model: Path | None = typer.Option(
+         None,
+         "--model",
+         "-m",
+         help="GGUF model path. Overrides YAML config.",
+     ),
+     llama_server: Path | None = typer.Option(
+         None,
+         "--llama-server",
+         help="llama-server binary path. Overrides YAML config and PATH lookup.",
+     ),
+     project: Path = typer.Option(
+         Path("."),
+         "--project",
+         "-p",
+         help="Project directory where OpenCode should run.",
+     ),
+     config: Path | None = typer.Option(
+         None,
+         "--config",
+         "-f",
+         help="Path to opencode-llama.yaml.",
+     ),
+     port: int | None = typer.Option(
+         None,
+         "--port",
+         help="Preferred llama-server port.",
+     ),
+     ctx_size: int | None = typer.Option(
+         None,
+         "--ctx-size",
+         help="llama-server context size.",
+     ),
+     dry_run: bool = typer.Option(
+         False,
+         "--dry-run",
+         help="Print resolved commands and generated config without launching.",
+     ),
+ ) -> None:
+     if ctx.invoked_subcommand is not None:
+         return
+
+     try:
+         launch_config = LaunchConfigLoader().load(
+             model=model,
+             llama_server=llama_server,
+             project=project,
+             config=config,
+             port=port,
+             ctx_size=ctx_size,
+             dry_run=dry_run,
+         )
+         raise typer.Exit(_shell_exit_code(Launcher().run(launch_config)))
+     except LauncherError as exc:
+         _print_error(str(exc))
+         raise typer.Exit(1) from exc
+
+
+ @app.command()
+ def doctor() -> None:
+     """Check whether required external binaries are available."""
+     failed = False
+
+     for binary_name in ("llama-server", "opencode"):
+         try:
+             binary_path = require_binary(binary_name)
+             typer.echo(f"OK {binary_name}: {binary_path}")
+         except LauncherError as exc:
+             failed = True
+             typer.echo(f"Missing {binary_name}: {exc}", err=True)
+
+     if failed:
+         raise typer.Exit(1)
+
+
+ def _print_error(message: str) -> None:
+     typer.echo(f"Error: {message}", err=True)
+
+
+ def _shell_exit_code(return_code: int) -> int:
+     if return_code >= 0:
+         return return_code
+     return 128 + abs(return_code)
+
+
+ def main() -> None:
+     app()
+
+
+ if __name__ == "__main__":
+     main()
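
`_shell_exit_code` above follows the `subprocess` convention that a negative return code means the child process was killed by signal N, and maps it to the shell-style `128 + N`. A quick illustration:

```python
# Illustration of the mapping in _shell_exit_code: subprocess reports a child
# killed by signal N as returncode -N; shells conventionally report 128 + N.
from opencode_llama_cpp_launcher.cli.entrypoint import _shell_exit_code

assert _shell_exit_code(0) == 0       # clean exit passes through
assert _shell_exit_code(2) == 2       # ordinary non-zero exits pass through
assert _shell_exit_code(-15) == 143   # SIGTERM: 128 + 15
```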
@@ -0,0 +1 @@ opencode_llama_cpp_launcher/models/__init__.py
+ """Data models for launcher configuration and status."""
@@ -0,0 +1,43 @@ opencode_llama_cpp_launcher/models/launch_config.py
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from pathlib import Path
+
+
+ DEFAULT_HOST = "127.0.0.1"
+ DEFAULT_PORT = 8080
+ MAX_PORT = 65535
+ DEFAULT_CTX_SIZE = 8192
+
+
+ @dataclass(frozen=True)
+ class LaunchConfig:
+     project: Path
+     model_path: Path
+     llama_server_path: Path | None = None
+     port: int = DEFAULT_PORT
+     ctx_size: int = DEFAULT_CTX_SIZE
+     host: str = DEFAULT_HOST
+     dry_run: bool = False
+
+
+ @dataclass(frozen=True)
+ class LlamaServerConfig:
+     model_path: Path
+     command: list[str]
+     selected_port: int
+     root_url: str
+     base_url: str
+
+
+ @dataclass(frozen=True)
+ class OpenCodeConfig:
+     command: list[str]
+     config: dict
+
+
+ @dataclass(frozen=True)
+ class BuiltLaunchConfig:
+     project: Path
+     llama_server: LlamaServerConfig
+     opencode: OpenCodeConfig
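
For a quick sense of how the frozen dataclasses above compose, here is a small illustrative construction; the model path is a placeholder, not a real file.

```python
# Illustrative only; the model path below is a placeholder.
from pathlib import Path

from opencode_llama_cpp_launcher.models.launch_config import DEFAULT_CTX_SIZE, LaunchConfig

config = LaunchConfig(
    project=Path("."),
    model_path=Path("/absolute/path/to/model.gguf"),
    port=9001,       # overrides DEFAULT_PORT (8080)
    dry_run=True,
)
assert config.ctx_size == DEFAULT_CTX_SIZE  # unset fields keep their defaults
```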
@@ -0,0 +1,14 @@ opencode_llama_cpp_launcher/models/launch_status.py
+ from __future__ import annotations
+
+ from enum import StrEnum
+
+
+ class LaunchStatus(StrEnum):
+     IDLE = "Idle"
+     CHECKING_PREREQUISITES = "Checking prerequisites"
+     STARTING_LLAMA_SERVER = "Starting llama-server"
+     WAITING_FOR_LLAMA_SERVER = "Waiting for llama-server"
+     DETECTING_MODEL = "Detecting model"
+     STARTING_OPENCODE = "Starting OpenCode"
+     READY = "Ready"
+     ERROR = "Error"
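
Because `LaunchStatus` is a `StrEnum` (available on the Python 3.12+ the package requires), each member is itself a string carrying its human-readable label, which keeps status reporting simple, for example:

```python
# StrEnum members are plain strings, so they format and compare as their values.
from opencode_llama_cpp_launcher.models.launch_status import LaunchStatus

status = LaunchStatus.WAITING_FOR_LLAMA_SERVER
print(f"Status: {status}")                    # -> Status: Waiting for llama-server
assert status == "Waiting for llama-server"   # equality with the raw label holds
```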
@@ -0,0 +1,12 @@ opencode_llama_cpp_launcher/services/binaries.py
+ from __future__ import annotations
+
+ import shutil
+
+ from opencode_llama_cpp_launcher.services.errors import LauncherError
+
+
+ def require_binary(name: str) -> str:
+     binary_path = shutil.which(name)
+     if binary_path is None:
+         raise LauncherError(f"`{name}` was not found in PATH.")
+     return binary_path
@@ -0,0 +1,5 @@ opencode_llama_cpp_launcher/services/errors.py
+ from __future__ import annotations
+
+
+ class LauncherError(RuntimeError):
+     """Expected user-facing launcher failure."""
@@ -0,0 +1,59 @@ opencode_llama_cpp_launcher/services/health.py
+ from __future__ import annotations
+
+ import json
+ import time
+ import urllib.error
+ import urllib.request
+ from collections.abc import Callable
+
+ from opencode_llama_cpp_launcher.services.errors import LauncherError
+
+
+ def wait_for_health(
+     root_url: str,
+     timeout_seconds: float = 120.0,
+     interval_seconds: float = 0.5,
+     is_server_running: Callable[[], bool] | None = None,
+ ) -> None:
+     deadline = time.monotonic() + timeout_seconds
+     last_error: Exception | None = None
+
+     while time.monotonic() < deadline:
+         if is_server_running is not None and not is_server_running():
+             raise LauncherError("llama-server exited before becoming ready.")
+
+         try:
+             with urllib.request.urlopen(f"{root_url}/health", timeout=2.0) as response:
+                 if response.status == 200:
+                     return
+         except OSError as exc:
+             last_error = exc
+
+         if is_server_running is not None and not is_server_running():
+             raise LauncherError("llama-server exited before becoming ready.")
+
+         time.sleep(interval_seconds)
+
+     suffix = f" Last error: {last_error}" if last_error else ""
+     raise LauncherError(f"llama-server did not become ready at {root_url}.{suffix}")
+
+
+ def fetch_first_model_id(base_url: str) -> str:
+     try:
+         with urllib.request.urlopen(f"{base_url}/models", timeout=5.0) as response:
+             payload = json.loads(response.read().decode("utf-8"))
+     except urllib.error.URLError as exc:
+         raise LauncherError(f"Could not query llama-server models: {exc}") from exc
+     except (OSError, json.JSONDecodeError, UnicodeDecodeError) as exc:
+         raise LauncherError("llama-server returned invalid JSON from /v1/models.") from exc
+
+     data = payload.get("data") if isinstance(payload, dict) else None
+     if not isinstance(data, list) or not data:
+         raise LauncherError("No models returned from llama-server /v1/models.")
+
+     first_model = data[0]
+     model_id = first_model.get("id") if isinstance(first_model, dict) else None
+     if not isinstance(model_id, str) or not model_id:
+         raise LauncherError("First llama-server model entry did not include an id.")
+
+     return model_id
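
To see how the two helpers above fit together, here is a minimal sketch of driving them against a freshly spawned `llama-server`. The real orchestration lives in `services/launcher.py`, which is not reproduced in this excerpt, so the wiring, command, and paths below are assumptions for illustration only.

```python
# Sketch only: how wait_for_health and fetch_first_model_id could be driven.
# The package's actual orchestration is in services/launcher.py (not shown here).
import subprocess

from opencode_llama_cpp_launcher.services.health import fetch_first_model_id, wait_for_health

# Placeholder command; a real launcher builds this from its resolved config.
process = subprocess.Popen(
    ["llama-server", "--model", "/absolute/path/to/model.gguf", "--port", "8080"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)
root_url = "http://127.0.0.1:8080"
wait_for_health(root_url, is_server_running=lambda: process.poll() is None)
model_id = fetch_first_model_id(f"{root_url}/v1")  # OpenAI-compatible /v1/models endpoint
print(f"llama-server is serving model: {model_id}")
process.terminate()
```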