dlab-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dlab/__init__.py +6 -0
- dlab/cli.py +1075 -0
- dlab/config.py +190 -0
- dlab/create_dpack.py +1096 -0
- dlab/create_dpack_wizard.py +1471 -0
- dlab/create_parallel_agent_wizard.py +582 -0
- dlab/data/__init__.py +0 -0
- dlab/data/models.json +1793 -0
- dlab/docker.py +591 -0
- dlab/local.py +269 -0
- dlab/model_fallback.py +360 -0
- dlab/parallel_tool.py +18 -0
- dlab/session.py +389 -0
- dlab/timeline.py +684 -0
- dlab/tui/__init__.py +9 -0
- dlab/tui/app.py +664 -0
- dlab/tui/log_watcher.py +208 -0
- dlab/tui/models.py +438 -0
- dlab/tui/widgets/__init__.py +18 -0
- dlab/tui/widgets/agent_list.py +170 -0
- dlab/tui/widgets/artifacts_pane.py +618 -0
- dlab/tui/widgets/log_view.py +505 -0
- dlab/tui/widgets/search_popup.py +151 -0
- dlab/tui/widgets/status_bar.py +106 -0
- dlab_cli-0.1.0.dist-info/METADATA +237 -0
- dlab_cli-0.1.0.dist-info/RECORD +30 -0
- dlab_cli-0.1.0.dist-info/WHEEL +5 -0
- dlab_cli-0.1.0.dist-info/entry_points.txt +2 -0
- dlab_cli-0.1.0.dist-info/licenses/LICENSE +201 -0
- dlab_cli-0.1.0.dist-info/top_level.txt +1 -0
dlab/local.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Local execution backend for running opencode without Docker.
|
|
3
|
+
|
|
4
|
+
Used when --no-sandboxing is passed or Docker is not available.
|
|
5
|
+
Instead of replicating the Docker environment, this copies the docker/
|
|
6
|
+
directory into the work dir as _docker/ and prepends instructions to the
|
|
7
|
+
prompt telling the agent to set up its own environment.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import os
|
|
11
|
+
import shutil
|
|
12
|
+
import subprocess
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def is_docker_available() -> bool:
    """
    Check whether the Docker CLI is installed and its daemon is responsive.

    Returns
    -------
    bool
        True if `docker` is on PATH and `docker info` exits with status 0.
    """
    # No docker binary on PATH -> no point probing the daemon.
    if shutil.which("docker") is None:
        return False
    try:
        probe: subprocess.CompletedProcess[bytes] = subprocess.run(
            ["docker", "info"],
            capture_output=True,
            timeout=10,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError):
        # A hung daemon (timeout) or a binary that vanished between the
        # which() check and the call both mean "not available".
        return False
    return probe.returncode == 0
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def detect_package_manager(config_dir: str) -> str:
    """
    Infer the intended package manager from the decision-pack's docker/ dir.

    Parameters
    ----------
    config_dir : str
        Path to decision-pack directory.

    Returns
    -------
    str
        One of "conda", "pixi", "pip".
    """
    docker_dir: Path = Path(config_dir) / "docker"
    # Marker files checked in priority order; pip is the default fallback.
    markers: list[tuple[str, str]] = [
        ("environment.yml", "conda"),
        ("pixi.toml", "pixi"),
    ]
    for filename, manager in markers:
        if (docker_dir / filename).exists():
            return manager
    return "pip"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def copy_docker_dir(config_dir: str, work_dir: str) -> None:
    """
    Mirror the decision-pack's docker/ directory into the work dir as _docker/.

    An existing _docker/ in the work dir is removed first so the copy is a
    clean replacement. No-op when the source docker/ directory is absent.

    Parameters
    ----------
    config_dir : str
        Path to decision-pack directory.
    work_dir : str
        Session work directory.
    """
    source: Path = Path(config_dir) / "docker"
    target: Path = Path(work_dir) / "_docker"
    if not source.exists():
        return
    if target.exists():
        # copytree refuses to overwrite; clear the stale copy first.
        shutil.rmtree(target)
    shutil.copytree(str(source), str(target))
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def build_local_prompt(prompt: str, config: dict[str, Any]) -> str:
    """
    Prepend system instructions for unsandboxed local execution.

    Parameters
    ----------
    prompt : str
        Original user prompt.
    config : dict[str, Any]
        decision-pack configuration. Reads "package_manager" when present;
        otherwise detects it from the pack at config["config_dir"].

    Returns
    -------
    str
        Prompt with system instructions prepended.
    """
    # Evaluate the detection fallback lazily: dict.get(key, default) would run
    # detect_package_manager() — filesystem access, and a KeyError when
    # "config_dir" is absent — even when "package_manager" is already set.
    pkg_mgr: str = config.get("package_manager") or detect_package_manager(
        config["config_dir"]
    )

    system_instructions: str = (
        "IMPORTANT --- SYSTEM INSTRUCTIONS (NO-SANDBOXING MODE):\n\n"
        "You're running locally, NOT inside a Docker container. The decision-pack "
        f"was designed for a Docker environment with python managed by {pkg_mgr}.\n\n"
        "## Step 1: Set up the environment\n\n"
        "Read `_docker/Dockerfile` carefully. It shows:\n"
        "- Which base image and package manager was intended\n"
        "- Which dependency file to install from (requirements.txt, environment.yml, pixi.toml)\n"
        "- Which directories from _docker/ would have been COPY'd into the container "
        "(e.g., custom Python libraries like `COPY <SUB_DIR>/ /opt/<SUB_DIR>/`)\n\n"
        f"Use `{pkg_mgr}` to create and install a local environment from the dependency "
        "file in `_docker/`. For example:\n"
        "- pip: `python -m venv .venv && .venv/bin/pip install -r _docker/requirements.txt`\n"
        "- conda: `conda create -p .conda-env --yes && conda env update -p .conda-env -f _docker/environment.yml`\n"
        "- uv: `uv venv .venv && uv pip install --python .venv/bin/python -r _docker/requirements.txt`\n"
        "- pixi: `cp _docker/pixi.toml . && pixi install`\n\n"
        "If the Dockerfile COPY'd any Python libraries from _docker/ into the container, "
        "you need to make those importable by setting the ABSOLUTE path to _docker/ in "
        "PYTHONPATH when running scripts:\n"
        "`PYTHONPATH=/absolute/path/to/workdir/_docker:$PYTHONPATH python my_script.py`\n\n"
        "## Step 2: Verify the environment works\n\n"
        "After setting up, run a small test script that imports the key packages from the "
        "dependency file to confirm everything is installed correctly. Fix any import errors "
        "before proceeding.\n\n"
        "## Step 3: Read the hooks\n\n"
        "Read `_hooks/` — these are scripts that would have run inside the container before "
        "and after the agent session. Adapt and run pre-run hooks if they make sense locally "
        "(e.g., skip Modal deployment if not applicable, but run data setup scripts).\n\n"
        "## Step 4: Subagent environment instructions\n\n"
        "When you call parallel-agents or task subagents, you MUST include in each prompt:\n"
        "- The absolute path to the correct python binary (e.g., `/absolute/path/to/.venv/bin/python`)\n"
        "- The correct PYTHONPATH value (e.g., `PYTHONPATH=/absolute/path/to/workdir/_docker:$PYTHONPATH`)\n"
        "- Instructions to use this python for all script execution\n\n"
        "Subagents do NOT inherit your environment setup. They start fresh and need explicit "
        "instructions on which python to use.\n\n"
        "---\n\n"
        "Now follows the User's request:\n"
    )

    return f"{system_instructions}{prompt}"
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def build_local_env(env_file: str | None = None) -> dict[str, str]:
|
|
148
|
+
"""
|
|
149
|
+
Build environment variables dict for local execution.
|
|
150
|
+
|
|
151
|
+
Parameters
|
|
152
|
+
----------
|
|
153
|
+
env_file : str | None
|
|
154
|
+
Optional .env file to parse and include.
|
|
155
|
+
|
|
156
|
+
Returns
|
|
157
|
+
-------
|
|
158
|
+
dict[str, str]
|
|
159
|
+
Environment variables.
|
|
160
|
+
"""
|
|
161
|
+
env: dict[str, str] = dict(os.environ)
|
|
162
|
+
|
|
163
|
+
if env_file:
|
|
164
|
+
for line in Path(env_file).read_text().splitlines():
|
|
165
|
+
line = line.strip()
|
|
166
|
+
if not line or line.startswith("#"):
|
|
167
|
+
continue
|
|
168
|
+
key, _, value = line.partition("=")
|
|
169
|
+
value = value.strip().strip("'\"")
|
|
170
|
+
env[key.strip()] = value
|
|
171
|
+
|
|
172
|
+
return env
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def run_local_command(
|
|
176
|
+
command: list[str],
|
|
177
|
+
work_dir: str,
|
|
178
|
+
env: dict[str, str],
|
|
179
|
+
timeout: int | None = None,
|
|
180
|
+
) -> tuple[int, str, str]:
|
|
181
|
+
"""
|
|
182
|
+
Run a command locally in the work directory.
|
|
183
|
+
|
|
184
|
+
Parameters
|
|
185
|
+
----------
|
|
186
|
+
command : list[str]
|
|
187
|
+
Command and arguments.
|
|
188
|
+
work_dir : str
|
|
189
|
+
Working directory.
|
|
190
|
+
env : dict[str, str]
|
|
191
|
+
Environment variables.
|
|
192
|
+
timeout : int | None
|
|
193
|
+
Timeout in seconds.
|
|
194
|
+
|
|
195
|
+
Returns
|
|
196
|
+
-------
|
|
197
|
+
tuple[int, str, str]
|
|
198
|
+
(exit_code, stdout, stderr).
|
|
199
|
+
"""
|
|
200
|
+
result: subprocess.CompletedProcess[str] = subprocess.run(
|
|
201
|
+
command,
|
|
202
|
+
capture_output=True,
|
|
203
|
+
text=True,
|
|
204
|
+
cwd=work_dir,
|
|
205
|
+
env=env,
|
|
206
|
+
timeout=timeout,
|
|
207
|
+
)
|
|
208
|
+
return result.returncode, result.stdout, result.stderr
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def run_opencode_local(
    work_dir: str,
    prompt: str,
    model: str,
    env: dict[str, str],
    timeout: int | None = None,
    log_prefix: str = "main",
) -> tuple[int, str, str]:
    """
    Run opencode locally in the work directory.

    Writes the prompt and a small bash runner script into the work dir, then
    executes the runner. Output is both captured and tee'd to a log file
    under `_opencode_logs/`.

    Parameters
    ----------
    work_dir : str
        Session work directory.
    prompt : str
        Prompt text (already includes system instructions).
    model : str
        LLM model identifier.
    env : dict[str, str]
        Environment variables.
    timeout : int | None
        Timeout in seconds.
    log_prefix : str
        Log file prefix.

    Returns
    -------
    tuple[int, str, str]
        (exit_code, stdout, stderr).
    """
    work_path: Path = Path(work_dir)
    logs_dir: Path = work_path / "_opencode_logs"
    # Ensure the log directory exists — otherwise `tee` in the runner script
    # cannot open the log file and, with pipefail, sinks the whole pipeline.
    logs_dir.mkdir(parents=True, exist_ok=True)

    # Write prompt to file (avoids shell quoting issues)
    prompt_file: Path = work_path / ".prompt.txt"
    prompt_file.write_text(prompt)

    # Build runner script
    log_path: str = str(logs_dir / f"{log_prefix}.log")
    runner_script: str = f'''#!/bin/bash
set -o pipefail
prompt=$(cat "{prompt_file}")
opencode run --format json --log-level DEBUG --model "{model}" "$prompt" 2>&1 | tee "{log_path}"
'''
    runner_file: Path = work_path / ".run_opencode.sh"
    runner_file.write_text(runner_script)
    runner_file.chmod(0o755)

    result: subprocess.CompletedProcess[str] = subprocess.run(
        ["bash", str(runner_file)],
        capture_output=True,
        text=True,
        cwd=work_dir,
        env=env,
        timeout=timeout,
    )

    return result.returncode, result.stdout, result.stderr
|
dlab/model_fallback.py
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Model validation and provider fallback for agent configs.
|
|
3
|
+
|
|
4
|
+
When a decision-pack references model providers whose API keys are not
|
|
5
|
+
in the .env file, this module replaces those model strings with the
|
|
6
|
+
orchestrator's model so users only need a single API key to get started.
|
|
7
|
+
|
|
8
|
+
Two-phase design:
|
|
9
|
+
1. preflight_check() — runs BEFORE session creation on source dpack files.
|
|
10
|
+
Catches fatal errors (orchestrator key missing, unknown models) early.
|
|
11
|
+
2. process_opencode_dir() — runs DURING session setup on work-dir copies.
|
|
12
|
+
Applies fallback replacements for missing provider keys.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import difflib
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
|
|
20
|
+
from dlab.create_dpack import KNOWN_PROVIDER_ENVS, get_model_list, get_provider_env_vars
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# Matches provider/model-name patterns (e.g. "anthropic/claude-sonnet-4-5")
# Negative lookahead (?!/) excludes file paths like "opencode/agents/foo.md"
# NOTE: this deliberately over-matches any "word/word" token; callers such as
# find_model_strings() filter the matches down to known provider prefixes.
_MODEL_PATTERN: re.Pattern[str] = re.compile(
    r"\b([a-zA-Z0-9_-]+/[a-zA-Z0-9._-]+)\b(?!/)"
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def parse_env_file(env_file: str | None) -> dict[str, str]:
|
|
31
|
+
"""
|
|
32
|
+
Parse a .env file into a key-value dict.
|
|
33
|
+
|
|
34
|
+
Parameters
|
|
35
|
+
----------
|
|
36
|
+
env_file : str | None
|
|
37
|
+
Path to .env file, or None.
|
|
38
|
+
|
|
39
|
+
Returns
|
|
40
|
+
-------
|
|
41
|
+
dict[str, str]
|
|
42
|
+
Parsed environment variables. Empty dict if env_file is None
|
|
43
|
+
or file does not exist.
|
|
44
|
+
"""
|
|
45
|
+
if not env_file:
|
|
46
|
+
return {}
|
|
47
|
+
path: Path = Path(env_file)
|
|
48
|
+
if not path.exists():
|
|
49
|
+
return {}
|
|
50
|
+
|
|
51
|
+
env: dict[str, str] = {}
|
|
52
|
+
for line in path.read_text().splitlines():
|
|
53
|
+
line = line.strip()
|
|
54
|
+
if not line or line.startswith("#"):
|
|
55
|
+
continue
|
|
56
|
+
key, _, value = line.partition("=")
|
|
57
|
+
value = value.strip().strip("'\"")
|
|
58
|
+
env[key.strip()] = value
|
|
59
|
+
return env
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def get_available_providers(env_vars: dict[str, str]) -> set[str]:
    """
    Return the set of providers whose required API keys are present.

    Parameters
    ----------
    env_vars : dict[str, str]
        Parsed environment variables.

    Returns
    -------
    set[str]
        Provider names (e.g. {"anthropic", "google"}) with all required
        keys present and non-empty.
    """
    # A provider counts as available only when every one of its required
    # keys is set to a truthy (non-empty) value.
    return {
        provider
        for provider, required_keys in KNOWN_PROVIDER_ENVS.items()
        if all(env_vars.get(key) for key in required_keys)
    }
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _strip_comments(text: str) -> str:
    """Drop whole-line comments (lines whose first non-space char is '#')."""
    # Only full comment lines are removed; trailing '#' within a line stays.
    kept = (line for line in text.splitlines() if not line.lstrip().startswith("#"))
    return "\n".join(kept)
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def find_model_strings(text: str) -> list[str]:
    """
    Extract all provider/model-name strings from non-comment text.

    Parameters
    ----------
    text : str
        File content to scan.

    Returns
    -------
    list[str]
        Deduplicated list of model strings found, in first-seen order.
    """
    candidates: list[str] = _MODEL_PATTERN.findall(_strip_comments(text))
    # The regex over-matches any word/word token; keep only matches whose
    # prefix is a known provider, deduplicated while preserving order.
    known_prefixes: set[str] = set(KNOWN_PROVIDER_ENVS.keys())
    ordered: dict[str, None] = {}
    for candidate in candidates:
        if candidate.split("/")[0] in known_prefixes:
            ordered.setdefault(candidate, None)
    return list(ordered)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def _collect_models_from_dir(directory: Path) -> list[str]:
    """Scan all .yaml/.yml/.md files under *directory* for model strings."""
    found: list[str] = []
    candidates: list[Path] = sorted(
        path
        for pattern in ("*.yaml", "*.yml", "*.md")
        for path in directory.rglob(pattern)
    )
    for path in candidates:
        found.extend(find_model_strings(path.read_text()))
    # Deduplicate while preserving first-seen order.
    return list(dict.fromkeys(found))
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _format_env_setup_hint(model: str) -> str:
    """Format a hint naming the env var(s) needed for a model's provider."""
    required: list[str] = get_provider_env_vars(model)
    if not required:
        return "Check provider documentation for required API key"
    return f"Set {', '.join(required)} in your .env file"
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _unknown_model_message(model: str, all_known: list[str]) -> str:
    """Build the error message for a model absent from the known-model list."""
    suggestions: list[str] = sorted(difflib.get_close_matches(
        model, all_known, n=3, cutoff=0.6,
    ))
    if suggestions:
        alt: str = ", ".join(suggestions)
        return f"Unknown model {model} — did you mean: {alt}?"
    return f"Unknown model {model}"


def preflight_check(
    orchestrator_model: str,
    config_dir: str,
    env_file: str | None,
    no_sandboxing: bool = False,
) -> tuple[list[str], list[str]]:
    """
    Validate models before session creation. Runs on source dpack files.

    Returns errors (fatal, abort run) and warnings (informational, continue).

    Parameters
    ----------
    orchestrator_model : str
        The orchestrator's model (from --model or config default_model).
    config_dir : str
        Path to the decision-pack config directory.
    env_file : str | None
        Path to .env file.
    no_sandboxing : bool
        If True, also check os.environ for API keys (local mode inherits
        the shell environment).

    Returns
    -------
    tuple[list[str], list[str]]
        (errors, warnings). Errors are fatal and should abort the run.
        Warnings are informational (e.g. fallback will be applied).
    """
    errors: list[str] = []
    warnings: list[str] = []

    env_vars: dict[str, str] = {}
    if no_sandboxing:
        env_vars.update(os.environ)
    env_vars.update(parse_env_file(env_file))
    available: set[str] = get_available_providers(env_vars)

    # Validate orchestrator model name — fatal, nothing else can proceed.
    all_known: list[str] = get_model_list()
    known: set[str] = set(all_known)
    if orchestrator_model not in known:
        errors.append(_unknown_model_message(orchestrator_model, all_known))
        return errors, warnings

    # Check orchestrator model's provider key — fatal, since this model is
    # also the fallback target for every other model.
    orchestrator_provider: str = orchestrator_model.split("/")[0]
    if orchestrator_provider in KNOWN_PROVIDER_ENVS and orchestrator_provider not in available:
        env_hint: str = _format_env_setup_hint(orchestrator_model)
        errors.append(
            f"Orchestrator model {orchestrator_model} requires an API key "
            f"that is not set. {env_hint}"
        )
        return errors, warnings

    # Scan source opencode/ dir for model strings
    opencode_dir: Path = Path(config_dir) / "opencode"
    if not opencode_dir.exists():
        return errors, warnings

    all_models: list[str] = _collect_models_from_dir(opencode_dir)

    # Validate agent model names exist in known list — collect all errors
    # instead of stopping at the first one.
    for model in all_models:
        if model not in known:
            errors.append(_unknown_model_message(model, all_known))

    # Warn about agent models whose provider keys are missing; these will be
    # replaced with the orchestrator model during session setup.
    unavailable: set[str] = set(KNOWN_PROVIDER_ENVS.keys()) - available
    models_needing_fallback: list[str] = [
        model
        for model in all_models
        if model.split("/")[0] in unavailable and model != orchestrator_model
    ]

    seen: set[str] = set()
    for model in models_needing_fallback:
        if model in seen:
            continue
        seen.add(model)
        env_hint = _format_env_setup_hint(model)
        warnings.append(
            f"{model} -> {orchestrator_model} ({env_hint})"
        )

    return errors, warnings
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def apply_model_fallback(
    text: str,
    orchestrator_model: str,
    unavailable_providers: set[str],
) -> tuple[str, list[str]]:
    """
    Replace model strings whose providers are unavailable.

    Comment lines (leading '#') are left untouched; on every other line,
    each provider/model token with a missing provider key is rewritten to
    the orchestrator model.

    Parameters
    ----------
    text : str
        File content.
    orchestrator_model : str
        Model to substitute in place of unavailable ones.
    unavailable_providers : set[str]
        Provider names whose API keys are missing.

    Returns
    -------
    tuple[str, list[str]]
        (modified_text, list of replacement descriptions).
    """
    if not unavailable_providers:
        return text, []

    swapped: list[str] = []

    def _substitute(match: re.Match[str]) -> str:
        token: str = match.group(1)
        if token.split("/")[0] not in unavailable_providers:
            return token
        swapped.append(f"{token} -> {orchestrator_model}")
        return orchestrator_model

    rewritten: list[str] = [
        line if line.lstrip().startswith("#")
        else _MODEL_PATTERN.sub(_substitute, line)
        for line in text.splitlines(keepends=True)
    ]
    return "".join(rewritten), swapped
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def process_opencode_dir(
    opencode_dir: str,
    orchestrator_model: str,
    env_file: str | None,
    no_sandboxing: bool = False,
) -> list[str]:
    """
    Apply model fallback to all config files in .opencode/ (work-dir copies).

    Assumes preflight_check() has already validated the orchestrator model.
    Only applies replacements — no validation here.

    Parameters
    ----------
    opencode_dir : str
        Path to the .opencode/ directory in the work dir.
    orchestrator_model : str
        The orchestrator's model (fallback target).
    env_file : str | None
        Path to .env file.
    no_sandboxing : bool
        If True, also check os.environ for API keys.

    Returns
    -------
    list[str]
        Replacement messages (e.g. "parallel_agents/poet.yaml: google/gemini-2.0-flash -> ...").
    """
    root: Path = Path(opencode_dir)
    if not root.exists():
        return []

    env_vars: dict[str, str] = {}
    if no_sandboxing:
        # Local mode inherits the shell environment.
        env_vars.update(os.environ)
    env_vars.update(parse_env_file(env_file))
    available: set[str] = get_available_providers(env_vars)

    # Bail out when the fallback target itself has no usable key...
    fallback_provider: str = orchestrator_model.split("/")[0]
    if fallback_provider in KNOWN_PROVIDER_ENVS and fallback_provider not in available:
        return []

    # ...or when every provider has its key (nothing to replace).
    unavailable: set[str] = set(KNOWN_PROVIDER_ENVS.keys()) - available
    if not unavailable:
        return []

    messages: list[str] = []
    targets: list[Path] = sorted(
        path
        for pattern in ("*.yaml", "*.yml", "*.md")
        for path in root.rglob(pattern)
    )

    for target in targets:
        new_text, replacements = apply_model_fallback(
            target.read_text(), orchestrator_model, unavailable,
        )
        if not replacements:
            continue
        target.write_text(new_text)
        prefix: str = str(target.relative_to(root))
        messages.extend(f"{prefix}: {r}" for r in replacements)

    return messages
|
dlab/parallel_tool.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Template for the parallel-agents.ts tool.
|
|
3
|
+
|
|
4
|
+
This module loads the parallel-agents TypeScript source from dlab/js/
|
|
5
|
+
and exposes it as PARALLEL_AGENTS_SOURCE for use by session setup.
|
|
6
|
+
|
|
7
|
+
WARNING: The template contains an evil hack (git init) to work around OpenCode's config
|
|
8
|
+
traversal behavior. See "Git Init Hack" section in CLAUDE.md for details.
|
|
9
|
+
This should be replaced with a proper solution when OpenCode supports
|
|
10
|
+
disabling parent directory config traversal.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from importlib.resources import files
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# TypeScript source of the parallel-agents tool, read once at import time
# from the packaged dlab/js/ resources. Raises if the resource is missing
# from the installed distribution (see module docstring for caveats).
PARALLEL_AGENTS_SOURCE: str = (
    files("dlab.js").joinpath("parallel-agents.ts").read_text()
)
|