delimit-cli 3.6.12 → 3.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,12 +1,12 @@
1
1
  # Delimit
2
2
 
3
- Governance toolkit for AI coding assistants — API checks, persistent memory, consensus, security.
3
+ One workspace for every AI coding assistant. Switch models, not context.
4
4
 
5
5
  [![npm](https://img.shields.io/npm/v/delimit-cli)](https://www.npmjs.com/package/delimit-cli)
6
6
  [![GitHub Action](https://img.shields.io/badge/GitHub%20Action-v1.5.0-blue)](https://github.com/marketplace/actions/delimit-api-governance)
7
7
  [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
8
8
 
9
- Delimit gives your AI coding assistant governance tools — API breaking change detection, persistent task ledger, security audit, test verification, and multi-model deliberation. Works with Claude Code, Codex, Cursor, and Gemini CLI.
9
+ Your tasks, memory, and governance carry between Claude Code, Codex, and Gemini CLI. Persistent ledger, API breaking change detection, security audit, multi-model deliberation — all shared across assistants.
10
10
 
11
11
  ---
12
12
 
@@ -339,7 +339,7 @@ Run full governance compliance checks. Verify security, policy compliance, evide
339
339
  models.gemini = { name: 'Gemini', api_url: `https://us-central1-aiplatform.googleapis.com/v1/projects/{project}/locations/us-central1/publishers/google/models/gemini-2.5-flash:generateContent`, model: 'gemini-2.5-flash', format: 'vertex_ai', enabled: true };
340
340
  }
341
341
  if (process.env.OPENAI_API_KEY) {
342
- models.openai = { name: 'GPT', api_url: 'https://api.openai.com/v1/chat/completions', model: 'gpt-4o', env_key: 'OPENAI_API_KEY', enabled: true };
342
+ models.openai = { name: 'OpenAI', api_url: 'https://api.openai.com/v1/chat/completions', model: 'gpt-4o', env_key: 'OPENAI_API_KEY', prefer_cli: true, enabled: true };
343
343
  }
344
344
  if (process.env.ANTHROPIC_API_KEY) {
345
345
  models.anthropic = { name: 'Claude', api_url: 'https://api.anthropic.com/v1/messages', model: 'claude-sonnet-4-5-20250514', env_key: 'ANTHROPIC_API_KEY', format: 'anthropic', enabled: true };
@@ -384,7 +384,7 @@ Run full governance compliance checks. Verify security, policy compliance, evide
384
384
  function getClaudeMdContent() {
385
385
  return `# Delimit
386
386
 
387
- Governance toolkit for AI coding assistants.
387
+ One workspace for every AI coding assistant.
388
388
 
389
389
  ## Try these:
390
390
  - "lint my API spec" -- catch breaking changes in your OpenAPI spec
@@ -924,7 +924,11 @@ def deploy_site(project_path: str = ".", message: str = "", env_vars: dict = Non
924
924
  # 4. Vercel build
925
925
  env = {**os.environ}
926
926
  if env_vars:
927
- env.update(env_vars)
927
+ # Whitelist safe env var prefixes — block LD_PRELOAD, PATH overrides, etc.
928
+ blocked = {"LD_PRELOAD", "LD_LIBRARY_PATH", "DYLD_", "PATH", "HOME", "USER", "SHELL"}
929
+ for k, v in env_vars.items():
930
+ if not any(k.startswith(b) for b in blocked):
931
+ env[str(k)] = str(v)
928
932
 
929
933
  try:
930
934
  result = subprocess.run(
@@ -350,29 +350,35 @@ def test_smoke(project_path: str, test_suite: Optional[str] = None) -> Dict[str,
350
350
  framework = detected["framework"]
351
351
  cmd = detected["cmd"]
352
352
 
353
- # If a specific suite is requested, append it
353
+ # Build command as list (never shell=True with user input)
354
+ import shlex
355
+ cmd_list = shlex.split(cmd)
356
+
357
+ # If a specific suite is requested, validate and append
354
358
  if test_suite:
355
- cmd = f"{cmd} {test_suite}"
359
+ # Sanitize: only allow alphanumeric, slashes, dots, underscores, hyphens, colons
360
+ import re
361
+ if not re.match(r'^[\w/.\-:*\[\]]+$', test_suite):
362
+ return {"tool": "test.smoke", "status": "error", "error": f"Invalid test_suite: {test_suite}"}
363
+ cmd_list.append(test_suite)
356
364
 
357
365
  # Detect the right Python executable
358
366
  if framework == "pytest":
359
367
  python_found = False
360
- # Check for venv
361
368
  for venv_dir in ["venv", ".venv", "env"]:
362
369
  venv_python = project / venv_dir / "bin" / "python"
363
370
  if venv_python.exists():
364
- cmd = cmd.replace("python", str(venv_python), 1)
371
+ cmd_list[0] = str(venv_python)
365
372
  python_found = True
366
373
  break
367
- # Fallback to the current interpreter (handles systems where `python` is missing)
368
374
  if not python_found:
369
375
  import sys as _sys
370
- cmd = cmd.replace("python", _sys.executable, 1)
376
+ cmd_list[0] = _sys.executable
371
377
 
372
378
  try:
373
379
  result = subprocess.run(
374
- cmd,
375
- shell=True,
380
+ cmd_list,
381
+ shell=False,
376
382
  cwd=str(project),
377
383
  capture_output=True,
378
384
  text=True,
@@ -38,13 +38,16 @@ DEFAULT_MODELS = {
38
38
  "env_key": "GOOGLE_APPLICATION_CREDENTIALS",
39
39
  "enabled": False,
40
40
  "format": "vertex_ai",
41
+ "prefer_cli": True, # Use gemini CLI if available (Ultra plan), fall back to Vertex AI
42
+ "cli_command": "gemini",
41
43
  },
42
44
  "openai": {
43
- "name": "GPT",
45
+ "name": "OpenAI",
44
46
  "api_url": "https://api.openai.com/v1/chat/completions",
45
47
  "model": "gpt-4o",
46
48
  "env_key": "OPENAI_API_KEY",
47
49
  "enabled": False,
50
+ "prefer_cli": True, # Use Codex CLI if available, fall back to API
48
51
  },
49
52
  "anthropic": {
50
53
  "name": "Claude",
@@ -53,13 +56,8 @@ DEFAULT_MODELS = {
53
56
  "env_key": "ANTHROPIC_API_KEY",
54
57
  "enabled": False,
55
58
  "format": "anthropic",
56
- },
57
- "codex": {
58
- "name": "Codex CLI",
59
- "format": "codex_cli",
60
- "model": "gpt-5.4",
61
- "env_key": "CODEX_CLI",
62
- "enabled": False,
59
+ "prefer_cli": True, # Use claude CLI if available (Pro/Max), fall back to API
60
+ "cli_command": "claude",
63
61
  },
64
62
  }
65
63
 
@@ -75,17 +73,31 @@ def get_models_config() -> Dict[str, Any]:
75
73
  # Auto-detect from environment
76
74
  config = {}
77
75
  for model_id, defaults in DEFAULT_MODELS.items():
78
- if defaults.get("format") == "codex_cli":
79
- # Codex: check if CLI is available
76
+ key = os.environ.get(defaults.get("env_key", ""), "")
77
+
78
+ if defaults.get("prefer_cli"):
79
+ # Prefer CLI (uses existing subscription) over API (extra cost)
80
80
  import shutil
81
- codex_path = shutil.which("codex")
82
- config[model_id] = {
83
- **defaults,
84
- "enabled": codex_path is not None,
85
- "codex_path": codex_path or "",
86
- }
81
+ cli_cmd = defaults.get("cli_command", "codex")
82
+ cli_path = shutil.which(cli_cmd)
83
+ if cli_path:
84
+ config[model_id] = {
85
+ **defaults,
86
+ "format": "codex_cli",
87
+ "enabled": True,
88
+ "codex_path": cli_path,
89
+ "backend": "cli",
90
+ }
91
+ elif key:
92
+ config[model_id] = {
93
+ **defaults,
94
+ "api_key": key,
95
+ "enabled": True,
96
+ "backend": "api",
97
+ }
98
+ else:
99
+ config[model_id] = {**defaults, "enabled": False}
87
100
  else:
88
- key = os.environ.get(defaults["env_key"], "")
89
101
  config[model_id] = {
90
102
  **defaults,
91
103
  "api_key": key,
@@ -101,47 +113,62 @@ def configure_models() -> Dict[str, Any]:
101
113
  available = {k: v for k, v in config.items() if v.get("enabled")}
102
114
  missing = {k: v for k, v in config.items() if not v.get("enabled")}
103
115
 
116
+ model_details = {}
117
+ for k, v in available.items():
118
+ backend = v.get("backend", "api")
119
+ if v.get("format") == "codex_cli":
120
+ backend = "cli"
121
+ model_details[k] = {"name": v.get("name", k), "backend": backend, "model": v.get("model", "")}
122
+
104
123
  return {
105
124
  "configured_models": list(available.keys()),
106
- "missing_models": {k: f"Set {v['env_key']} environment variable" for k, v in missing.items()},
125
+ "model_details": model_details,
126
+ "missing_models": {k: f"Set {v.get('env_key', 'key')} or install {v.get('cli_command', '')} CLI" for k, v in missing.items()},
107
127
  "config_path": str(MODELS_CONFIG),
108
- "note": "Add API keys to enable more models for deliberation. "
109
- "Set env vars or create ~/.delimit/models.json",
128
+ "note": "CLI backends use your existing subscription (no extra API cost). "
129
+ "API backends require separate API keys.",
110
130
  }
111
131
 
112
132
 
113
- def _call_codex(prompt: str, system_prompt: str = "") -> str:
114
- """Call Codex via CLI subprocess."""
133
+ def _call_cli(prompt: str, system_prompt: str = "", cli_path: str = "", cli_command: str = "codex") -> str:
134
+ """Call an AI CLI tool (codex or claude) via subprocess. Uses existing subscription — no API cost."""
115
135
  import subprocess
116
- codex_path = shutil.which("codex")
117
- if not codex_path:
118
- return "[Codex unavailable codex CLI not found in PATH]"
136
+
137
+ if not cli_path:
138
+ cli_path = shutil.which(cli_command) or ""
139
+ if not cli_path:
140
+ return f"[{cli_command} unavailable — CLI not found in PATH]"
119
141
 
120
142
  full_prompt = f"{system_prompt}\n\n{prompt}" if system_prompt else prompt
143
+
144
+ # Build command based on which CLI
145
+ if "claude" in cli_command:
146
+ cmd = [cli_path, "--print", "--dangerously-skip-permissions", full_prompt]
147
+ else:
148
+ # codex
149
+ cmd = [cli_path, "exec", "--dangerously-bypass-approvals-and-sandbox", full_prompt]
150
+
121
151
  try:
122
- result = subprocess.run(
123
- [codex_path, "exec", "--dangerously-bypass-approvals-and-sandbox", full_prompt],
124
- capture_output=True,
125
- text=True,
126
- timeout=120,
127
- )
152
+ result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
128
153
  output = result.stdout.strip()
129
154
  if not output and result.stderr:
130
- return f"[Codex error: {result.stderr[:300]}]"
131
- return output or "[Codex returned empty response]"
155
+ return f"[{cli_command} error: {result.stderr[:300]}]"
156
+ return output or f"[{cli_command} returned empty response]"
132
157
  except subprocess.TimeoutExpired:
133
- return "[Codex timed out after 120s]"
158
+ return f"[{cli_command} timed out after 120s]"
134
159
  except Exception as e:
135
- return f"[Codex error: {e}]"
160
+ return f"[{cli_command} error: {e}]"
136
161
 
137
162
 
138
163
  def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "") -> str:
139
- """Call any supported model — OpenAI-compatible API, Vertex AI, or Codex CLI."""
164
+ """Call any supported model — OpenAI-compatible API, Vertex AI, or CLI (codex/claude)."""
140
165
  fmt = config.get("format", "openai")
141
166
 
142
- # Codex uses CLI, not HTTP API
167
+ # CLI-based models (codex, claude) — uses existing subscription, no API cost
143
168
  if fmt == "codex_cli":
144
- return _call_codex(prompt, system_prompt)
169
+ cli_path = config.get("codex_path", "")
170
+ cli_command = config.get("cli_command", "codex")
171
+ return _call_cli(prompt, system_prompt, cli_path=cli_path, cli_command=cli_command)
145
172
 
146
173
  api_key = config.get("api_key") or os.environ.get(config.get("env_key", ""), "")
147
174
  # Vertex AI uses service account auth, not API key
@@ -0,0 +1,217 @@
1
+ """
2
+ Delimit Release Sync — single source of truth for all public surfaces.
3
+
4
+ Audit mode: scans all surfaces and reports inconsistencies.
5
+ Apply mode: fixes what it can automatically.
6
+
7
+ Central config: ~/.delimit/release.json
8
+ """
9
+
10
+ import json
11
+ import os
12
+ import re
13
+ import subprocess
14
+ from pathlib import Path
15
+ from typing import Any, Dict, List, Optional
16
+
17
+ RELEASE_CONFIG = Path.home() / ".delimit" / "release.json"
18
+
19
+ DEFAULT_CONFIG = {
20
+ "product_name": "Delimit",
21
+ "tagline": "Governance toolkit for AI coding assistants",
22
+ "description": "Governance toolkit for AI coding assistants — API checks, persistent memory, consensus, security.",
23
+ "version": {
24
+ "cli": "", # filled dynamically
25
+ "action": "",
26
+ "gateway": "",
27
+ },
28
+ "urls": {
29
+ "homepage": "https://delimit.ai",
30
+ "docs": "https://delimit.ai/docs",
31
+ "github": "https://github.com/delimit-ai/delimit",
32
+ "action": "https://github.com/marketplace/actions/delimit-api-governance",
33
+ "npm": "https://www.npmjs.com/package/delimit-cli",
34
+ "quickstart": "https://github.com/delimit-ai/delimit-quickstart",
35
+ },
36
+ }
37
+
38
+
39
+ def get_release_config() -> Dict[str, Any]:
40
+ """Load or create the release config."""
41
+ if RELEASE_CONFIG.exists():
42
+ try:
43
+ return json.loads(RELEASE_CONFIG.read_text())
44
+ except Exception:
45
+ pass
46
+ return DEFAULT_CONFIG.copy()
47
+
48
+
49
+ def save_release_config(config: Dict[str, Any]) -> None:
50
+ """Save the release config."""
51
+ RELEASE_CONFIG.parent.mkdir(parents=True, exist_ok=True)
52
+ RELEASE_CONFIG.write_text(json.dumps(config, indent=2))
53
+
54
+
55
+ def _read_file(path: str) -> Optional[str]:
56
+ """Read a file, return None if missing."""
57
+ try:
58
+ return Path(path).read_text()
59
+ except Exception:
60
+ return None
61
+
62
+
63
+ def _check_contains(content: str, expected: str, surface: str) -> Dict:
64
+ """Check if content contains expected string."""
65
+ if content is None:
66
+ return {"surface": surface, "status": "missing", "detail": "File not found"}
67
+ if expected.lower() in content.lower():
68
+ return {"surface": surface, "status": "ok"}
69
+ return {
70
+ "surface": surface,
71
+ "status": "stale",
72
+ "expected": expected,
73
+ "detail": f"Does not contain: {expected[:80]}",
74
+ }
75
+
76
+
77
+ def _get_npm_version(pkg_path: str) -> str:
78
+ """Read version from package.json."""
79
+ try:
80
+ pkg = json.loads(Path(pkg_path).read_text())
81
+ return pkg.get("version", "")
82
+ except Exception:
83
+ return ""
84
+
85
+
86
+ def _get_pyproject_version(path: str) -> str:
87
+ """Read version from pyproject.toml."""
88
+ try:
89
+ content = Path(path).read_text()
90
+ m = re.search(r'version\s*=\s*"([^"]+)"', content)
91
+ return m.group(1) if m else ""
92
+ except Exception:
93
+ return ""
94
+
95
+
96
+ def audit(config: Optional[Dict] = None) -> Dict[str, Any]:
97
+ """Audit all public surfaces for consistency with the release config."""
98
+ cfg = config or get_release_config()
99
+ tagline = cfg.get("tagline", "")
100
+ description = cfg.get("description", "")
101
+ results = []
102
+
103
+ # 1. npm package.json
104
+ npm_pkg = _read_file(os.path.expanduser("~/.delimit/server/../../../npm-delimit/package.json"))
105
+ # Try common locations
106
+ for candidate in [
107
+ Path.home() / "npm-delimit" / "package.json",
108
+ ]:
109
+ if candidate.exists():
110
+ npm_pkg = candidate.read_text()
111
+ break
112
+
113
+ if npm_pkg:
114
+ try:
115
+ pkg = json.loads(npm_pkg)
116
+ pkg_desc = pkg.get("description", "")
117
+ if tagline.lower() not in pkg_desc.lower():
118
+ results.append({"surface": "npm package.json description", "status": "stale", "current": pkg_desc[:100], "expected": description})
119
+ else:
120
+ results.append({"surface": "npm package.json description", "status": "ok"})
121
+ cfg.setdefault("version", {})["cli"] = pkg.get("version", "")
122
+ except Exception:
123
+ results.append({"surface": "npm package.json", "status": "error", "detail": "Could not parse"})
124
+
125
+ # 2. CLAUDE.md
126
+ claude_md = _read_file(str(Path.home() / "CLAUDE.md"))
127
+ results.append(_check_contains(claude_md, tagline, "CLAUDE.md"))
128
+
129
+ # 3. GitHub repo descriptions (requires gh CLI)
130
+ for repo, surface in [
131
+ ("delimit-ai/delimit", "GitHub: delimit repo"),
132
+ ("delimit-ai/delimit-action", "GitHub: delimit-action repo"),
133
+ ("delimit-ai/delimit-quickstart", "GitHub: quickstart repo"),
134
+ ]:
135
+ try:
136
+ r = subprocess.run(
137
+ ["gh", "api", f"repos/{repo}", "--jq", ".description"],
138
+ capture_output=True, text=True, timeout=10,
139
+ )
140
+ if r.returncode == 0:
141
+ desc = r.stdout.strip()
142
+ if tagline.lower() in desc.lower() or "governance" in desc.lower():
143
+ results.append({"surface": surface, "status": "ok", "current": desc[:100]})
144
+ else:
145
+ results.append({"surface": surface, "status": "stale", "current": desc[:100], "expected": tagline})
146
+ else:
147
+ results.append({"surface": surface, "status": "error", "detail": "gh API failed"})
148
+ except Exception:
149
+ results.append({"surface": surface, "status": "skipped", "detail": "gh CLI not available"})
150
+
151
+ # 4. GitHub org description
152
+ try:
153
+ r = subprocess.run(
154
+ ["gh", "api", "orgs/delimit-ai", "--jq", ".description"],
155
+ capture_output=True, text=True, timeout=10,
156
+ )
157
+ if r.returncode == 0:
158
+ org_desc = r.stdout.strip()
159
+ results.append(_check_contains(org_desc, "governance" if "governance" in tagline.lower() else tagline[:30], "GitHub: org description"))
160
+ except Exception:
161
+ results.append({"surface": "GitHub: org description", "status": "skipped"})
162
+
163
+ # 5. delimit.ai meta tags
164
+ for layout_path in [
165
+ Path.home() / "delimit-ui" / "app" / "layout.tsx",
166
+ ]:
167
+ if layout_path.exists():
168
+ layout = layout_path.read_text()
169
+ results.append(_check_contains(layout, tagline, "delimit.ai meta title"))
170
+ break
171
+ else:
172
+ results.append({"surface": "delimit.ai meta title", "status": "skipped", "detail": "layout.tsx not found"})
173
+
174
+ # 6. Gateway version
175
+ for pyproject_path in [
176
+ Path.home() / "delimit-gateway" / "pyproject.toml",
177
+ ]:
178
+ if pyproject_path.exists():
179
+ gw_version = _get_pyproject_version(str(pyproject_path))
180
+ cfg.setdefault("version", {})["gateway"] = gw_version
181
+ results.append({"surface": "gateway pyproject.toml", "status": "ok", "version": gw_version})
182
+ break
183
+
184
+ # 7. GitHub releases
185
+ try:
186
+ r = subprocess.run(
187
+ ["gh", "release", "list", "--repo", "delimit-ai/delimit", "--limit", "1", "--json", "tagName"],
188
+ capture_output=True, text=True, timeout=10,
189
+ )
190
+ if r.returncode == 0:
191
+ releases = json.loads(r.stdout)
192
+ if releases:
193
+ release_ver = releases[0].get("tagName", "").lstrip("v")
194
+ cli_ver = cfg.get("version", {}).get("cli", "")
195
+ if release_ver == cli_ver:
196
+ results.append({"surface": "GitHub release", "status": "ok", "version": release_ver})
197
+ else:
198
+ results.append({"surface": "GitHub release", "status": "stale", "current": release_ver, "expected": cli_ver})
199
+ except Exception:
200
+ results.append({"surface": "GitHub release", "status": "skipped"})
201
+
202
+ # Summary
203
+ ok = sum(1 for r in results if r["status"] == "ok")
204
+ stale = sum(1 for r in results if r["status"] == "stale")
205
+ errors = sum(1 for r in results if r["status"] in ("error", "missing"))
206
+
207
+ return {
208
+ "config": cfg,
209
+ "surfaces": results,
210
+ "summary": {
211
+ "total": len(results),
212
+ "ok": ok,
213
+ "stale": stale,
214
+ "errors": errors,
215
+ },
216
+ "all_synced": stale == 0 and errors == 0,
217
+ }
@@ -1497,6 +1497,13 @@ async def delimit_sensor_github_issue(
1497
1497
  issue_number: The issue number to monitor.
1498
1498
  since_comment_id: Last seen comment ID. Pass 0 to get all comments.
1499
1499
  """
1500
+ import re as _re
1501
+ # Validate inputs to prevent injection
1502
+ if not _re.match(r'^[\w.-]+/[\w.-]+$', repo):
1503
+ return _with_next_steps("sensor_github_issue", {"error": f"Invalid repo format: {repo}. Use owner/repo."})
1504
+ if not isinstance(issue_number, int) or issue_number <= 0:
1505
+ return _with_next_steps("sensor_github_issue", {"error": f"Invalid issue number: {issue_number}"})
1506
+
1500
1507
  try:
1501
1508
  # Fetch comments
1502
1509
  comments_jq = (
@@ -1982,17 +1989,110 @@ def delimit_ventures() -> Dict[str, Any]:
1982
1989
 
1983
1990
 
1984
1991
  @mcp.tool()
1985
- def delimit_models(action: str = "list") -> Dict[str, Any]:
1992
+ def delimit_models(
1993
+ action: str = "list",
1994
+ provider: str = "",
1995
+ api_key: str = "",
1996
+ model_name: str = "",
1997
+ ) -> Dict[str, Any]:
1986
1998
  """View and configure AI models for multi-model deliberation.
1987
1999
 
1988
- Shows which models are available for consensus runs. Models auto-detect
1989
- from environment variables (XAI_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY).
2000
+ Actions:
2001
+ - "list": show configured models and what's available
2002
+ - "detect": auto-detect API keys from environment and configure
2003
+ - "add": add a model provider (set provider + api_key)
2004
+ - "remove": remove a model provider (set provider)
2005
+
2006
+ Supported providers: grok, gemini, openai, anthropic, codex
1990
2007
 
1991
2008
  Args:
1992
- action: 'list' to show configured models.
2009
+ action: list, detect, add, or remove.
2010
+ provider: Model provider for add/remove (grok, gemini, openai, anthropic, codex).
2011
+ api_key: API key for the provider (only used with action=add).
2012
+ model_name: Optional model name override (e.g. "gpt-4o", "claude-sonnet-4-5-20250514").
1993
2013
  """
1994
- from ai.deliberation import configure_models
1995
- return configure_models()
2014
+ from ai.deliberation import configure_models, get_models_config, MODELS_CONFIG, DEFAULT_MODELS
2015
+ import json as _json
2016
+
2017
+ if action == "list":
2018
+ return configure_models()
2019
+
2020
+ if action == "detect":
2021
+ # Auto-detect from env vars and save
2022
+ config = get_models_config()
2023
+ detected = []
2024
+ env_map = {
2025
+ "grok": "XAI_API_KEY",
2026
+ "gemini": "GOOGLE_APPLICATION_CREDENTIALS",
2027
+ "openai": "OPENAI_API_KEY",
2028
+ "anthropic": "ANTHROPIC_API_KEY",
2029
+ }
2030
+ for pid, env_key in env_map.items():
2031
+ if os.environ.get(env_key) and pid not in config:
2032
+ defaults = DEFAULT_MODELS.get(pid, {})
2033
+ config[pid] = {**defaults, "enabled": True}
2034
+ if "api_key" in defaults:
2035
+ config[pid]["api_key"] = os.environ[env_key]
2036
+ detected.append(pid)
2037
+ # Check codex CLI
2038
+ import shutil
2039
+ if shutil.which("codex") and "codex" not in config:
2040
+ config["codex"] = {**DEFAULT_MODELS.get("codex", {}), "enabled": True}
2041
+ detected.append("codex")
2042
+
2043
+ if detected:
2044
+ MODELS_CONFIG.parent.mkdir(parents=True, exist_ok=True)
2045
+ MODELS_CONFIG.write_text(_json.dumps(config, indent=2))
2046
+ return {"action": "detect", "detected": detected, "total_models": len(config), "config_path": str(MODELS_CONFIG)}
2047
+ return {"action": "detect", "detected": [], "note": "No new API keys found in environment."}
2048
+
2049
+ if action == "add":
2050
+ if not provider:
2051
+ return {"error": "Specify provider: grok, gemini, openai, anthropic, or codex"}
2052
+
2053
+ config = {}
2054
+ if MODELS_CONFIG.exists():
2055
+ try:
2056
+ config = _json.loads(MODELS_CONFIG.read_text())
2057
+ except Exception:
2058
+ pass
2059
+
2060
+ # Provider templates
2061
+ templates = {
2062
+ "grok": {"name": "Grok", "api_url": "https://api.x.ai/v1/chat/completions", "model": model_name or "grok-4-0709", "env_key": "XAI_API_KEY"},
2063
+ "openai": {"name": "OpenAI", "api_url": "https://api.openai.com/v1/chat/completions", "model": model_name or "gpt-4o", "env_key": "OPENAI_API_KEY", "prefer_cli": True},
2064
+ "anthropic": {"name": "Claude", "api_url": "https://api.anthropic.com/v1/messages", "model": model_name or "claude-sonnet-4-5-20250514", "env_key": "ANTHROPIC_API_KEY", "format": "anthropic"},
2065
+ "gemini": {"name": "Gemini", "api_url": "https://us-central1-aiplatform.googleapis.com/v1/projects/{project}/locations/us-central1/publishers/google/models/gemini-2.5-flash:generateContent", "model": model_name or "gemini-2.5-flash", "format": "vertex_ai"},
2066
+ }
2067
+
2068
+ if provider not in templates:
2069
+ return {"error": f"Unknown provider '{provider}'. Supported: {', '.join(templates.keys())}"}
2070
+
2071
+ entry = {**templates[provider], "enabled": True}
2072
+ if api_key:
2073
+ entry["api_key"] = api_key
2074
+
2075
+ config[provider] = entry
2076
+ MODELS_CONFIG.parent.mkdir(parents=True, exist_ok=True)
2077
+ MODELS_CONFIG.write_text(_json.dumps(config, indent=2))
2078
+ return {"action": "add", "provider": provider, "model": entry.get("model"), "config_path": str(MODELS_CONFIG)}
2079
+
2080
+ if action == "remove":
2081
+ if not provider:
2082
+ return {"error": "Specify provider to remove"}
2083
+ config = {}
2084
+ if MODELS_CONFIG.exists():
2085
+ try:
2086
+ config = _json.loads(MODELS_CONFIG.read_text())
2087
+ except Exception:
2088
+ pass
2089
+ if provider in config:
2090
+ del config[provider]
2091
+ MODELS_CONFIG.write_text(_json.dumps(config, indent=2))
2092
+ return {"action": "remove", "provider": provider, "remaining": list(config.keys())}
2093
+ return {"action": "remove", "provider": provider, "note": "Provider not found in config"}
2094
+
2095
+ return {"error": f"Unknown action '{action}'. Use: list, detect, add, remove"}
1996
2096
 
1997
2097
 
1998
2098
  @mcp.tool()
@@ -2128,6 +2228,22 @@ def _extract_deliberation_actions(result: Dict, question: str) -> List[Dict[str,
2128
2228
  return actions[:10]
2129
2229
 
2130
2230
 
2231
+ @mcp.tool()
2232
+ def delimit_release_sync(action: str = "audit") -> Dict[str, Any]:
2233
+ """Audit or sync all public surfaces for consistency.
2234
+
2235
+ Checks GitHub repos, npm, site meta tags, CLAUDE.md, and releases
2236
+ against a central config. Reports what's stale and what needs updating.
2237
+
2238
+ Args:
2239
+ action: "audit" to check all surfaces, "config" to view/edit the release config.
2240
+ """
2241
+ from ai.release_sync import audit, get_release_config
2242
+ if action == "config":
2243
+ return get_release_config()
2244
+ return _with_next_steps("release_sync", audit())
2245
+
2246
+
2131
2247
  # ═══════════════════════════════════════════════════════════════════════
2132
2248
  # ENTRY POINT
2133
2249
  # ═══════════════════════════════════════════════════════════════════════
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "delimit-cli",
3
- "version": "3.6.12",
4
- "description": "Governance toolkit for AI coding assistants API checks, persistent memory, consensus, security.",
3
+ "version": "3.7.1",
4
+ "description": "One workspace for every AI coding assistant. Tasks, memory, and governance carry between Claude Code, Codex, and Gemini CLI.",
5
5
  "main": "index.js",
6
6
  "files": [
7
7
  "bin/",