delimit-cli 3.6.10 → 3.6.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,12 +1,12 @@
1
1
  # Delimit
2
2
 
3
- Catch breaking API changes before they reach production.
3
+ Governance toolkit for AI coding assistants — API checks, persistent memory, consensus, security.
4
4
 
5
5
  [![npm](https://img.shields.io/npm/v/delimit-cli)](https://www.npmjs.com/package/delimit-cli)
6
- [![GitHub Action](https://img.shields.io/badge/GitHub%20Action-v1.4.0-blue)](https://github.com/marketplace/actions/delimit-api-governance)
6
+ [![GitHub Action](https://img.shields.io/badge/GitHub%20Action-v1.5.0-blue)](https://github.com/marketplace/actions/delimit-api-governance)
7
7
  [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
8
8
 
9
- Delimit diffs your OpenAPI spec on every pull request. Breaking changes get flagged, semver gets classified, and your team gets a migration guide automatically.
9
+ Delimit gives your AI coding assistant governance tools: API breaking change detection, persistent task ledger, security audit, test verification, and multi-model deliberation. Works with Claude Code, Codex, Cursor, and Gemini CLI.
10
10
 
11
11
  ---
12
12
 
@@ -319,7 +319,7 @@ Run full governance compliance checks. Verify security, policy compliance, evide
319
319
  } else {
320
320
  // Check if existing CLAUDE.md is an older Delimit version that should be upgraded
321
321
  const existing = fs.readFileSync(claudeMd, 'utf-8');
322
- if (existing.includes('# Delimit AI Guardrails') || existing.includes('delimit_init') || existing.includes('delimit_lint')) {
322
+ if (existing.includes('# Delimit AI Guardrails') || existing.includes('delimit_init') || existing.includes('delimit_lint') || existing.includes('persistent memory, verified execution')) {
323
323
  fs.writeFileSync(claudeMd, getClaudeMdContent());
324
324
  log(` ${green('✓')} Updated ${claudeMd} with improved onboarding`);
325
325
  } else {
@@ -384,30 +384,33 @@ Run full governance compliance checks. Verify security, policy compliance, evide
384
384
  function getClaudeMdContent() {
385
385
  return `# Delimit
386
386
 
387
- Your AI has persistent memory, verified execution, and governance.
388
-
389
- ## First time? Say one of these:
390
- - "check this project's health" -- see what Delimit finds
391
- - "add to ledger: [anything]" -- start tracking tasks
392
- - "what's on the ledger?" -- see what's pending
393
-
394
- ## Returning? Your AI remembers:
395
- - Ledger items persist across sessions
396
- - Governance rules stay configured
397
- - Memory carries forward
398
-
399
- ## On first session, your AI will automatically:
400
- 1. Diagnose the environment to verify everything is connected
401
- 2. Check the ledger for any pending items from previous sessions
402
- 3. If no governance exists yet, suggest initializing it
403
-
404
- ## Available Agents
405
- - /lint -- check API specs for breaking changes
406
- - /engineering -- build, test, refactor with governance checks
407
- - /governance -- full compliance audit
408
-
409
- ## Need help?
410
- Say "delimit help" for docs on any capability.
387
+ Governance toolkit for AI coding assistants.
388
+
389
+ ## Try these:
390
+ - "lint my API spec" -- catch breaking changes in your OpenAPI spec
391
+ - "add to ledger: [anything]" -- track tasks across sessions
392
+ - "what's on the ledger?" -- pick up where you left off
393
+ - "check this project's health" -- run governance diagnostics
394
+
395
+ ## What Delimit does:
396
+ - **API governance** -- lint, diff, semver classification, migration guides
397
+ - **Persistent ledger** -- tasks survive across sessions
398
+ - **Security audit** -- dependency scanning, secret detection
399
+ - **Test verification** -- confirm tests actually ran, measure coverage
400
+ - **Multi-model deliberation** -- say "deliberate [question]" to get consensus from multiple AI models
401
+
402
+ ## GitHub Action
403
+ Add breaking change detection to any repo:
404
+ \`\`\`yaml
405
+ - uses: delimit-ai/delimit-action@v1
406
+ with:
407
+ spec: api/openapi.yaml
408
+ \`\`\`
409
+
410
+ ## Links
411
+ - Docs: https://delimit.ai/docs
412
+ - GitHub: https://github.com/delimit-ai/delimit
413
+ - Action: https://github.com/marketplace/actions/delimit-api-governance
411
414
  `;
412
415
  }
413
416
 
@@ -0,0 +1 @@
1
+ """Delimit unified MCP server — single agent-facing surface."""
@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional
11
11
 
12
12
  logger = logging.getLogger("delimit.ai.generate_bridge")
13
13
 
14
- GEN_PACKAGE = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages" / "delimit-generator"
14
+ GEN_PACKAGE = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages" / "delimit-generator"
15
15
 
16
16
 
17
17
  def _ensure_gen_path():
@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional
11
11
 
12
12
  logger = logging.getLogger("delimit.ai.intel_bridge")
13
13
 
14
- INTEL_PACKAGE = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages" / "wireintel"
14
+ INTEL_PACKAGE = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages" / "wireintel"
15
15
 
16
16
 
17
17
  def _ensure_intel_path():
@@ -13,7 +13,7 @@ from typing import Any, Dict, Optional
13
13
 
14
14
  logger = logging.getLogger("delimit.ai.memory_bridge")
15
15
 
16
- MEM_PACKAGE = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages" / "delimit-memory"
16
+ MEM_PACKAGE = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages" / "delimit-memory"
17
17
 
18
18
  _server = None
19
19
 
@@ -15,7 +15,7 @@ from .async_utils import run_async
15
15
 
16
16
  logger = logging.getLogger("delimit.ai.ops_bridge")
17
17
 
18
- PACKAGES = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages"
18
+ PACKAGES = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages"
19
19
 
20
20
  # Add PACKAGES dir so `from shared.base_server import BaseMCPServer` resolves
21
21
  _packages = str(PACKAGES)
@@ -14,7 +14,7 @@ from typing import Any, Dict, List, Optional
14
14
 
15
15
  logger = logging.getLogger("delimit.ai.os_bridge")
16
16
 
17
- OS_PACKAGE = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages" / "delimit-os"
17
+ OS_PACKAGE = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages" / "delimit-os"
18
18
 
19
19
  _NOT_INIT_MSG = (
20
20
  "Project not initialized for governance. "
@@ -15,7 +15,7 @@ from .async_utils import run_async
15
15
 
16
16
  logger = logging.getLogger("delimit.ai.repo_bridge")
17
17
 
18
- PACKAGES = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages"
18
+ PACKAGES = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages"
19
19
 
20
20
  # Add PACKAGES dir so `from shared.base_server import BaseMCPServer` resolves
21
21
  _packages = str(PACKAGES)
@@ -864,3 +864,274 @@ def release_status(environment: str = "production") -> Dict[str, Any]:
864
864
  result["head_sha"] = r["stdout"].strip()
865
865
 
866
866
  return result
867
+
868
+
869
+ def deploy_site(project_path: str = ".", message: str = "", env_vars: dict = None) -> Dict[str, Any]:
870
+ """Deploy a site project — git commit, push, Vercel build, deploy.
871
+
872
+ Handles the full chain: commit changes, push to remote, build with env vars,
873
+ deploy prebuilt to production. Returns deploy URL and status.
874
+ """
875
+ import subprocess
876
+ from pathlib import Path
877
+
878
+ p = Path(project_path).resolve()
879
+ results = {"project": str(p), "steps": []}
880
+
881
+ # 1. Check for changes
882
+ try:
883
+ status = subprocess.run(
884
+ ["git", "status", "--porcelain"],
885
+ capture_output=True, text=True, timeout=10, cwd=str(p)
886
+ )
887
+ changed_files = [l.strip() for l in status.stdout.strip().splitlines() if l.strip()]
888
+ if not changed_files:
889
+ return {"status": "no_changes", "message": "No changes to deploy."}
890
+ results["changed_files"] = len(changed_files)
891
+ results["steps"].append({"step": "check", "status": "ok", "files": len(changed_files)})
892
+ except Exception as e:
893
+ return {"error": f"Git status failed: {e}"}
894
+
895
+ # 2. Git add + commit
896
+ commit_msg = message or "deploy: site update"
897
+ try:
898
+ subprocess.run(["git", "add", "-A"], cwd=str(p), timeout=10, capture_output=True)
899
+ result = subprocess.run(
900
+ ["git", "commit", "-m", commit_msg],
901
+ cwd=str(p), timeout=10, capture_output=True, text=True
902
+ )
903
+ if result.returncode == 0:
904
+ results["steps"].append({"step": "commit", "status": "ok", "message": commit_msg})
905
+ else:
906
+ results["steps"].append({"step": "commit", "status": "skipped", "detail": "nothing to commit"})
907
+ except Exception as e:
908
+ results["steps"].append({"step": "commit", "status": "error", "detail": str(e)})
909
+
910
+ # 3. Git push
911
+ try:
912
+ result = subprocess.run(
913
+ ["git", "push", "origin", "HEAD"],
914
+ cwd=str(p), timeout=30, capture_output=True, text=True
915
+ )
916
+ results["steps"].append({
917
+ "step": "push",
918
+ "status": "ok" if result.returncode == 0 else "error",
919
+ "detail": result.stderr.strip()[:200] if result.returncode != 0 else "pushed"
920
+ })
921
+ except Exception as e:
922
+ results["steps"].append({"step": "push", "status": "error", "detail": str(e)})
923
+
924
+ # 4. Vercel build
925
+ env = {**os.environ}
926
+ if env_vars:
927
+ env.update(env_vars)
928
+
929
+ try:
930
+ result = subprocess.run(
931
+ ["npx", "vercel", "build", "--prod"],
932
+ cwd=str(p), timeout=120, capture_output=True, text=True, env=env
933
+ )
934
+ results["steps"].append({
935
+ "step": "build",
936
+ "status": "ok" if result.returncode == 0 else "error",
937
+ "detail": result.stdout.strip()[-200:] if result.returncode == 0 else result.stderr.strip()[:200]
938
+ })
939
+ if result.returncode != 0:
940
+ results["status"] = "build_failed"
941
+ return results
942
+ except subprocess.TimeoutExpired:
943
+ results["steps"].append({"step": "build", "status": "timeout"})
944
+ results["status"] = "build_timeout"
945
+ return results
946
+ except Exception as e:
947
+ results["steps"].append({"step": "build", "status": "error", "detail": str(e)})
948
+ results["status"] = "build_error"
949
+ return results
950
+
951
+ # 5. Vercel deploy
952
+ try:
953
+ result = subprocess.run(
954
+ ["npx", "vercel", "deploy", "--prebuilt", "--prod"],
955
+ cwd=str(p), timeout=60, capture_output=True, text=True, env=env
956
+ )
957
+ output = result.stdout.strip()
958
+ # Extract deploy URL
959
+ deploy_url = ""
960
+ for line in output.splitlines():
961
+ if "vercel.app" in line or "delimit.ai" in line:
962
+ deploy_url = line.strip()
963
+ break
964
+ results["steps"].append({
965
+ "step": "deploy",
966
+ "status": "ok" if result.returncode == 0 else "error",
967
+ "url": deploy_url
968
+ })
969
+ results["deploy_url"] = deploy_url
970
+ except Exception as e:
971
+ results["steps"].append({"step": "deploy", "status": "error", "detail": str(e)})
972
+
973
+ results["status"] = "deployed"
974
+ return results
975
+
976
+
977
+ def deploy_npm(project_path: str = ".", bump: str = "patch", tag: str = "latest", dry_run: bool = False) -> Dict[str, Any]:
978
+ """Publish an npm package — bump version, publish, verify.
979
+
980
+ Handles: version bump (patch/minor/major), npm publish, verify on registry.
981
+ Optionally dry-run to preview without publishing.
982
+ """
983
+ import subprocess
984
+ from pathlib import Path
985
+
986
+ p = Path(project_path).resolve()
987
+ pkg_json = p / "package.json"
988
+
989
+ if not pkg_json.exists():
990
+ return {"error": f"No package.json found at {p}"}
991
+
992
+ results = {"project": str(p), "steps": []}
993
+
994
+ # 1. Read current version
995
+ try:
996
+ import json
997
+ with open(pkg_json) as f:
998
+ pkg = json.load(f)
999
+ current_version = pkg.get("version", "0.0.0")
1000
+ pkg_name = pkg.get("name", "unknown")
1001
+ results["package"] = pkg_name
1002
+ results["current_version"] = current_version
1003
+ results["steps"].append({"step": "read_version", "status": "ok", "version": current_version})
1004
+ except Exception as e:
1005
+ return {"error": f"Failed to read package.json: {e}"}
1006
+
1007
+ # 2. Check npm auth
1008
+ try:
1009
+ result = subprocess.run(
1010
+ ["npm", "whoami"],
1011
+ capture_output=True, text=True, timeout=10
1012
+ )
1013
+ if result.returncode != 0:
1014
+ return {"error": "Not logged into npm. Run: npm login"}
1015
+ npm_user = result.stdout.strip()
1016
+ results["npm_user"] = npm_user
1017
+ results["steps"].append({"step": "auth_check", "status": "ok", "user": npm_user})
1018
+ except Exception as e:
1019
+ return {"error": f"npm auth check failed: {e}"}
1020
+
1021
+ # 3. Check for uncommitted changes
1022
+ try:
1023
+ status = subprocess.run(
1024
+ ["git", "status", "--porcelain"],
1025
+ capture_output=True, text=True, timeout=10, cwd=str(p)
1026
+ )
1027
+ uncommitted = [l.strip() for l in status.stdout.strip().splitlines() if l.strip()]
1028
+ if uncommitted:
1029
+ results["steps"].append({"step": "git_check", "status": "warning", "uncommitted_files": len(uncommitted)})
1030
+ else:
1031
+ results["steps"].append({"step": "git_check", "status": "ok"})
1032
+ except Exception:
1033
+ pass
1034
+
1035
+ # 4. Version bump
1036
+ if bump in ("patch", "minor", "major"):
1037
+ try:
1038
+ bump_cmd = ["npm", "version", bump, "--no-git-tag-version"]
1039
+ result = subprocess.run(
1040
+ bump_cmd, capture_output=True, text=True, timeout=10, cwd=str(p)
1041
+ )
1042
+ if result.returncode == 0:
1043
+ new_version = result.stdout.strip().lstrip("v")
1044
+ results["new_version"] = new_version
1045
+ results["steps"].append({"step": "version_bump", "status": "ok", "from": current_version, "to": new_version, "bump": bump})
1046
+ else:
1047
+ results["steps"].append({"step": "version_bump", "status": "error", "detail": result.stderr.strip()[:200]})
1048
+ results["status"] = "bump_failed"
1049
+ return results
1050
+ except Exception as e:
1051
+ results["steps"].append({"step": "version_bump", "status": "error", "detail": str(e)})
1052
+ results["status"] = "bump_failed"
1053
+ return results
1054
+ else:
1055
+ results["new_version"] = current_version
1056
+
1057
+ # 5. Publish
1058
+ publish_cmd = ["npm", "publish", "--tag", tag]
1059
+ if dry_run:
1060
+ publish_cmd.append("--dry-run")
1061
+
1062
+ try:
1063
+ result = subprocess.run(
1064
+ publish_cmd, capture_output=True, text=True, timeout=60, cwd=str(p)
1065
+ )
1066
+ if result.returncode == 0:
1067
+ results["steps"].append({
1068
+ "step": "publish",
1069
+ "status": "ok" if not dry_run else "dry_run",
1070
+ "tag": tag,
1071
+ "output": result.stdout.strip()[-300:]
1072
+ })
1073
+ else:
1074
+ results["steps"].append({
1075
+ "step": "publish",
1076
+ "status": "error",
1077
+ "detail": result.stderr.strip()[:300]
1078
+ })
1079
+ results["status"] = "publish_failed"
1080
+ return results
1081
+ except subprocess.TimeoutExpired:
1082
+ results["steps"].append({"step": "publish", "status": "timeout"})
1083
+ results["status"] = "publish_timeout"
1084
+ return results
1085
+ except Exception as e:
1086
+ results["steps"].append({"step": "publish", "status": "error", "detail": str(e)})
1087
+ results["status"] = "publish_failed"
1088
+ return results
1089
+
1090
+ # 6. Verify on registry (skip for dry run)
1091
+ if not dry_run:
1092
+ try:
1093
+ import time
1094
+ time.sleep(2) # brief wait for registry propagation
1095
+ result = subprocess.run(
1096
+ ["npm", "view", pkg_name, "version"],
1097
+ capture_output=True, text=True, timeout=15
1098
+ )
1099
+ registry_version = result.stdout.strip()
1100
+ verified = registry_version == results.get("new_version", current_version)
1101
+ results["steps"].append({
1102
+ "step": "verify",
1103
+ "status": "ok" if verified else "mismatch",
1104
+ "registry_version": registry_version
1105
+ })
1106
+ except Exception:
1107
+ results["steps"].append({"step": "verify", "status": "skipped"})
1108
+
1109
+ # 7. Git commit the version bump
1110
+ if bump in ("patch", "minor", "major") and not dry_run:
1111
+ try:
1112
+ new_ver = results.get("new_version", current_version)
1113
+ subprocess.run(["git", "add", "package.json"], cwd=str(p), timeout=10, capture_output=True)
1114
+ # Also stage package-lock.json if it exists
1115
+ lock_file = p / "package-lock.json"
1116
+ if lock_file.exists():
1117
+ subprocess.run(["git", "add", "package-lock.json"], cwd=str(p), timeout=10, capture_output=True)
1118
+ result = subprocess.run(
1119
+ ["git", "commit", "-m", f"release: v{new_ver}"],
1120
+ cwd=str(p), timeout=10, capture_output=True, text=True
1121
+ )
1122
+ if result.returncode == 0:
1123
+ results["steps"].append({"step": "git_commit", "status": "ok", "message": f"release: v{new_ver}"})
1124
+ # Push
1125
+ push_result = subprocess.run(
1126
+ ["git", "push", "origin", "HEAD"],
1127
+ cwd=str(p), timeout=30, capture_output=True, text=True
1128
+ )
1129
+ results["steps"].append({
1130
+ "step": "git_push",
1131
+ "status": "ok" if push_result.returncode == 0 else "error"
1132
+ })
1133
+ except Exception as e:
1134
+ results["steps"].append({"step": "git_commit", "status": "error", "detail": str(e)})
1135
+
1136
+ results["status"] = "published" if not dry_run else "dry_run_complete"
1137
+ return results
@@ -15,7 +15,7 @@ from .async_utils import run_async
15
15
 
16
16
  logger = logging.getLogger("delimit.ai.ui_bridge")
17
17
 
18
- PACKAGES = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages"
18
+ PACKAGES = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages"
19
19
 
20
20
  # Add PACKAGES dir so `from shared.base_server import BaseMCPServer` resolves
21
21
  _packages = str(PACKAGES)
@@ -14,7 +14,7 @@ from .async_utils import run_async
14
14
 
15
15
  logger = logging.getLogger("delimit.ai.vault_bridge")
16
16
 
17
- VAULT_PACKAGE = Path(os.environ.get("DELIMIT_HOME", Path.home() / ".delimit")) / "server" / "packages" / "delimit-vault"
17
+ VAULT_PACKAGE = Path(os.environ.get("DELIMIT_HOME", str(Path.home() / ".delimit"))) / "server" / "packages" / "delimit-vault"
18
18
 
19
19
  _server = None
20
20
 
@@ -25,22 +25,37 @@ MODELS_CONFIG = Path.home() / ".delimit" / "models.json"
25
25
 
26
26
  DEFAULT_MODELS = {
27
27
  "grok": {
28
- "name": "Grok 4",
28
+ "name": "Grok",
29
29
  "api_url": "https://api.x.ai/v1/chat/completions",
30
30
  "model": "grok-4-0709",
31
31
  "env_key": "XAI_API_KEY",
32
32
  "enabled": False,
33
33
  },
34
34
  "gemini": {
35
- "name": "Gemini 2.5 Flash",
35
+ "name": "Gemini",
36
36
  "api_url": "https://us-central1-aiplatform.googleapis.com/v1/projects/{project}/locations/us-central1/publishers/google/models/gemini-2.5-flash:generateContent",
37
37
  "model": "gemini-2.5-flash",
38
38
  "env_key": "GOOGLE_APPLICATION_CREDENTIALS",
39
39
  "enabled": False,
40
40
  "format": "vertex_ai",
41
41
  },
42
+ "openai": {
43
+ "name": "GPT",
44
+ "api_url": "https://api.openai.com/v1/chat/completions",
45
+ "model": "gpt-4o",
46
+ "env_key": "OPENAI_API_KEY",
47
+ "enabled": False,
48
+ },
49
+ "anthropic": {
50
+ "name": "Claude",
51
+ "api_url": "https://api.anthropic.com/v1/messages",
52
+ "model": "claude-sonnet-4-5-20250514",
53
+ "env_key": "ANTHROPIC_API_KEY",
54
+ "enabled": False,
55
+ "format": "anthropic",
56
+ },
42
57
  "codex": {
43
- "name": "Codex (GPT-5.4)",
58
+ "name": "Codex CLI",
44
59
  "format": "codex_cli",
45
60
  "model": "gpt-5.4",
46
61
  "env_key": "CODEX_CLI",
@@ -148,7 +163,7 @@ def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "
148
163
  os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = creds_path
149
164
  creds, project = google.auth.default()
150
165
  creds.refresh(google.auth.transport.requests.Request())
151
- actual_url = api_url.replace("{project}", project or os.environ.get("GOOGLE_CLOUD_PROJECT", "delimit"))
166
+ actual_url = api_url.replace("{project}", project or os.environ.get("GOOGLE_CLOUD_PROJECT", ""))
152
167
  data = json.dumps({
153
168
  "contents": [{"role": "user", "parts": [{"text": f"{system_prompt}\n\n{prompt}" if system_prompt else prompt}]}],
154
169
  "generationConfig": {"maxOutputTokens": 4096, "temperature": 0.7},
@@ -176,6 +191,25 @@ def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "
176
191
  headers={"Content-Type": "application/json"},
177
192
  method="POST",
178
193
  )
194
+ elif fmt == "anthropic":
195
+ # Anthropic Messages API
196
+ data = json.dumps({
197
+ "model": model,
198
+ "max_tokens": 4096,
199
+ "system": system_prompt or "You are a helpful assistant participating in a multi-model deliberation.",
200
+ "messages": [{"role": "user", "content": prompt}],
201
+ }).encode()
202
+ req = urllib.request.Request(
203
+ api_url,
204
+ data=data,
205
+ headers={
206
+ "x-api-key": api_key,
207
+ "anthropic-version": "2023-06-01",
208
+ "Content-Type": "application/json",
209
+ "User-Agent": "Delimit/3.6.0",
210
+ },
211
+ method="POST",
212
+ )
179
213
  else:
180
214
  # OpenAI-compatible format (works for xAI, OpenAI, etc.)
181
215
  messages = []
@@ -195,7 +229,7 @@ def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "
195
229
  headers={
196
230
  "Authorization": f"Bearer {api_key}",
197
231
  "Content-Type": "application/json",
198
- "User-Agent": "Delimit/3.3.0",
232
+ "User-Agent": "Delimit/3.6.0",
199
233
  },
200
234
  method="POST",
201
235
  )
@@ -205,6 +239,8 @@ def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "
205
239
 
206
240
  if fmt in ("google", "vertex_ai"):
207
241
  return result["candidates"][0]["content"]["parts"][0]["text"]
242
+ elif fmt == "anthropic":
243
+ return result["content"][0]["text"]
208
244
  else:
209
245
  return result["choices"][0]["message"]["content"]
210
246
 
@@ -75,6 +75,61 @@ RULES = {
75
75
  },
76
76
  }
77
77
 
78
+ # Milestone rules — auto-create DONE ledger items for significant completions.
79
+ # Unlike threshold RULES (which create open items for problems), milestones
80
+ # record achievements so the ledger reflects what was shipped.
81
+ MILESTONES = {
82
+ "deploy_site": {
83
+ "trigger_key": "status",
84
+ "trigger_values": ["deployed"],
85
+ "ledger_title": "Deployed: {project}",
86
+ "ledger_type": "feat",
87
+ "ledger_priority": "P1",
88
+ "auto_done": True,
89
+ },
90
+ "deploy_npm": {
91
+ "trigger_key": "status",
92
+ "trigger_values": ["published"],
93
+ "ledger_title": "Published: {package}@{new_version}",
94
+ "ledger_type": "feat",
95
+ "ledger_priority": "P1",
96
+ "auto_done": True,
97
+ },
98
+ "deliberate": {
99
+ "trigger_key": "status",
100
+ "trigger_values": ["unanimous"],
101
+ "ledger_title": "Consensus reached: {question_short}",
102
+ "ledger_type": "strategy",
103
+ "ledger_priority": "P1",
104
+ "auto_done": True,
105
+ },
106
+ "test_generate": {
107
+ "threshold_key": "tests_generated",
108
+ "threshold": 10,
109
+ "comparison": "above",
110
+ "ledger_title": "Generated {value} tests",
111
+ "ledger_type": "feat",
112
+ "ledger_priority": "P2",
113
+ "auto_done": True,
114
+ },
115
+ "sensor_github_issue": {
116
+ "trigger_key": "has_new_activity",
117
+ "trigger_if_true": True,
118
+ "ledger_title": "Outreach response: new activity detected",
119
+ "ledger_type": "task",
120
+ "ledger_priority": "P1",
121
+ "auto_done": False, # needs follow-up
122
+ },
123
+ "zero_spec": {
124
+ "trigger_key": "success",
125
+ "trigger_if_true": True,
126
+ "ledger_title": "Zero-spec extracted: {framework} ({paths_count} paths)",
127
+ "ledger_type": "feat",
128
+ "ledger_priority": "P2",
129
+ "auto_done": True,
130
+ },
131
+ }
132
+
78
133
  # Next steps registry — what to do after each tool
79
134
  NEXT_STEPS = {
80
135
  "lint": [
@@ -87,11 +142,16 @@ NEXT_STEPS = {
87
142
  ],
88
143
  "semver": [
89
144
  {"tool": "delimit_explain", "reason": "Generate human-readable changelog", "premium": False},
145
+ {"tool": "delimit_deploy_npm", "reason": "Publish the new version to npm", "premium": False},
90
146
  ],
91
147
  "init": [
92
148
  {"tool": "delimit_gov_health", "reason": "Verify governance is set up correctly", "premium": True},
93
149
  {"tool": "delimit_diagnose", "reason": "Check for any issues", "premium": False},
94
150
  ],
151
+ "deploy_site": [
152
+ {"tool": "delimit_deploy_npm", "reason": "Publish npm package if applicable", "premium": False},
153
+ {"tool": "delimit_ledger_context", "reason": "Check what else needs deploying", "premium": False},
154
+ ],
95
155
  "test_coverage": [
96
156
  {"tool": "delimit_test_generate", "reason": "Generate tests for uncovered files", "premium": False},
97
157
  ],
@@ -102,6 +162,9 @@ NEXT_STEPS = {
102
162
  {"tool": "delimit_gov_status", "reason": "See detailed governance status", "premium": True},
103
163
  {"tool": "delimit_repo_analyze", "reason": "Full repo health report", "premium": True},
104
164
  ],
165
+ "deploy_npm": [
166
+ {"tool": "delimit_deploy_verify", "reason": "Verify the published package", "premium": True},
167
+ ],
105
168
  "deploy_plan": [
106
169
  {"tool": "delimit_deploy_build", "reason": "Build the deployment", "premium": True},
107
170
  ],
@@ -192,12 +255,75 @@ def govern(tool_name: str, result: Dict[str, Any], project_path: str = ".") -> D
192
255
  "source": f"governance:{clean_name}",
193
256
  })
194
257
 
195
- # 2. Auto-create ledger items
258
+ # 1b. Check milestone rules (auto-create DONE items for achievements)
259
+ milestone = MILESTONES.get(clean_name)
260
+ if milestone:
261
+ m_triggered = False
262
+ m_context = {}
263
+
264
+ # Value match (e.g., status == "deployed")
265
+ if "trigger_key" in milestone and "trigger_values" in milestone:
266
+ value = _deep_get(result, milestone["trigger_key"])
267
+ if value in milestone["trigger_values"]:
268
+ m_triggered = True
269
+ m_context = {"value": str(value)}
270
+
271
+ # Boolean check (e.g., success == True)
272
+ if "trigger_key" in milestone and milestone.get("trigger_if_true"):
273
+ value = _deep_get(result, milestone["trigger_key"])
274
+ if value:
275
+ m_triggered = True
276
+
277
+ # Threshold above (e.g., tests_generated > 10)
278
+ if "threshold_key" in milestone:
279
+ value = _deep_get(result, milestone["threshold_key"])
280
+ if value is not None:
281
+ threshold = milestone["threshold"]
282
+ if milestone.get("comparison") == "above" and value > threshold:
283
+ m_triggered = True
284
+ m_context = {"value": str(value), "threshold": str(threshold)}
285
+
286
+ if m_triggered:
287
+ # Build context from result fields for title interpolation
288
+ for key in ("project", "package", "new_version", "framework", "paths_count", "repo"):
289
+ if key not in m_context:
290
+ v = _deep_get(result, key)
291
+ if v is not None:
292
+ m_context[key] = str(v)
293
+ # Special: short question for deliberations
294
+ if "question_short" not in m_context:
295
+ q = _deep_get(result, "question") or _deep_get(result, "note") or ""
296
+ m_context["question_short"] = str(q)[:80]
297
+
298
+ try:
299
+ title = milestone["ledger_title"].format(**m_context)
300
+ except (KeyError, IndexError):
301
+ title = milestone["ledger_title"]
302
+
303
+ auto_items.append({
304
+ "title": title,
305
+ "type": milestone.get("ledger_type", "feat"),
306
+ "priority": milestone.get("ledger_priority", "P1"),
307
+ "source": f"milestone:{clean_name}",
308
+ "auto_done": milestone.get("auto_done", True),
309
+ })
310
+
311
+ # 2. Auto-create ledger items (with dedup — skip if open item with same title exists)
196
312
  if auto_items:
197
313
  try:
198
- from ai.ledger_manager import add_item
314
+ from ai.ledger_manager import add_item, update_item, list_items
315
+ # Load existing open titles for dedup
316
+ existing = list_items(project_path=project_path)
317
+ open_titles = {
318
+ i.get("title", "")
319
+ for i in existing.get("items", [])
320
+ if i.get("status") == "open"
321
+ }
199
322
  created = []
200
323
  for item in auto_items:
324
+ if item["title"] in open_titles:
325
+ logger.debug("Skipping duplicate ledger item: %s", item["title"])
326
+ continue
201
327
  entry = add_item(
202
328
  title=item["title"],
203
329
  type=item["type"],
@@ -205,7 +331,14 @@ def govern(tool_name: str, result: Dict[str, Any], project_path: str = ".") -> D
205
331
  source=item["source"],
206
332
  project_path=project_path,
207
333
  )
208
- created.append(entry.get("added", {}).get("id", ""))
334
+ item_id = entry.get("added", {}).get("id", "")
335
+ created.append(item_id)
336
+ # Auto-close milestone items
337
+ if item.get("auto_done") and item_id:
338
+ try:
339
+ update_item(item_id, status="done", project_path=project_path)
340
+ except Exception:
341
+ pass
209
342
  governed_result["governance"] = {
210
343
  "action": "ledger_items_created",
211
344
  "items": created,
@@ -1806,6 +1806,59 @@ def delimit_license_status() -> Dict[str, Any]:
1806
1806
  return _with_next_steps("license_status", get_license())
1807
1807
 
1808
1808
 
1809
+ # ═══════════════════════════════════════════════════════════════════════
1810
+ # SITE DEPLOY
1811
+ # ═══════════════════════════════════════════════════════════════════════
1812
+
1813
+
1814
+ @mcp.tool()
1815
+ def delimit_deploy_site(
1816
+ project_path: str = ".",
1817
+ message: str = "",
1818
+ ) -> Dict[str, Any]:
1819
+ """Deploy a site — git commit, push, Vercel build, and deploy in one step.
1820
+
1821
+ Handles the full chain: stages changes, commits, pushes to remote,
1822
+ builds with Vercel, deploys to production. No manual steps needed.
1823
+
1824
+ Args:
1825
+ project_path: Path to the site project (must have .vercel/ configured).
1826
+ message: Git commit message. Auto-generated if empty.
1827
+ """
1828
+ from backends.tools_infra import deploy_site
1829
+ env_vars = {}
1830
+ # Auto-detect Delimit UI env vars
1831
+ if "delimit-ui" in project_path or "delimit-ui" in str(Path(project_path).resolve()):
1832
+ chatops_token = os.environ.get("CHATOPS_AUTH_TOKEN", "")
1833
+ env_vars = {
1834
+ "NEXT_PUBLIC_CHATOPS_URL": "https://chatops.delimit.ai",
1835
+ "NEXT_PUBLIC_CHATOPS_TOKEN": chatops_token,
1836
+ }
1837
+ return _with_next_steps("deploy_site", deploy_site(project_path, message, env_vars))
1838
+
1839
+
1840
+ @mcp.tool()
1841
+ def delimit_deploy_npm(
1842
+ project_path: str = ".",
1843
+ bump: str = "patch",
1844
+ tag: str = "latest",
1845
+ dry_run: bool = False,
1846
+ ) -> Dict[str, Any]:
1847
+ """Publish an npm package — bump version, publish to registry, verify.
1848
+
1849
+ Full chain: check auth, bump version, npm publish, verify on registry,
1850
+ git commit + push the version bump. Use dry_run=true to preview first.
1851
+
1852
+ Args:
1853
+ project_path: Path to the npm package (must have package.json).
1854
+ bump: Version bump type — "patch", "minor", or "major".
1855
+ tag: npm dist-tag (default "latest").
1856
+ dry_run: If true, preview without actually publishing.
1857
+ """
1858
+ from backends.tools_infra import deploy_npm
1859
+ return _with_next_steps("deploy_npm", deploy_npm(project_path, bump, tag, dry_run))
1860
+
1861
+
1809
1862
  # ═══════════════════════════════════════════════════════════════════════
1810
1863
  # LEDGER (Strategy + Operational Task Tracking)
1811
1864
  # ═══════════════════════════════════════════════════════════════════════
@@ -1825,8 +1878,8 @@ def _resolve_venture(venture: str) -> str:
1825
1878
  if name == venture or venture in name:
1826
1879
  return info.get("path", ".")
1827
1880
  # Fallback: assume it's a directory name under common roots
1828
- for root in [Path.home(), Path.home() / "ventures", Path("/home")]:
1829
- candidate = root / venture
1881
+ for root in [str(Path.home()), str(Path.home() / "ventures"), "/home"]:
1882
+ candidate = Path(root) / venture
1830
1883
  if candidate.exists():
1831
1884
  return str(candidate)
1832
1885
  return "."
@@ -1837,7 +1890,7 @@ def delimit_ledger_add(
1837
1890
  title: str,
1838
1891
  venture: str = "",
1839
1892
  ledger: str = "ops",
1840
- type: str = "task",
1893
+ item_type: str = "task",
1841
1894
  priority: str = "P1",
1842
1895
  description: str = "",
1843
1896
  source: str = "session",
@@ -1849,16 +1902,16 @@ def delimit_ledger_add(
1849
1902
 
1850
1903
  Args:
1851
1904
  title: What needs to be done.
1852
- venture: Project name or path (e.g. "delimit-gateway", "~/delimit-gateway"). Auto-detects if empty.
1905
+ venture: Project name or path (e.g. "my-project", "./path/to/project"). Auto-detects if empty.
1853
1906
  ledger: "ops" (tasks, bugs, features) or "strategy" (decisions, direction).
1854
- type: task, fix, feat, strategy, consensus.
1907
+ item_type: task, fix, feat, strategy, consensus.
1855
1908
  priority: P0 (urgent), P1 (important), P2 (nice to have).
1856
1909
  description: Details.
1857
1910
  source: Where this came from (session, consensus, focus-group, etc).
1858
1911
  """
1859
1912
  from ai.ledger_manager import add_item
1860
1913
  project = _resolve_venture(venture)
1861
- return add_item(title=title, ledger=ledger, type=type, priority=priority,
1914
+ return add_item(title=title, ledger=ledger, type=item_type, priority=priority,
1862
1915
  description=description, source=source, project_path=project)
1863
1916
 
1864
1917
 
@@ -1994,9 +2047,87 @@ def delimit_deliberate(
1994
2047
  summary["gemini_final_response"] = last_round["responses"].get("gemini", "")[:2000]
1995
2048
  summary["grok_final_response"] = last_round["responses"].get("grok", "")[:2000]
1996
2049
 
2050
+ # Auto-create ledger items from deliberation findings
2051
+ if unanimous and result.get("rounds"):
2052
+ try:
2053
+ from ai.ledger_manager import add_item, list_items
2054
+ # Extract action items from final round responses
2055
+ actions = _extract_deliberation_actions(result, question)
2056
+ # Dedup against existing open items
2057
+ existing = list_items()
2058
+ open_titles = {i.get("title", "") for i in existing.get("items", []) if i.get("status") == "open"}
2059
+ created = []
2060
+ for action in actions:
2061
+ if action["title"] not in open_titles:
2062
+ entry = add_item(
2063
+ title=action["title"],
2064
+ type="strategy",
2065
+ priority="P1",
2066
+ source=f"deliberation:{result.get('saved_to', 'unknown')}",
2067
+ description=action.get("detail", ""),
2068
+ )
2069
+ created.append(entry.get("added", {}).get("id", ""))
2070
+ if created:
2071
+ summary["ledger_items_created"] = created
2072
+ except Exception as e:
2073
+ logger.warning("Deliberation auto-ledger failed: %s", e)
2074
+
1997
2075
  return summary
1998
2076
 
1999
2077
 
2078
+ def _extract_deliberation_actions(result: Dict, question: str) -> List[Dict[str, str]]:
2079
+ """Parse deliberation transcript for actionable items.
2080
+
2081
+ Looks for numbered lists, bullet points, and recommendation patterns
2082
+ in the final round of model responses.
2083
+ """
2084
+ import re
2085
+ actions = []
2086
+ seen = set()
2087
+
2088
+ if not result.get("rounds"):
2089
+ return actions
2090
+
2091
+ last_round = result["rounds"][-1]
2092
+ q_short = question[:60].rstrip()
2093
+
2094
+ for model_id, response in last_round.get("responses", {}).items():
2095
+ if not response or "[error" in response.lower():
2096
+ continue
2097
+
2098
+ # Look for numbered items (1. Do X, 2. Do Y)
2099
+ numbered = re.findall(r'(?:^|\n)\s*\d+[\.\)]\s*\*?\*?(.+?)(?:\n|$)', response)
2100
+ for item in numbered:
2101
+ clean = item.strip().rstrip('.*')
2102
+ # Skip very short or verdict lines
2103
+ if len(clean) < 15 or 'verdict' in clean.lower():
2104
+ continue
2105
+ key = clean[:50].lower()
2106
+ if key not in seen:
2107
+ seen.add(key)
2108
+ actions.append({
2109
+ "title": f"[Consensus] {clean[:100]}",
2110
+ "detail": f"From deliberation on: {q_short}. Source model: {model_id}.",
2111
+ })
2112
+
2113
+ # Look for bullet points (- Do X, * Do Y)
2114
+ bullets = re.findall(r'(?:^|\n)\s*[\-\*]\s*\*?\*?(.+?)(?:\n|$)', response)
2115
+ for item in bullets:
2116
+ clean = item.strip().rstrip('.*')
2117
+ if len(clean) < 15 or 'verdict' in clean.lower():
2118
+ continue
2119
+ key = clean[:50].lower()
2120
+ if key not in seen:
2121
+ seen.add(key)
2122
+ actions.append({
2123
+ "title": f"[Consensus] {clean[:100]}",
2124
+ "detail": f"From deliberation on: {q_short}. Source model: {model_id}.",
2125
+ })
2126
+
2127
+ # Cap at 10 items to avoid noise
2128
+ return actions[:10]
2129
+
2130
+
2000
2131
  # ═══════════════════════════════════════════════════════════════════════
2001
2132
  # ENTRY POINT
2002
2133
  # ═══════════════════════════════════════════════════════════════════════
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "delimit-cli",
3
- "version": "3.6.10",
4
- "description": "Catch breaking API changes before they ship. GitHub Action + CLI for OpenAPI specs.",
3
+ "version": "3.6.12",
4
+ "description": "Governance toolkit for AI coding assistants — API checks, persistent memory, consensus, security.",
5
5
  "main": "index.js",
6
6
  "files": [
7
7
  "bin/",
@@ -18,7 +18,7 @@
18
18
  },
19
19
  "scripts": {
20
20
  "postinstall": "echo '\\nRun: npx delimit-cli setup\\n'",
21
- "test": "node --test tests/cli.test.js"
21
+ "test": "node --test tests/setup-onboarding.test.js"
22
22
  },
23
23
  "keywords": [
24
24
  "openapi",