delimit-cli 3.8.2 → 3.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -98,6 +98,26 @@ async function main() {
98
98
  fs.copyFileSync(serverSource, path.join(DELIMIT_HOME, 'server', 'mcp-server.py'));
99
99
  }
100
100
 
101
+ // Download compiled Pro modules (platform-specific)
102
+ const proDir = path.join(DELIMIT_HOME, 'server', 'ai');
103
+ const pyVer = (() => { try { return execSync(`${python} -c "import sys; print(f'cp{sys.version_info.major}{sys.version_info.minor}')"`, { encoding: 'utf-8' }).trim(); } catch { return 'cp310'; } })();
104
+ const arch = (() => { try { return execSync('uname -m', { encoding: 'utf-8' }).trim(); } catch { return 'x86_64'; } })();
105
+ const osName = process.platform === 'darwin' ? 'macos' : 'linux';
106
+ const artifact = `${osName}-${arch}-${pyVer}`;
107
+ const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '..', 'package.json'), 'utf-8'));
108
+ const proVersion = pkg.proModuleVersion || '3.8.2';
109
+ const proUrl = `https://delimit.ai/releases/v${proVersion}/delimit-pro-${artifact}.tar.gz`;
110
+
111
+ try {
112
+ const proTarball = path.join(DELIMIT_HOME, 'pro.tar.gz');
113
+ execSync(`curl -sL "${proUrl}" -o "${proTarball}" --fail`, { stdio: 'pipe', timeout: 30000 });
114
+ execSync(`tar -xzf "${proTarball}" -C "${proDir}"`, { stdio: 'pipe' });
115
+ fs.unlinkSync(proTarball);
116
+ log(` ${green('✓')} Pro modules installed (${artifact})`);
117
+ } catch {
118
+ log(` ${dim(` Pro modules not available for ${artifact} — free tools work fine`)}`);
119
+ }
120
+
101
121
  // Install Python deps into isolated venv with pinned versions
102
122
  log(` ${dim(' Installing Python dependencies...')}`);
103
123
  const venvDir = path.join(DELIMIT_HOME, 'venv');
@@ -1,450 +1,6 @@
1
- """
2
- Delimit Deliberation Engine Multi-round consensus with real model-to-model debate.
3
-
4
- Passes each model's EXACT raw response to the other models for counter-arguments.
5
- Rounds continue until unanimous agreement or max rounds reached.
6
-
7
- Models are configured via ~/.delimit/models.json — users choose which AI models
8
- to include in deliberations. Supports any OpenAI-compatible API.
9
- """
10
-
11
- import json
12
- import logging
13
- import os
14
- import shutil
15
- import time
16
- import urllib.request
17
- import urllib.error
18
- from pathlib import Path
19
- from typing import Any, Dict, List, Optional
20
-
21
- logger = logging.getLogger("delimit.deliberation")
22
-
23
- DELIBERATION_DIR = Path.home() / ".delimit" / "deliberations"
24
- MODELS_CONFIG = Path.home() / ".delimit" / "models.json"
25
-
26
- DEFAULT_MODELS = {
27
- "grok": {
28
- "name": "Grok",
29
- "api_url": "https://api.x.ai/v1/chat/completions",
30
- "model": "grok-4-0709",
31
- "env_key": "XAI_API_KEY",
32
- "enabled": False,
33
- },
34
- "gemini": {
35
- "name": "Gemini",
36
- "api_url": "https://us-central1-aiplatform.googleapis.com/v1/projects/{project}/locations/us-central1/publishers/google/models/gemini-2.5-flash:generateContent",
37
- "model": "gemini-2.5-flash",
38
- "env_key": "GOOGLE_APPLICATION_CREDENTIALS",
39
- "enabled": False,
40
- "format": "vertex_ai",
41
- "prefer_cli": True, # Use gemini CLI if available (Ultra plan), fall back to Vertex AI
42
- "cli_command": "gemini",
43
- },
44
- "openai": {
45
- "name": "OpenAI",
46
- "api_url": "https://api.openai.com/v1/chat/completions",
47
- "model": "gpt-4o",
48
- "env_key": "OPENAI_API_KEY",
49
- "enabled": False,
50
- "prefer_cli": True, # Use Codex CLI if available, fall back to API
51
- },
52
- "anthropic": {
53
- "name": "Claude",
54
- "api_url": "https://api.anthropic.com/v1/messages",
55
- "model": "claude-sonnet-4-5-20250514",
56
- "env_key": "ANTHROPIC_API_KEY",
57
- "enabled": False,
58
- "format": "anthropic",
59
- "prefer_cli": True, # Use claude CLI if available (Pro/Max), fall back to API
60
- "cli_command": "claude",
61
- },
62
- }
63
-
64
-
65
- def get_models_config() -> Dict[str, Any]:
66
- """Load model configuration. Auto-detects available API keys."""
67
- if MODELS_CONFIG.exists():
68
- try:
69
- return json.loads(MODELS_CONFIG.read_text())
70
- except Exception:
71
- pass
72
-
73
- # Auto-detect from environment
74
- config = {}
75
- for model_id, defaults in DEFAULT_MODELS.items():
76
- key = os.environ.get(defaults.get("env_key", ""), "")
77
-
78
- if defaults.get("prefer_cli"):
79
- # Prefer CLI (uses existing subscription) over API (extra cost)
80
- import shutil
81
- cli_cmd = defaults.get("cli_command", "codex")
82
- cli_path = shutil.which(cli_cmd)
83
- if cli_path:
84
- config[model_id] = {
85
- **defaults,
86
- "format": "codex_cli",
87
- "enabled": True,
88
- "codex_path": cli_path,
89
- "backend": "cli",
90
- }
91
- elif key:
92
- config[model_id] = {
93
- **defaults,
94
- "api_key": key,
95
- "enabled": True,
96
- "backend": "api",
97
- }
98
- else:
99
- config[model_id] = {**defaults, "enabled": False}
100
- else:
101
- config[model_id] = {
102
- **defaults,
103
- "api_key": key,
104
- "enabled": bool(key),
105
- }
106
-
107
- return config
108
-
109
-
110
- def configure_models() -> Dict[str, Any]:
111
- """Return current model configuration and what's available."""
112
- config = get_models_config()
113
- available = {k: v for k, v in config.items() if v.get("enabled")}
114
- missing = {k: v for k, v in config.items() if not v.get("enabled")}
115
-
116
- model_details = {}
117
- for k, v in available.items():
118
- backend = v.get("backend", "api")
119
- if v.get("format") == "codex_cli":
120
- backend = "cli"
121
- model_details[k] = {"name": v.get("name", k), "backend": backend, "model": v.get("model", "")}
122
-
123
- return {
124
- "configured_models": list(available.keys()),
125
- "model_details": model_details,
126
- "missing_models": {k: f"Set {v.get('env_key', 'key')} or install {v.get('cli_command', '')} CLI" for k, v in missing.items()},
127
- "config_path": str(MODELS_CONFIG),
128
- "note": "CLI backends use your existing subscription (no extra API cost). "
129
- "API backends require separate API keys.",
130
- }
131
-
132
-
133
- def _call_cli(prompt: str, system_prompt: str = "", cli_path: str = "", cli_command: str = "codex") -> str:
134
- """Call an AI CLI tool (codex or claude) via subprocess. Uses existing subscription — no API cost."""
135
- import subprocess
136
-
137
- if not cli_path:
138
- cli_path = shutil.which(cli_command) or ""
139
- if not cli_path:
140
- return f"[{cli_command} unavailable — CLI not found in PATH]"
141
-
142
- full_prompt = f"{system_prompt}\n\n{prompt}" if system_prompt else prompt
143
-
144
- # Build command based on which CLI
145
- if "claude" in cli_command:
146
- cmd = [cli_path, "--print", "--dangerously-skip-permissions", full_prompt]
147
- else:
148
- # codex
149
- cmd = [cli_path, "exec", "--dangerously-bypass-approvals-and-sandbox", full_prompt]
150
-
151
- try:
152
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
153
- output = result.stdout.strip()
154
- if not output and result.stderr:
155
- return f"[{cli_command} error: {result.stderr[:300]}]"
156
- return output or f"[{cli_command} returned empty response]"
157
- except subprocess.TimeoutExpired:
158
- return f"[{cli_command} timed out after 120s]"
159
- except Exception as e:
160
- return f"[{cli_command} error: {e}]"
161
-
162
-
163
- def _call_model(model_id: str, config: Dict, prompt: str, system_prompt: str = "") -> str:
164
- """Call any supported model — OpenAI-compatible API, Vertex AI, or CLI (codex/claude)."""
165
- fmt = config.get("format", "openai")
166
-
167
- # CLI-based models (codex, claude) — uses existing subscription, no API cost
168
- if fmt == "codex_cli":
169
- cli_path = config.get("codex_path", "")
170
- cli_command = config.get("cli_command", "codex")
171
- return _call_cli(prompt, system_prompt, cli_path=cli_path, cli_command=cli_command)
172
-
173
- api_key = config.get("api_key") or os.environ.get(config.get("env_key", ""), "")
174
- # Vertex AI uses service account auth, not API key
175
- if not api_key and fmt != "vertex_ai":
176
- return f"[{config.get('name', model_id)} unavailable — {config.get('env_key')} not set]"
177
-
178
- api_url = config["api_url"]
179
- model = config.get("model", "")
180
-
181
- try:
182
- if fmt == "vertex_ai":
183
- # Vertex AI format — use google-auth for access token
184
- try:
185
- import google.auth
186
- import google.auth.transport.requests
187
- # Explicitly set credentials path if not in env
188
- creds_path = "/root/.config/gcloud/application_default_credentials.json"
189
- if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") and os.path.exists(creds_path):
190
- os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = creds_path
191
- creds, project = google.auth.default()
192
- creds.refresh(google.auth.transport.requests.Request())
193
- actual_url = api_url.replace("{project}", project or os.environ.get("GOOGLE_CLOUD_PROJECT", ""))
194
- data = json.dumps({
195
- "contents": [{"role": "user", "parts": [{"text": f"{system_prompt}\n\n{prompt}" if system_prompt else prompt}]}],
196
- "generationConfig": {"maxOutputTokens": 4096, "temperature": 0.7},
197
- }).encode()
198
- req = urllib.request.Request(
199
- actual_url,
200
- data=data,
201
- headers={
202
- "Authorization": f"Bearer {creds.token}",
203
- "Content-Type": "application/json",
204
- },
205
- method="POST",
206
- )
207
- except ImportError:
208
- return f"[Gemini unavailable — install google-auth: pip install google-auth]"
209
- elif fmt == "google":
210
- # Google Generative AI format (API key)
211
- data = json.dumps({
212
- "contents": [{"role": "user", "parts": [{"text": f"{system_prompt}\n\n{prompt}" if system_prompt else prompt}]}],
213
- "generationConfig": {"maxOutputTokens": 4096, "temperature": 0.7},
214
- }).encode()
215
- req = urllib.request.Request(
216
- f"{api_url}?key={api_key}",
217
- data=data,
218
- headers={"Content-Type": "application/json"},
219
- method="POST",
220
- )
221
- elif fmt == "anthropic":
222
- # Anthropic Messages API
223
- data = json.dumps({
224
- "model": model,
225
- "max_tokens": 4096,
226
- "system": system_prompt or "You are a helpful assistant participating in a multi-model deliberation.",
227
- "messages": [{"role": "user", "content": prompt}],
228
- }).encode()
229
- req = urllib.request.Request(
230
- api_url,
231
- data=data,
232
- headers={
233
- "x-api-key": api_key,
234
- "anthropic-version": "2023-06-01",
235
- "Content-Type": "application/json",
236
- "User-Agent": "Delimit/3.6.0",
237
- },
238
- method="POST",
239
- )
240
- else:
241
- # OpenAI-compatible format (works for xAI, OpenAI, etc.)
242
- messages = []
243
- if system_prompt:
244
- messages.append({"role": "system", "content": system_prompt})
245
- messages.append({"role": "user", "content": prompt})
246
-
247
- data = json.dumps({
248
- "model": model,
249
- "messages": messages,
250
- "temperature": 0.7,
251
- "max_tokens": 4096,
252
- }).encode()
253
- req = urllib.request.Request(
254
- api_url,
255
- data=data,
256
- headers={
257
- "Authorization": f"Bearer {api_key}",
258
- "Content-Type": "application/json",
259
- "User-Agent": "Delimit/3.6.0",
260
- },
261
- method="POST",
262
- )
263
-
264
- with urllib.request.urlopen(req, timeout=120) as resp:
265
- result = json.loads(resp.read())
266
-
267
- if fmt in ("google", "vertex_ai"):
268
- return result["candidates"][0]["content"]["parts"][0]["text"]
269
- elif fmt == "anthropic":
270
- return result["content"][0]["text"]
271
- else:
272
- return result["choices"][0]["message"]["content"]
273
-
274
- except Exception as e:
275
- return f"[{config.get('name', model_id)} error: {e}]"
276
-
277
-
278
- def deliberate(
279
- question: str,
280
- context: str = "",
281
- max_rounds: int = 3,
282
- mode: str = "dialogue",
283
- require_unanimous: bool = True,
284
- save_path: Optional[str] = None,
285
- ) -> Dict[str, Any]:
286
- """
287
- Run a multi-round deliberation across all configured AI models.
288
-
289
- Modes:
290
- - "debate": Long-form essays, models respond to each other's full arguments (3 rounds default)
291
- - "dialogue": Short conversational turns, models build on each other like a group chat (6 rounds default)
292
-
293
- Returns the full deliberation transcript + final verdict.
294
- """
295
- DELIBERATION_DIR.mkdir(parents=True, exist_ok=True)
296
-
297
- config = get_models_config()
298
- enabled_models = {k: v for k, v in config.items() if v.get("enabled")}
299
-
300
- if len(enabled_models) < 2:
301
- return {
302
- "error": "Need at least 2 AI models for deliberation.",
303
- "configured": list(enabled_models.keys()),
304
- "missing": {k: f"Set {v.get('env_key', 'key')}" for k, v in config.items() if not v.get("enabled")},
305
- "tip": "Set API key environment variables or create ~/.delimit/models.json",
306
- }
307
-
308
- model_ids = list(enabled_models.keys())
309
-
310
- # Dialogue mode uses more rounds with shorter responses
311
- if mode == "dialogue" and max_rounds == 3:
312
- max_rounds = 6
313
-
314
- transcript = {
315
- "question": question,
316
- "context": context,
317
- "mode": mode,
318
- "models": model_ids,
319
- "started_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
320
- "rounds": [],
321
- "thread": [], # flat conversation thread for dialogue mode
322
- "unanimous": False,
323
- "final_verdict": None,
324
- }
325
-
326
- if mode == "dialogue":
327
- system_prompt = (
328
- "You are in a group chat with other AI models. Keep responses to 2-4 sentences. "
329
- "Be direct and conversational — this is a discussion, not an essay. "
330
- "Build on what others said. Disagree specifically if you disagree. "
331
- "When you're ready to agree, say VERDICT: AGREE. "
332
- "If you disagree, say VERDICT: DISAGREE — [why in one sentence]."
333
- )
334
- else:
335
- system_prompt = (
336
- "You are participating in a structured multi-model deliberation with other AI models. "
337
- "You will see other models' exact responses and must engage with their specific arguments. "
338
- "At the END of your response, you MUST include exactly one of these lines:\n"
339
- "VERDICT: AGREE\n"
340
- "VERDICT: DISAGREE — [one sentence reason]\n"
341
- "VERDICT: AGREE WITH MODIFICATIONS — [one sentence modification]\n"
342
- "Do not hedge. Take a clear position."
343
- )
344
-
345
- full_prompt = f"{context}\n\nQUESTION:\n{question}" if context else question
346
-
347
- # Round 1: Independent responses
348
- logger.info(f"Deliberation Round 1 ({mode} mode): Independent responses")
349
- round1 = {"round": 1, "type": "independent", "responses": {}}
350
-
351
- for model_id in model_ids:
352
- if mode == "dialogue":
353
- # Shorter initial prompt for dialogue
354
- r1_prompt = f"{full_prompt}\n\nGive your initial take in 2-4 sentences. Don't write an essay."
355
- else:
356
- r1_prompt = full_prompt
357
- response = _call_model(model_id, enabled_models[model_id], r1_prompt, system_prompt)
358
- round1["responses"][model_id] = response
359
- # Build flat thread
360
- transcript["thread"].append({"model": model_id, "round": 1, "text": response})
361
- logger.info(f" {model_id}: {len(response)} chars")
362
-
363
- transcript["rounds"].append(round1)
364
-
365
- # Subsequent rounds: Models see each other's responses
366
- for round_num in range(2, max_rounds + 1):
367
- logger.info(f"Deliberation Round {round_num} ({mode})")
368
- round_data = {"round": round_num, "type": "deliberation", "responses": {}}
369
- prev = transcript["rounds"][-1]["responses"]
370
-
371
- for model_id in model_ids:
372
- if mode == "dialogue":
373
- # Dialogue: show the full conversation thread so far
374
- thread_text = f"Topic: {question}\n\nConversation so far:\n"
375
- for entry in transcript["thread"]:
376
- name = enabled_models.get(entry["model"], {}).get("name", entry["model"])
377
- thread_text += f"\n[{name}]: {entry['text']}\n"
378
- thread_text += (
379
- f"\nYour turn ({enabled_models[model_id]['name']}). "
380
- f"Respond in 2-4 sentences to the conversation above. "
381
- f"If you agree with the emerging consensus, say VERDICT: AGREE. "
382
- f"If not, push back specifically."
383
- )
384
- cross_prompt = thread_text
385
- else:
386
- # Debate: show other models' full responses from last round
387
- others_text = ""
388
- for other_id in model_ids:
389
- if other_id != model_id:
390
- others_text += (
391
- f"\n=== {enabled_models[other_id]['name'].upper()}'S EXACT RESPONSE "
392
- f"(Round {round_num - 1}) ===\n"
393
- f"{prev[other_id]}\n"
394
- )
395
- cross_prompt = (
396
- f"DELIBERATION ROUND {round_num}\n\n"
397
- f"Original question: {question}\n"
398
- f"{others_text}\n"
399
- f"Respond to the other models' SPECIFIC arguments. "
400
- f"Quote them directly if you disagree. "
401
- f"End with VERDICT: AGREE / DISAGREE / AGREE WITH MODIFICATIONS."
402
- )
403
-
404
- response = _call_model(model_id, enabled_models[model_id], cross_prompt, system_prompt)
405
- round_data["responses"][model_id] = response
406
- transcript["thread"].append({"model": model_id, "round": round_num, "text": response})
407
-
408
- transcript["rounds"].append(round_data)
409
-
410
- # Check for unanimous agreement
411
- all_agree = True
412
- for model_id in model_ids:
413
- resp = round_data["responses"][model_id].upper()
414
- if "VERDICT:" in resp:
415
- verdict_part = resp.split("VERDICT:")[-1].strip()
416
- agrees = verdict_part.startswith("AGREE")
417
- if not agrees:
418
- all_agree = False
419
- else:
420
- all_agree = False # No verdict = no agreement
421
-
422
- if all_agree:
423
- transcript["unanimous"] = True
424
- transcript["final_verdict"] = "UNANIMOUS AGREEMENT"
425
- transcript["agreed_at_round"] = round_num
426
- break
427
- else:
428
- # Max rounds reached
429
- transcript["final_verdict"] = "MAX ROUNDS REACHED"
430
- for model_id in model_ids:
431
- resp = transcript["rounds"][-1]["responses"][model_id].upper()
432
- verdict = "unknown"
433
- if "VERDICT:" in resp:
434
- verdict_part = resp.split("VERDICT:")[-1].strip()
435
- verdict = "agree" if verdict_part.startswith("AGREE") else "disagree"
436
- transcript[f"{model_id}_final"] = verdict
437
-
438
- transcript["completed_at"] = time.strftime("%Y-%m-%dT%H:%M:%SZ")
439
-
440
- # Save transcript
441
- save_to = save_path
442
- if not save_to:
443
- ts = time.strftime("%Y%m%d_%H%M%S")
444
- save_to = str(DELIBERATION_DIR / f"deliberation_{ts}.json")
445
-
446
- Path(save_to).parent.mkdir(parents=True, exist_ok=True)
447
- Path(save_to).write_text(json.dumps(transcript, indent=2))
448
- transcript["saved_to"] = save_to
449
-
450
- return transcript
1
+ """Delimit Deliberation Engine — compiled binary required. Run: npx delimit-cli setup"""
2
+ def get_models_config(): raise ImportError("Requires Pro module")
3
+ def configure_models(): raise ImportError("Requires Pro module")
4
+ def deliberate(**kwargs): raise ImportError("Requires Pro module")
5
+ def _call_cli(*a, **kw): raise ImportError("Requires Pro module")
6
+ def _call_model(*a, **kw): raise ImportError("Requires Pro module")
@@ -1,391 +1,4 @@
1
- """
2
- Delimit Governance Layer — the loop that keeps AI agents on track.
3
-
4
- Every tool flows through governance. Governance:
5
- 1. Logs what happened (evidence)
6
- 2. Checks result against rules (thresholds, policies)
7
- 3. Auto-creates ledger items for failures/warnings
8
- 4. Suggests next steps (loops back to keep building)
9
-
10
- This replaces _with_next_steps — governance IS the next step system.
11
- """
12
-
13
- import json
14
- import logging
15
- import time
16
- from pathlib import Path
17
- from typing import Any, Dict, List, Optional
18
-
19
- logger = logging.getLogger("delimit.governance")
20
-
21
-
22
- # Governance rules — what triggers auto-ledger-creation
23
- RULES = {
24
- "test_coverage": {
25
- "threshold_key": "line_coverage",
26
- "threshold": 80,
27
- "comparison": "below",
28
- "ledger_title": "Test coverage below {threshold}% — currently {value}%",
29
- "ledger_type": "fix",
30
- "ledger_priority": "P1",
31
- },
32
- "security_audit": {
33
- "trigger_key": "vulnerabilities",
34
- "trigger_if_nonempty": True,
35
- "ledger_title": "Security: {count} vulnerabilities found",
36
- "ledger_type": "fix",
37
- "ledger_priority": "P0",
38
- },
39
- "security_scan": {
40
- "trigger_key": "vulnerabilities",
41
- "trigger_if_nonempty": True,
42
- "ledger_title": "Security scan: {count} issues detected",
43
- "ledger_type": "fix",
44
- "ledger_priority": "P0",
45
- },
46
- "lint": {
47
- "trigger_key": "violations",
48
- "trigger_if_nonempty": True,
49
- "ledger_title": "API lint: {count} violations found",
50
- "ledger_type": "fix",
51
- "ledger_priority": "P1",
52
- },
53
- "deliberate": {
54
- "trigger_key": "unanimous",
55
- "trigger_if_true": True,
56
- "extract_actions": True,
57
- "ledger_title": "Deliberation consensus reached — action items pending",
58
- "ledger_type": "strategy",
59
- "ledger_priority": "P1",
60
- },
61
- "gov_health": {
62
- "trigger_key": "status",
63
- "trigger_values": ["not_initialized", "degraded"],
64
- "ledger_title": "Governance health: {value} — needs attention",
65
- "ledger_type": "fix",
66
- "ledger_priority": "P1",
67
- },
68
- "docs_validate": {
69
- "threshold_key": "coverage_percent",
70
- "threshold": 50,
71
- "comparison": "below",
72
- "ledger_title": "Documentation coverage below {threshold}% — currently {value}%",
73
- "ledger_type": "task",
74
- "ledger_priority": "P2",
75
- },
76
- }
77
-
78
- # Milestone rules — auto-create DONE ledger items for significant completions.
79
- # Unlike threshold RULES (which create open items for problems), milestones
80
- # record achievements so the ledger reflects what was shipped.
81
- MILESTONES = {
82
- "deploy_site": {
83
- "trigger_key": "status",
84
- "trigger_values": ["deployed"],
85
- "ledger_title": "Deployed: {project}",
86
- "ledger_type": "feat",
87
- "ledger_priority": "P1",
88
- "auto_done": True,
89
- },
90
- "deploy_npm": {
91
- "trigger_key": "status",
92
- "trigger_values": ["published"],
93
- "ledger_title": "Published: {package}@{new_version}",
94
- "ledger_type": "feat",
95
- "ledger_priority": "P1",
96
- "auto_done": True,
97
- },
98
- "deliberate": {
99
- "trigger_key": "status",
100
- "trigger_values": ["unanimous"],
101
- "ledger_title": "Consensus reached: {question_short}",
102
- "ledger_type": "strategy",
103
- "ledger_priority": "P1",
104
- "auto_done": True,
105
- },
106
- "test_generate": {
107
- "threshold_key": "tests_generated",
108
- "threshold": 10,
109
- "comparison": "above",
110
- "ledger_title": "Generated {value} tests",
111
- "ledger_type": "feat",
112
- "ledger_priority": "P2",
113
- "auto_done": True,
114
- },
115
- "sensor_github_issue": {
116
- "trigger_key": "has_new_activity",
117
- "trigger_if_true": True,
118
- "ledger_title": "Outreach response: new activity detected",
119
- "ledger_type": "task",
120
- "ledger_priority": "P1",
121
- "auto_done": False, # needs follow-up
122
- },
123
- "zero_spec": {
124
- "trigger_key": "success",
125
- "trigger_if_true": True,
126
- "ledger_title": "Zero-spec extracted: {framework} ({paths_count} paths)",
127
- "ledger_type": "feat",
128
- "ledger_priority": "P2",
129
- "auto_done": True,
130
- },
131
- }
132
-
133
- # Next steps registry — what to do after each tool
134
- NEXT_STEPS = {
135
- "lint": [
136
- {"tool": "delimit_explain", "reason": "Get migration guide for violations", "premium": False},
137
- {"tool": "delimit_semver", "reason": "Classify the version bump", "premium": False},
138
- ],
139
- "diff": [
140
- {"tool": "delimit_semver", "reason": "Classify changes as MAJOR/MINOR/PATCH", "premium": False},
141
- {"tool": "delimit_policy", "reason": "Check against governance policies", "premium": False},
142
- ],
143
- "semver": [
144
- {"tool": "delimit_explain", "reason": "Generate human-readable changelog", "premium": False},
145
- {"tool": "delimit_deploy_npm", "reason": "Publish the new version to npm", "premium": False},
146
- ],
147
- "init": [
148
- {"tool": "delimit_gov_health", "reason": "Verify governance is set up correctly", "premium": True},
149
- {"tool": "delimit_diagnose", "reason": "Check for any issues", "premium": False},
150
- ],
151
- "deploy_site": [
152
- {"tool": "delimit_deploy_npm", "reason": "Publish npm package if applicable", "premium": False},
153
- {"tool": "delimit_ledger_context", "reason": "Check what else needs deploying", "premium": False},
154
- ],
155
- "test_coverage": [
156
- {"tool": "delimit_test_generate", "reason": "Generate tests for uncovered files", "premium": False},
157
- ],
158
- "security_audit": [
159
- {"tool": "delimit_evidence_collect", "reason": "Collect evidence of findings", "premium": True},
160
- ],
161
- "gov_health": [
162
- {"tool": "delimit_gov_status", "reason": "See detailed governance status", "premium": True},
163
- {"tool": "delimit_repo_analyze", "reason": "Full repo health report", "premium": True},
164
- ],
165
- "deploy_npm": [
166
- {"tool": "delimit_deploy_verify", "reason": "Verify the published package", "premium": True},
167
- ],
168
- "deploy_plan": [
169
- {"tool": "delimit_deploy_build", "reason": "Build the deployment", "premium": True},
170
- ],
171
- "deploy_build": [
172
- {"tool": "delimit_deploy_publish", "reason": "Publish the build", "premium": True},
173
- ],
174
- "deploy_publish": [
175
- {"tool": "delimit_deploy_verify", "reason": "Verify the deployment", "premium": True},
176
- ],
177
- "deploy_verify": [
178
- {"tool": "delimit_deploy_rollback", "reason": "Rollback if unhealthy", "premium": True},
179
- ],
180
- "repo_analyze": [
181
- {"tool": "delimit_security_audit", "reason": "Scan for security issues", "premium": False},
182
- {"tool": "delimit_gov_health", "reason": "Check governance status", "premium": True},
183
- ],
184
- "deliberate": [
185
- {"tool": "delimit_ledger_context", "reason": "Review what's on the ledger after consensus", "premium": False},
186
- ],
187
- "ledger_add": [
188
- {"tool": "delimit_ledger_context", "reason": "See updated ledger state", "premium": False},
189
- ],
190
- "diagnose": [
191
- {"tool": "delimit_init", "reason": "Initialize governance if not set up", "premium": False},
192
- ],
193
- }
194
-
195
-
196
- def govern(tool_name: str, result: Dict[str, Any], project_path: str = ".") -> Dict[str, Any]:
197
- """
198
- Run governance on a tool's result. This is the central loop.
199
-
200
- 1. Check result against rules
201
- 2. Auto-create ledger items if thresholds breached
202
- 3. Add next_steps for the AI to continue
203
- 4. Return enriched result
204
-
205
- Every tool should call this before returning.
206
- """
207
- # Strip "delimit_" prefix for rule matching
208
- clean_name = tool_name.replace("delimit_", "")
209
-
210
- governed_result = dict(result)
211
-
212
- # 1. Check governance rules
213
- rule = RULES.get(clean_name)
214
- auto_items = []
215
-
216
- if rule:
217
- triggered = False
218
- context = {}
219
-
220
- # Threshold check (e.g., coverage < 80%)
221
- if "threshold_key" in rule:
222
- value = _deep_get(result, rule["threshold_key"])
223
- if value is not None:
224
- threshold = rule["threshold"]
225
- if rule.get("comparison") == "below" and value < threshold:
226
- triggered = True
227
- context = {"value": f"{value:.1f}" if isinstance(value, float) else str(value), "threshold": str(threshold)}
228
-
229
- # Non-empty list check (e.g., vulnerabilities found)
230
- if "trigger_key" in rule and "trigger_if_nonempty" in rule:
231
- items = _deep_get(result, rule["trigger_key"])
232
- if items and isinstance(items, list) and len(items) > 0:
233
- triggered = True
234
- context = {"count": str(len(items))}
235
-
236
- # Value match check (e.g., status == "degraded")
237
- if "trigger_key" in rule and "trigger_values" in rule:
238
- value = _deep_get(result, rule["trigger_key"])
239
- if value in rule["trigger_values"]:
240
- triggered = True
241
- context = {"value": str(value)}
242
-
243
- # Boolean check (e.g., unanimous == True)
244
- if "trigger_key" in rule and "trigger_if_true" in rule:
245
- value = _deep_get(result, rule["trigger_key"])
246
- if value:
247
- triggered = True
248
-
249
- if triggered:
250
- title = rule["ledger_title"].format(**context) if context else rule["ledger_title"]
251
- auto_items.append({
252
- "title": title,
253
- "type": rule.get("ledger_type", "task"),
254
- "priority": rule.get("ledger_priority", "P1"),
255
- "source": f"governance:{clean_name}",
256
- })
257
-
258
- # 1b. Check milestone rules (auto-create DONE items for achievements)
259
- milestone = MILESTONES.get(clean_name)
260
- if milestone:
261
- m_triggered = False
262
- m_context = {}
263
-
264
- # Value match (e.g., status == "deployed")
265
- if "trigger_key" in milestone and "trigger_values" in milestone:
266
- value = _deep_get(result, milestone["trigger_key"])
267
- if value in milestone["trigger_values"]:
268
- m_triggered = True
269
- m_context = {"value": str(value)}
270
-
271
- # Boolean check (e.g., success == True)
272
- if "trigger_key" in milestone and milestone.get("trigger_if_true"):
273
- value = _deep_get(result, milestone["trigger_key"])
274
- if value:
275
- m_triggered = True
276
-
277
- # Threshold above (e.g., tests_generated > 10)
278
- if "threshold_key" in milestone:
279
- value = _deep_get(result, milestone["threshold_key"])
280
- if value is not None:
281
- threshold = milestone["threshold"]
282
- if milestone.get("comparison") == "above" and value > threshold:
283
- m_triggered = True
284
- m_context = {"value": str(value), "threshold": str(threshold)}
285
-
286
- if m_triggered:
287
- # Build context from result fields for title interpolation
288
- for key in ("project", "package", "new_version", "framework", "paths_count", "repo"):
289
- if key not in m_context:
290
- v = _deep_get(result, key)
291
- if v is not None:
292
- m_context[key] = str(v)
293
- # Special: short question for deliberations
294
- if "question_short" not in m_context:
295
- q = _deep_get(result, "question") or _deep_get(result, "note") or ""
296
- m_context["question_short"] = str(q)[:80]
297
-
298
- try:
299
- title = milestone["ledger_title"].format(**m_context)
300
- except (KeyError, IndexError):
301
- title = milestone["ledger_title"]
302
-
303
- auto_items.append({
304
- "title": title,
305
- "type": milestone.get("ledger_type", "feat"),
306
- "priority": milestone.get("ledger_priority", "P1"),
307
- "source": f"milestone:{clean_name}",
308
- "auto_done": milestone.get("auto_done", True),
309
- })
310
-
311
- # 2. Auto-create ledger items (with dedup — skip if open item with same title exists)
312
- if auto_items:
313
- try:
314
- from ai.ledger_manager import add_item, update_item, list_items
315
- # Load existing open titles for dedup
316
- existing = list_items(project_path=project_path)
317
- open_titles = {
318
- i.get("title", "")
319
- for i in existing.get("items", [])
320
- if i.get("status") == "open"
321
- }
322
- created = []
323
- for item in auto_items:
324
- if item["title"] in open_titles:
325
- logger.debug("Skipping duplicate ledger item: %s", item["title"])
326
- continue
327
- entry = add_item(
328
- title=item["title"],
329
- type=item["type"],
330
- priority=item["priority"],
331
- source=item["source"],
332
- project_path=project_path,
333
- )
334
- item_id = entry.get("added", {}).get("id", "")
335
- created.append(item_id)
336
- # Auto-close milestone items
337
- if item.get("auto_done") and item_id:
338
- try:
339
- update_item(item_id, status="done", project_path=project_path)
340
- except Exception:
341
- pass
342
- governed_result["governance"] = {
343
- "action": "ledger_items_created",
344
- "items": created,
345
- "reason": "Governance rule triggered by tool result",
346
- }
347
- except Exception as e:
348
- logger.warning("Governance auto-ledger failed: %s", e)
349
-
350
- # 3. Add next steps
351
- steps = NEXT_STEPS.get(clean_name, [])
352
- if steps:
353
- governed_result["next_steps"] = steps
354
-
355
- # 4. Always suggest checking the ledger
356
- if clean_name not in ("ledger_add", "ledger_done", "ledger_list", "ledger_context", "ventures", "version", "help", "diagnose", "activate", "license_status", "models"):
357
- if "next_steps" not in governed_result:
358
- governed_result["next_steps"] = []
359
- # Don't duplicate
360
- existing = {s.get("tool") for s in governed_result.get("next_steps", [])}
361
- if "delimit_ledger_context" not in existing:
362
- governed_result["next_steps"].append({
363
- "tool": "delimit_ledger_context",
364
- "reason": "Check ledger for what's next",
365
- "premium": False,
366
- })
367
- else:
368
- # Excluded tools still get the next_steps field (empty) for schema consistency
369
- if "next_steps" not in governed_result:
370
- governed_result["next_steps"] = []
371
-
372
- return governed_result
373
-
374
-
375
- def _deep_get(d: Dict, key: str) -> Any:
376
- """Get a value from a dict, supporting nested keys with dots."""
377
- if "." in key:
378
- parts = key.split(".", 1)
379
- sub = d.get(parts[0])
380
- if isinstance(sub, dict):
381
- return _deep_get(sub, parts[1])
382
- return None
383
-
384
- # Check top-level and common nested locations
385
- if key in d:
386
- return d[key]
387
- # Check inside 'data', 'result', 'overall_coverage'
388
- for wrapper in ["data", "result", "overall_coverage", "summary"]:
389
- if isinstance(d.get(wrapper), dict) and key in d[wrapper]:
390
- return d[wrapper][key]
391
- return None
1
+ """Delimit Governance Layer — compiled binary required. Run: npx delimit-cli setup"""
2
+ def govern(tool_name, result, project_path="."):
3
+ result["next_steps"] = result.get("next_steps", [])
4
+ return result
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "delimit-cli",
3
- "version": "3.8.2",
3
+ "version": "3.9.2",
4
4
  "description": "One workspace for every AI coding assistant. Tasks, memory, and governance carry between Claude Code, Codex, and Gemini CLI.",
5
5
  "main": "index.js",
6
6
  "files": [
@@ -58,6 +58,7 @@
58
58
  "js-yaml": "^4.1.0",
59
59
  "minimatch": "^5.1.0"
60
60
  },
61
+ "proModuleVersion": "3.8.2",
61
62
  "engines": {
62
63
  "node": ">=14.0.0"
63
64
  }