delimit-cli 3.4.0 → 3.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/delimit-setup.js +23 -0
- package/gateway/ai/backends/tools_data.py +830 -0
- package/gateway/ai/backends/tools_design.py +921 -0
- package/gateway/ai/backends/tools_infra.py +866 -0
- package/gateway/ai/backends/tools_real.py +766 -0
- package/gateway/ai/backends/ui_bridge.py +26 -49
- package/gateway/ai/deliberation.py +387 -0
- package/gateway/ai/ledger_manager.py +207 -0
- package/gateway/ai/server.py +630 -216
- package/package.json +1 -1
package/gateway/ai/server.py
CHANGED
|
@@ -64,6 +64,194 @@ def _safe_call(fn, **kwargs) -> Dict[str, Any]:
|
|
|
64
64
|
return {"error": "backend_failure", "message": str(e)}
|
|
65
65
|
|
|
66
66
|
|
|
67
|
# ═══════════════════════════════════════════════════════════════════════
# CONSENSUS 096: Tool Cohesion — next_steps in every response
# ═══════════════════════════════════════════════════════════════════════

# Maps a tool's short name (registry key) to the list of follow-up tool
# suggestions attached to its response by _with_next_steps().  Each entry
# carries the fully-qualified tool name, a human-readable reason, suggested
# call arguments, and whether the suggested tool requires a Pro license.
NEXT_STEPS_REGISTRY: Dict[str, List[Dict[str, Any]]] = {
    # --- Tier 1 Core (Free) ---
    "lint": [
        {"tool": "delimit_explain", "reason": "Get migration guide for breaking changes", "suggested_args": {"template": "migration"}, "is_premium": False},
        {"tool": "delimit_semver", "reason": "Determine the version bump for these changes", "suggested_args": {}, "is_premium": False},
    ],
    "diff": [
        {"tool": "delimit_semver", "reason": "Classify the semver bump for these changes", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_policy", "reason": "Check policy violations for these changes", "suggested_args": {}, "is_premium": False},
    ],
    "policy": [
        {"tool": "delimit_lint", "reason": "Run full lint with policy enforcement", "suggested_args": {}, "is_premium": False},
    ],
    "ledger": [],
    "impact": [
        {"tool": "delimit_ledger", "reason": "Record this impact assessment in the audit ledger", "suggested_args": {}, "is_premium": False},
    ],
    "semver": [
        {"tool": "delimit_explain", "reason": "Generate human-readable changelog for the version bump", "suggested_args": {"template": "changelog"}, "is_premium": False},
    ],
    "explain": [],
    "zero_spec": [
        {"tool": "delimit_lint", "reason": "Lint the extracted spec against your baseline", "suggested_args": {}, "is_premium": False},
    ],
    "init": [
        {"tool": "delimit_gov_health", "reason": "Verify governance health after initialization", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_diagnose", "reason": "Check environment and tool status", "suggested_args": {}, "is_premium": False},
    ],
    # --- Tier 2 Platform (Pro) ---
    "os_plan": [],
    "os_status": [],
    "os_gates": [],
    "gov_health": [
        {"tool": "delimit_gov_status", "reason": "Get detailed governance status", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_repo_analyze", "reason": "Analyze repository structure and quality", "suggested_args": {}, "is_premium": True},
    ],
    "gov_status": [
        {"tool": "delimit_gov_policy", "reason": "Review governance policy configuration", "suggested_args": {}, "is_premium": True},
    ],
    "gov_policy": [],
    "gov_evaluate": [],
    "gov_new_task": [],
    "gov_run": [],
    "gov_verify": [],
    "memory_search": [
        {"tool": "delimit_memory_store", "reason": "Store new information if no results found", "suggested_args": {}, "is_premium": True},
    ],
    "memory_store": [],
    "memory_recent": [],
    "vault_search": [],
    "vault_health": [
        {"tool": "delimit_vault_search", "reason": "Search vault entries for details", "suggested_args": {}, "is_premium": True},
    ],
    "vault_snapshot": [],
    # --- Tier 3 Extended (Pro) ---
    "deploy_plan": [
        {"tool": "delimit_deploy_build", "reason": "Build Docker images for deployment", "suggested_args": {}, "is_premium": True},
    ],
    "deploy_build": [
        {"tool": "delimit_deploy_publish", "reason": "Publish built images to registry", "suggested_args": {}, "is_premium": True},
    ],
    "deploy_publish": [
        {"tool": "delimit_deploy_verify", "reason": "Verify deployment health after publish", "suggested_args": {}, "is_premium": True},
    ],
    "deploy_rollback": [],
    "deploy_status": [],
    "generate_template": [],
    "generate_scaffold": [],
    "security_scan": [
        {"tool": "delimit_evidence_collect", "reason": "Collect evidence artifacts from security findings", "suggested_args": {}, "is_premium": True},
    ],
    "evidence_collect": [
        {"tool": "delimit_evidence_verify", "reason": "Verify evidence bundle integrity", "suggested_args": {}, "is_premium": True},
    ],
    "evidence_verify": [],
    "security_audit": [
        {"tool": "delimit_security_scan", "reason": "Run deeper security scan on flagged areas", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_evidence_collect", "reason": "Collect evidence of security findings", "suggested_args": {}, "is_premium": True},
    ],
    # --- Tier 4 Ops ---
    "obs_status": [
        {"tool": "delimit_obs_metrics", "reason": "Get detailed CPU/memory/disk metrics", "suggested_args": {"query": "all"}, "is_premium": False},
        {"tool": "delimit_obs_logs", "reason": "Search logs for errors or issues", "suggested_args": {"query": "error"}, "is_premium": False},
    ],
    "obs_metrics": [
        {"tool": "delimit_obs_logs", "reason": "Correlate metrics with log entries", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_obs_status", "reason": "Get overall system health", "suggested_args": {}, "is_premium": False},
    ],
    "obs_logs": [
        {"tool": "delimit_obs_metrics", "reason": "Check system metrics for the same time range", "suggested_args": {}, "is_premium": False},
    ],
    "release_plan": [
        {"tool": "delimit_release_status", "reason": "Check current deploy status before releasing", "suggested_args": {}, "is_premium": False},
        # security_audit is a Tier 3 (Pro) tool — flag it premium here to match
        # its own registry entries and every other premium cross-reference.
        {"tool": "delimit_security_audit", "reason": "Audit security before release", "suggested_args": {}, "is_premium": True},
    ],
    "release_status": [
        {"tool": "delimit_release_plan", "reason": "Create a new release plan", "suggested_args": {}, "is_premium": False},
    ],
    "cost_analyze": [
        {"tool": "delimit_cost_optimize", "reason": "Find optimization opportunities for detected services", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_cost_alert", "reason": "Set up cost threshold alerts", "suggested_args": {"action": "create"}, "is_premium": False},
    ],
    "cost_optimize": [
        {"tool": "delimit_cost_analyze", "reason": "Get full cost breakdown for the project", "suggested_args": {}, "is_premium": False},
    ],
    "cost_alert": [],
    "data_validate": [
        {"tool": "delimit_data_backup", "reason": "Back up validated data files", "suggested_args": {}, "is_premium": False},
    ],
    "data_migrate": [
        {"tool": "delimit_data_validate", "reason": "Validate data integrity after migration check", "suggested_args": {}, "is_premium": False},
    ],
    "data_backup": [],
    "intel_dataset_register": [
        {"tool": "delimit_intel_snapshot_ingest", "reason": "Ingest data into the registered dataset", "suggested_args": {}, "is_premium": False},
    ],
    "intel_dataset_list": [],
    "intel_dataset_freeze": [],
    "intel_snapshot_ingest": [
        {"tool": "delimit_intel_query", "reason": "Query ingested snapshots", "suggested_args": {}, "is_premium": False},
    ],
    "intel_query": [],
    "test_generate": [
        {"tool": "delimit_test_smoke", "reason": "Run the generated tests to verify they pass", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_docs_generate", "reason": "Generate API docs for the tested code", "suggested_args": {}, "is_premium": False},
    ],
    "test_smoke": [
        {"tool": "delimit_test_generate", "reason": "Generate test skeletons for untested files", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_docs_validate", "reason": "Check documentation coverage alongside test coverage", "suggested_args": {}, "is_premium": False},
    ],
    "docs_generate": [
        {"tool": "delimit_docs_validate", "reason": "Validate the generated documentation for completeness", "suggested_args": {}, "is_premium": False},
    ],
    "docs_validate": [
        {"tool": "delimit_docs_generate", "reason": "Generate docs to fix missing documentation", "suggested_args": {}, "is_premium": False},
        {"tool": "delimit_test_generate", "reason": "Generate tests alongside documentation improvements", "suggested_args": {}, "is_premium": False},
    ],
    # --- Tier 4 Design/Story ---
    "design_extract_tokens": [
        {"tool": "delimit_design_generate_tailwind", "reason": "Generate Tailwind config from extracted tokens", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_design_component_library", "reason": "Catalog components that use these tokens", "suggested_args": {}, "is_premium": True},
    ],
    "design_generate_component": [
        {"tool": "delimit_story_generate", "reason": "Generate stories for the new component", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_story_accessibility", "reason": "Check accessibility of the generated component", "suggested_args": {}, "is_premium": True},
    ],
    "design_generate_tailwind": [
        {"tool": "delimit_design_extract_tokens", "reason": "Extract tokens to verify config coverage", "suggested_args": {}, "is_premium": True},
    ],
    "design_validate_responsive": [
        {"tool": "delimit_story_visual_test", "reason": "Take screenshots at different viewports", "suggested_args": {}, "is_premium": True},
    ],
    "design_component_library": [
        {"tool": "delimit_story_generate", "reason": "Generate stories for cataloged components", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_story_accessibility", "reason": "Run accessibility audit on all components", "suggested_args": {}, "is_premium": True},
    ],
    "story_generate": [
        {"tool": "delimit_story_visual_test", "reason": "Capture visual baseline for the component", "suggested_args": {}, "is_premium": True},
        {"tool": "delimit_story_accessibility", "reason": "Check accessibility of the component", "suggested_args": {}, "is_premium": True},
    ],
    "story_visual_test": [
        {"tool": "delimit_story_accessibility", "reason": "Also run accessibility checks", "suggested_args": {}, "is_premium": True},
    ],
    "story_accessibility": [
        {"tool": "delimit_design_validate_responsive", "reason": "Also validate responsive patterns", "suggested_args": {}, "is_premium": True},
    ],
    # --- Sensing ---
    "sensor_github_issue": [],
    # --- Meta ---
    "version": [],
    "help": [],
    "diagnose": [],
    "activate": [],
    "license_status": [],
}
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def _with_next_steps(tool_name: str, result: Dict[str, Any]) -> Dict[str, Any]:
    """Attach next_steps metadata to a tool response (Consensus 096).

    Looks up *tool_name* in NEXT_STEPS_REGISTRY (unknown tools fall back
    to an empty list) and stores the suggestions on the response under
    the "next_steps" key.  The *result* dict is mutated in place and
    returned for call-chaining convenience.
    """
    result["next_steps"] = NEXT_STEPS_REGISTRY.get(tool_name, [])
    return result
|
|
253
|
+
|
|
254
|
+
|
|
67
255
|
# ═══════════════════════════════════════════════════════════════════════
|
|
68
256
|
# TIER 1: CORE — API Lint Engine
|
|
69
257
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -80,7 +268,7 @@ def delimit_lint(old_spec: str, new_spec: str, policy_file: Optional[str] = None
|
|
|
80
268
|
policy_file: Optional path to a .delimit/policies.yml file.
|
|
81
269
|
"""
|
|
82
270
|
from backends.gateway_core import run_lint
|
|
83
|
-
return _safe_call(run_lint, old_spec=old_spec, new_spec=new_spec, policy_file=policy_file)
|
|
271
|
+
return _with_next_steps("lint", _safe_call(run_lint, old_spec=old_spec, new_spec=new_spec, policy_file=policy_file))
|
|
84
272
|
|
|
85
273
|
|
|
86
274
|
@mcp.tool()
|
|
@@ -92,7 +280,7 @@ def delimit_diff(old_spec: str, new_spec: str) -> Dict[str, Any]:
|
|
|
92
280
|
new_spec: Path to the new OpenAPI spec file.
|
|
93
281
|
"""
|
|
94
282
|
from backends.gateway_core import run_diff
|
|
95
|
-
return _safe_call(run_diff, old_spec=old_spec, new_spec=new_spec)
|
|
283
|
+
return _with_next_steps("diff", _safe_call(run_diff, old_spec=old_spec, new_spec=new_spec))
|
|
96
284
|
|
|
97
285
|
|
|
98
286
|
@mcp.tool()
|
|
@@ -104,7 +292,7 @@ def delimit_policy(spec_files: List[str], policy_file: Optional[str] = None) ->
|
|
|
104
292
|
policy_file: Optional custom policy file path.
|
|
105
293
|
"""
|
|
106
294
|
from backends.gateway_core import run_policy
|
|
107
|
-
return _safe_call(run_policy, spec_files=spec_files, policy_file=policy_file)
|
|
295
|
+
return _with_next_steps("policy", _safe_call(run_policy, spec_files=spec_files, policy_file=policy_file))
|
|
108
296
|
|
|
109
297
|
|
|
110
298
|
@mcp.tool()
|
|
@@ -118,7 +306,7 @@ def delimit_ledger(ledger_path: str, api_name: Optional[str] = None, repository:
|
|
|
118
306
|
validate_chain: Validate hash chain integrity.
|
|
119
307
|
"""
|
|
120
308
|
from backends.gateway_core import query_ledger
|
|
121
|
-
return _safe_call(query_ledger, ledger_path=ledger_path, api_name=api_name, repository=repository, validate_chain=validate_chain)
|
|
309
|
+
return _with_next_steps("ledger", _safe_call(query_ledger, ledger_path=ledger_path, api_name=api_name, repository=repository, validate_chain=validate_chain))
|
|
122
310
|
|
|
123
311
|
|
|
124
312
|
@mcp.tool()
|
|
@@ -130,7 +318,7 @@ def delimit_impact(api_name: str, dependency_file: Optional[str] = None) -> Dict
|
|
|
130
318
|
dependency_file: Optional path to dependency manifest.
|
|
131
319
|
"""
|
|
132
320
|
from backends.gateway_core import run_impact
|
|
133
|
-
return _safe_call(run_impact, api_name=api_name, dependency_file=dependency_file)
|
|
321
|
+
return _with_next_steps("impact", _safe_call(run_impact, api_name=api_name, dependency_file=dependency_file))
|
|
134
322
|
|
|
135
323
|
|
|
136
324
|
@mcp.tool()
|
|
@@ -146,7 +334,7 @@ def delimit_semver(old_spec: str, new_spec: str, current_version: Optional[str]
|
|
|
146
334
|
current_version: Optional current version (e.g. "1.2.3") to compute next version.
|
|
147
335
|
"""
|
|
148
336
|
from backends.gateway_core import run_semver
|
|
149
|
-
return _safe_call(run_semver, old_spec=old_spec, new_spec=new_spec, current_version=current_version)
|
|
337
|
+
return _with_next_steps("semver", _safe_call(run_semver, old_spec=old_spec, new_spec=new_spec, current_version=current_version))
|
|
150
338
|
|
|
151
339
|
|
|
152
340
|
@mcp.tool()
|
|
@@ -171,7 +359,7 @@ def delimit_explain(
|
|
|
171
359
|
api_name: API/service name for context.
|
|
172
360
|
"""
|
|
173
361
|
from backends.gateway_core import run_explain
|
|
174
|
-
return _safe_call(run_explain, old_spec=old_spec, new_spec=new_spec, template=template, old_version=old_version, new_version=new_version, api_name=api_name)
|
|
362
|
+
return _with_next_steps("explain", _safe_call(run_explain, old_spec=old_spec, new_spec=new_spec, template=template, old_version=old_version, new_version=new_version, api_name=api_name))
|
|
175
363
|
|
|
176
364
|
|
|
177
365
|
@mcp.tool()
|
|
@@ -190,7 +378,7 @@ def delimit_zero_spec(
|
|
|
190
378
|
python_bin: Optional Python binary path (auto-detected if omitted).
|
|
191
379
|
"""
|
|
192
380
|
from backends.gateway_core import run_zero_spec
|
|
193
|
-
return _safe_call(run_zero_spec, project_dir=project_dir, python_bin=python_bin)
|
|
381
|
+
return _with_next_steps("zero_spec", _safe_call(run_zero_spec, project_dir=project_dir, python_bin=python_bin))
|
|
194
382
|
|
|
195
383
|
|
|
196
384
|
|
|
@@ -221,13 +409,13 @@ def delimit_init(
|
|
|
221
409
|
|
|
222
410
|
# Idempotency check
|
|
223
411
|
if policies_file.exists() and ledger_dir.exists() and events_file.exists():
|
|
224
|
-
return {
|
|
412
|
+
return _with_next_steps("init", {
|
|
225
413
|
"tool": "init",
|
|
226
414
|
"status": "already_initialized",
|
|
227
415
|
"project_path": str(root),
|
|
228
416
|
"preset": preset,
|
|
229
417
|
"message": f"Project already initialized at {delimit_dir}. No files overwritten.",
|
|
230
|
-
}
|
|
418
|
+
})
|
|
231
419
|
|
|
232
420
|
created = []
|
|
233
421
|
|
|
@@ -262,14 +450,14 @@ def delimit_init(
|
|
|
262
450
|
events_file.touch()
|
|
263
451
|
created.append(str(events_file))
|
|
264
452
|
|
|
265
|
-
return {
|
|
453
|
+
return _with_next_steps("init", {
|
|
266
454
|
"tool": "init",
|
|
267
455
|
"status": "initialized",
|
|
268
456
|
"project_path": str(root),
|
|
269
457
|
"preset": preset,
|
|
270
458
|
"created": created,
|
|
271
459
|
"message": f"Governance initialized with '{preset}' preset. {len(created)} items created.",
|
|
272
|
-
}
|
|
460
|
+
})
|
|
273
461
|
|
|
274
462
|
# ═══════════════════════════════════════════════════════════════════════
|
|
275
463
|
# TIER 2: PLATFORM — OS, Governance, Memory, Vault
|
|
@@ -293,7 +481,7 @@ def delimit_os_plan(operation: str, target: str, parameters: Optional[Dict[str,
|
|
|
293
481
|
if gate:
|
|
294
482
|
return gate
|
|
295
483
|
from backends.os_bridge import create_plan
|
|
296
|
-
return _safe_call(create_plan, operation=operation, target=target, parameters=parameters, require_approval=require_approval)
|
|
484
|
+
return _with_next_steps("os_plan", _safe_call(create_plan, operation=operation, target=target, parameters=parameters, require_approval=require_approval))
|
|
297
485
|
|
|
298
486
|
|
|
299
487
|
@mcp.tool()
|
|
@@ -304,7 +492,7 @@ def delimit_os_status() -> Dict[str, Any]:
|
|
|
304
492
|
if gate:
|
|
305
493
|
return gate
|
|
306
494
|
from backends.os_bridge import get_status
|
|
307
|
-
return _safe_call(get_status)
|
|
495
|
+
return _with_next_steps("os_status", _safe_call(get_status))
|
|
308
496
|
|
|
309
497
|
|
|
310
498
|
@mcp.tool()
|
|
@@ -319,7 +507,7 @@ def delimit_os_gates(plan_id: str) -> Dict[str, Any]:
|
|
|
319
507
|
if gate:
|
|
320
508
|
return gate
|
|
321
509
|
from backends.os_bridge import check_gates
|
|
322
|
-
return _safe_call(check_gates, plan_id=plan_id)
|
|
510
|
+
return _with_next_steps("os_gates", _safe_call(check_gates, plan_id=plan_id))
|
|
323
511
|
|
|
324
512
|
|
|
325
513
|
# ─── Governance ─────────────────────────────────────────────────────────
|
|
@@ -336,7 +524,7 @@ def delimit_gov_health(repo: str = ".") -> Dict[str, Any]:
|
|
|
336
524
|
if gate:
|
|
337
525
|
return gate
|
|
338
526
|
from backends.governance_bridge import health
|
|
339
|
-
return _safe_call(health, repo=repo)
|
|
527
|
+
return _with_next_steps("gov_health", _safe_call(health, repo=repo))
|
|
340
528
|
|
|
341
529
|
|
|
342
530
|
@mcp.tool()
|
|
@@ -351,7 +539,7 @@ def delimit_gov_status(repo: str = ".") -> Dict[str, Any]:
|
|
|
351
539
|
if gate:
|
|
352
540
|
return gate
|
|
353
541
|
from backends.governance_bridge import status
|
|
354
|
-
return _safe_call(status, repo=repo)
|
|
542
|
+
return _with_next_steps("gov_status", _safe_call(status, repo=repo))
|
|
355
543
|
|
|
356
544
|
|
|
357
545
|
@mcp.tool()
|
|
@@ -366,7 +554,7 @@ def delimit_gov_policy(repo: str = ".") -> Dict[str, Any]:
|
|
|
366
554
|
if gate:
|
|
367
555
|
return gate
|
|
368
556
|
from backends.governance_bridge import policy
|
|
369
|
-
return _safe_call(policy, repo=repo)
|
|
557
|
+
return _with_next_steps("gov_policy", _safe_call(policy, repo=repo))
|
|
370
558
|
|
|
371
559
|
|
|
372
560
|
@mcp.tool()
|
|
@@ -383,7 +571,7 @@ def delimit_gov_evaluate(action: str, context: Optional[Dict[str, Any]] = None,
|
|
|
383
571
|
if gate:
|
|
384
572
|
return gate
|
|
385
573
|
from backends.governance_bridge import evaluate_trigger
|
|
386
|
-
return _safe_call(evaluate_trigger, action=action, context=context, repo=repo)
|
|
574
|
+
return _with_next_steps("gov_evaluate", _safe_call(evaluate_trigger, action=action, context=context, repo=repo))
|
|
387
575
|
|
|
388
576
|
|
|
389
577
|
@mcp.tool()
|
|
@@ -401,7 +589,7 @@ def delimit_gov_new_task(title: str, scope: str, risk_level: str = "medium", rep
|
|
|
401
589
|
if gate:
|
|
402
590
|
return gate
|
|
403
591
|
from backends.governance_bridge import new_task
|
|
404
|
-
return _safe_call(new_task, title=title, scope=scope, risk_level=risk_level, repo=repo)
|
|
592
|
+
return _with_next_steps("gov_new_task", _safe_call(new_task, title=title, scope=scope, risk_level=risk_level, repo=repo))
|
|
405
593
|
|
|
406
594
|
|
|
407
595
|
@mcp.tool()
|
|
@@ -417,7 +605,7 @@ def delimit_gov_run(task_id: str, repo: str = ".") -> Dict[str, Any]:
|
|
|
417
605
|
if gate:
|
|
418
606
|
return gate
|
|
419
607
|
from backends.governance_bridge import run_task
|
|
420
|
-
return _safe_call(run_task, task_id=task_id, repo=repo)
|
|
608
|
+
return _with_next_steps("gov_run", _safe_call(run_task, task_id=task_id, repo=repo))
|
|
421
609
|
|
|
422
610
|
|
|
423
611
|
@mcp.tool()
|
|
@@ -433,7 +621,7 @@ def delimit_gov_verify(task_id: str, repo: str = ".") -> Dict[str, Any]:
|
|
|
433
621
|
if gate:
|
|
434
622
|
return gate
|
|
435
623
|
from backends.governance_bridge import verify
|
|
436
|
-
return _safe_call(verify, task_id=task_id, repo=repo)
|
|
624
|
+
return _with_next_steps("gov_verify", _safe_call(verify, task_id=task_id, repo=repo))
|
|
437
625
|
|
|
438
626
|
|
|
439
627
|
# ─── Memory ─────────────────────────────────────────────────────────────
|
|
@@ -451,7 +639,7 @@ def delimit_memory_search(query: str, limit: int = 10) -> Dict[str, Any]:
|
|
|
451
639
|
if gate:
|
|
452
640
|
return gate
|
|
453
641
|
from backends.memory_bridge import search
|
|
454
|
-
return _safe_call(search, query=query, limit=limit)
|
|
642
|
+
return _with_next_steps("memory_search", _safe_call(search, query=query, limit=limit))
|
|
455
643
|
|
|
456
644
|
|
|
457
645
|
@mcp.tool()
|
|
@@ -468,7 +656,7 @@ def delimit_memory_store(content: str, tags: Optional[List[str]] = None, context
|
|
|
468
656
|
if gate:
|
|
469
657
|
return gate
|
|
470
658
|
from backends.memory_bridge import store
|
|
471
|
-
return _safe_call(store, content=content, tags=tags, context=context)
|
|
659
|
+
return _with_next_steps("memory_store", _safe_call(store, content=content, tags=tags, context=context))
|
|
472
660
|
|
|
473
661
|
|
|
474
662
|
@mcp.tool()
|
|
@@ -483,7 +671,7 @@ def delimit_memory_recent(limit: int = 5) -> Dict[str, Any]:
|
|
|
483
671
|
if gate:
|
|
484
672
|
return gate
|
|
485
673
|
from backends.memory_bridge import get_recent
|
|
486
|
-
return _safe_call(get_recent, limit=limit)
|
|
674
|
+
return _with_next_steps("memory_recent", _safe_call(get_recent, limit=limit))
|
|
487
675
|
|
|
488
676
|
|
|
489
677
|
# ─── Vault ──────────────────────────────────────────────────────────────
|
|
@@ -500,7 +688,7 @@ def delimit_vault_search(query: str) -> Dict[str, Any]:
|
|
|
500
688
|
if gate:
|
|
501
689
|
return gate
|
|
502
690
|
from backends.vault_bridge import search
|
|
503
|
-
return _safe_call(search, query=query)
|
|
691
|
+
return _with_next_steps("vault_search", _safe_call(search, query=query))
|
|
504
692
|
|
|
505
693
|
|
|
506
694
|
@mcp.tool()
|
|
@@ -511,7 +699,7 @@ def delimit_vault_health() -> Dict[str, Any]:
|
|
|
511
699
|
if gate:
|
|
512
700
|
return gate
|
|
513
701
|
from backends.vault_bridge import health
|
|
514
|
-
return _safe_call(health)
|
|
702
|
+
return _with_next_steps("vault_health", _safe_call(health))
|
|
515
703
|
|
|
516
704
|
|
|
517
705
|
@mcp.tool()
|
|
@@ -522,7 +710,7 @@ def delimit_vault_snapshot() -> Dict[str, Any]:
|
|
|
522
710
|
if gate:
|
|
523
711
|
return gate
|
|
524
712
|
from backends.vault_bridge import snapshot
|
|
525
|
-
return _safe_call(snapshot)
|
|
713
|
+
return _with_next_steps("vault_snapshot", _safe_call(snapshot))
|
|
526
714
|
|
|
527
715
|
|
|
528
716
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -546,7 +734,7 @@ def delimit_deploy_plan(app: str, env: str, git_ref: Optional[str] = None) -> Di
|
|
|
546
734
|
if gate:
|
|
547
735
|
return gate
|
|
548
736
|
from backends.deploy_bridge import plan
|
|
549
|
-
return _safe_call(plan, app=app, env=env, git_ref=git_ref)
|
|
737
|
+
return _with_next_steps("deploy_plan", _safe_call(plan, app=app, env=env, git_ref=git_ref))
|
|
550
738
|
|
|
551
739
|
|
|
552
740
|
@mcp.tool()
|
|
@@ -562,7 +750,7 @@ def delimit_deploy_build(app: str, git_ref: Optional[str] = None) -> Dict[str, A
|
|
|
562
750
|
if gate:
|
|
563
751
|
return gate
|
|
564
752
|
from backends.deploy_bridge import build
|
|
565
|
-
return _safe_call(build, app=app, git_ref=git_ref)
|
|
753
|
+
return _with_next_steps("deploy_build", _safe_call(build, app=app, git_ref=git_ref))
|
|
566
754
|
|
|
567
755
|
|
|
568
756
|
@mcp.tool()
|
|
@@ -578,7 +766,7 @@ def delimit_deploy_publish(app: str, git_ref: Optional[str] = None) -> Dict[str,
|
|
|
578
766
|
if gate:
|
|
579
767
|
return gate
|
|
580
768
|
from backends.deploy_bridge import publish
|
|
581
|
-
return _safe_call(publish, app=app, git_ref=git_ref)
|
|
769
|
+
return _with_next_steps("deploy_publish", _safe_call(publish, app=app, git_ref=git_ref))
|
|
582
770
|
|
|
583
771
|
|
|
584
772
|
@_experimental_tool() # HIDDEN: stub/pass-through (LED-044)
|
|
@@ -612,7 +800,7 @@ def delimit_deploy_rollback(app: str, env: str, to_sha: Optional[str] = None) ->
|
|
|
612
800
|
if gate:
|
|
613
801
|
return gate
|
|
614
802
|
from backends.deploy_bridge import rollback
|
|
615
|
-
return _safe_call(rollback, app=app, env=env, to_sha=to_sha)
|
|
803
|
+
return _with_next_steps("deploy_rollback", _safe_call(rollback, app=app, env=env, to_sha=to_sha))
|
|
616
804
|
|
|
617
805
|
|
|
618
806
|
@mcp.tool()
|
|
@@ -628,65 +816,65 @@ def delimit_deploy_status(app: str, env: str) -> Dict[str, Any]:
|
|
|
628
816
|
if gate:
|
|
629
817
|
return gate
|
|
630
818
|
from backends.deploy_bridge import status
|
|
631
|
-
return _safe_call(status, app=app, env=env)
|
|
819
|
+
return _with_next_steps("deploy_status", _safe_call(status, app=app, env=env))
|
|
632
820
|
|
|
633
821
|
|
|
634
822
|
# ─── Intel ──────────────────────────────────────────────────────────────
|
|
635
823
|
|
|
@mcp.tool()
def delimit_intel_dataset_register(name: str, schema: Optional[Dict[str, Any]] = None, description: Optional[str] = None) -> Dict[str, Any]:
    """Register a new dataset in the file-based intel registry.

    Args:
        name: Dataset name.
        schema: Optional JSON schema for the dataset.
        description: Human-readable description.
    """
    from backends.tools_data import intel_dataset_register
    # Wrap the backend call so failures become structured error dicts,
    # then attach the follow-up suggestions for this tool.
    outcome = _safe_call(intel_dataset_register, name=name, schema=schema, description=description)
    return _with_next_steps("intel_dataset_register", outcome)
|
|
647
835
|
|
|
648
836
|
|
|
@mcp.tool()
def delimit_intel_dataset_list() -> Dict[str, Any]:
    """List all registered datasets from the intel registry."""
    from backends.tools_data import intel_dataset_list
    # No arguments to forward; _safe_call normalizes backend failures.
    listing = _safe_call(intel_dataset_list)
    return _with_next_steps("intel_dataset_list", listing)
|
|
654
842
|
|
|
655
843
|
|
|
@mcp.tool()
def delimit_intel_dataset_freeze(dataset_id: str) -> Dict[str, Any]:
    """Mark a dataset as immutable (frozen). Prevents further modifications.

    Args:
        dataset_id: Dataset identifier.
    """
    from backends.tools_data import intel_dataset_freeze
    response = _safe_call(intel_dataset_freeze, dataset_id=dataset_id)
    return _with_next_steps("intel_dataset_freeze", response)
|
|
665
853
|
|
|
666
854
|
|
|
@mcp.tool()
def delimit_intel_snapshot_ingest(data: Dict[str, Any], provenance: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Store a research snapshot with provenance metadata in the local intel store.

    Args:
        data: Snapshot data (any JSON-serializable dict).
        provenance: Optional provenance metadata (source, author, etc.).
    """
    from backends.tools_data import intel_snapshot_ingest
    ingested = _safe_call(intel_snapshot_ingest, data=data, provenance=provenance)
    return _with_next_steps("intel_snapshot_ingest", ingested)
|
|
677
865
|
|
|
678
866
|
|
|
@mcp.tool()
def delimit_intel_query(dataset_id: Optional[str] = None, query: str = "", parameters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Search saved intel snapshots by keyword, date, or dataset.

    Args:
        dataset_id: Optional dataset to filter by.
        query: Keyword search string.
        parameters: Optional params (date_from, date_to, limit).
    """
    from backends.tools_data import intel_query
    hits = _safe_call(intel_query, dataset_id=dataset_id, query=query, parameters=parameters)
    return _with_next_steps("intel_query", hits)
|
|
690
878
|
|
|
691
879
|
|
|
692
880
|
# ─── Generate ───────────────────────────────────────────────────────────
|
|
@@ -702,7 +890,7 @@ def delimit_generate_template(template_type: str, name: str, framework: str = "n
|
|
|
702
890
|
features: Optional feature flags.
|
|
703
891
|
"""
|
|
704
892
|
from backends.generate_bridge import template
|
|
705
|
-
return _safe_call(template, template_type=template_type, name=name, framework=framework, features=features)
|
|
893
|
+
return _with_next_steps("generate_template", _safe_call(template, template_type=template_type, name=name, framework=framework, features=features))
|
|
706
894
|
|
|
707
895
|
|
|
708
896
|
@mcp.tool()
|
|
@@ -715,7 +903,7 @@ def delimit_generate_scaffold(project_type: str, name: str, packages: Optional[L
|
|
|
715
903
|
packages: Packages to include.
|
|
716
904
|
"""
|
|
717
905
|
from backends.generate_bridge import scaffold
|
|
718
|
-
return _safe_call(scaffold, project_type=project_type, name=name, packages=packages)
|
|
906
|
+
return _with_next_steps("generate_scaffold", _safe_call(scaffold, project_type=project_type, name=name, packages=packages))
|
|
719
907
|
|
|
720
908
|
|
|
721
909
|
# ─── Repo (RepoDoctor + ConfigSentry) ──────────────────────────────────
|
|
@@ -794,22 +982,26 @@ def delimit_security_scan(target: str = ".") -> Dict[str, Any]:
|
|
|
794
982
|
if gate:
|
|
795
983
|
return gate
|
|
796
984
|
from backends.repo_bridge import security_scan
|
|
797
|
-
return _safe_call(security_scan, target=target)
|
|
985
|
+
return _with_next_steps("security_scan", _safe_call(security_scan, target=target))
|
|
798
986
|
|
|
799
987
|
|
|
800
|
-
@
|
|
988
|
+
@mcp.tool()
|
|
801
989
|
def delimit_security_audit(target: str = ".") -> Dict[str, Any]:
|
|
802
|
-
"""Audit security
|
|
990
|
+
"""Audit security: dependency vulnerabilities, anti-patterns, and secret detection.
|
|
991
|
+
|
|
992
|
+
Scans for:
|
|
993
|
+
- Dependency vulnerabilities (pip-audit, npm audit)
|
|
994
|
+
- Hardcoded secrets (API keys, tokens, passwords)
|
|
995
|
+
- Dangerous patterns (eval, exec, SQL injection, XSS)
|
|
996
|
+
- .env files tracked in git
|
|
997
|
+
|
|
998
|
+
Optional: Set SNYK_TOKEN or install Trivy for enhanced scanning.
|
|
803
999
|
|
|
804
1000
|
Args:
|
|
805
|
-
target: Repository or file path.
|
|
1001
|
+
target: Repository or file path to audit.
|
|
806
1002
|
"""
|
|
807
|
-
from
|
|
808
|
-
|
|
809
|
-
if gate:
|
|
810
|
-
return gate
|
|
811
|
-
from backends.repo_bridge import security_audit
|
|
812
|
-
return _safe_call(security_audit, target=target)
|
|
1003
|
+
from backends.tools_infra import security_audit
|
|
1004
|
+
return _with_next_steps("security_audit", _safe_call(security_audit, target=target))
|
|
813
1005
|
|
|
814
1006
|
|
|
815
1007
|
# ─── Evidence ───────────────────────────────────────────────────────────
|
|
@@ -826,7 +1018,7 @@ def delimit_evidence_collect(target: str = ".") -> Dict[str, Any]:
|
|
|
826
1018
|
if gate:
|
|
827
1019
|
return gate
|
|
828
1020
|
from backends.repo_bridge import evidence_collect
|
|
829
|
-
return _safe_call(evidence_collect, target=target)
|
|
1021
|
+
return _with_next_steps("evidence_collect", _safe_call(evidence_collect, target=target))
|
|
830
1022
|
|
|
831
1023
|
|
|
832
1024
|
@mcp.tool()
|
|
@@ -842,7 +1034,7 @@ def delimit_evidence_verify(bundle_id: Optional[str] = None, bundle_path: Option
|
|
|
842
1034
|
if gate:
|
|
843
1035
|
return gate
|
|
844
1036
|
from backends.repo_bridge import evidence_verify
|
|
845
|
-
return _safe_call(evidence_verify, bundle_id=bundle_id, bundle_path=bundle_path)
|
|
1037
|
+
return _with_next_steps("evidence_verify", _safe_call(evidence_verify, bundle_id=bundle_id, bundle_path=bundle_path))
|
|
846
1038
|
|
|
847
1039
|
|
|
848
1040
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -852,18 +1044,22 @@ def delimit_evidence_verify(bundle_id: Optional[str] = None, bundle_path: Option
|
|
|
852
1044
|
|
|
853
1045
|
# ─── ReleasePilot (Governance Primitive) ────────────────────────────────
|
|
854
1046
|
|
|
855
|
-
@
|
|
856
|
-
def delimit_release_plan(environment: str, version: str, repository: str, services: Optional[List[str]] = None) -> Dict[str, Any]:
|
|
857
|
-
"""
|
|
1047
|
+
@mcp.tool()
|
|
1048
|
+
def delimit_release_plan(environment: str = "production", version: str = "", repository: str = ".", services: Optional[List[str]] = None) -> Dict[str, Any]:
|
|
1049
|
+
"""Generate a release plan from git history.
|
|
1050
|
+
|
|
1051
|
+
Reads git log since last tag, counts commits and changed files,
|
|
1052
|
+
suggests a semver version, and generates a release checklist.
|
|
1053
|
+
Saves plan to ~/.delimit/deploys/ for tracking.
|
|
858
1054
|
|
|
859
1055
|
Args:
|
|
860
1056
|
environment: Target environment (staging/production).
|
|
861
|
-
version: Release version.
|
|
862
|
-
repository: Repository
|
|
1057
|
+
version: Release version (auto-detected if empty).
|
|
1058
|
+
repository: Repository path (default: current directory).
|
|
863
1059
|
services: Optional service list.
|
|
864
1060
|
"""
|
|
865
|
-
from backends.
|
|
866
|
-
return _safe_call(release_plan, environment=environment, version=version, repository=repository, services=services)
|
|
1061
|
+
from backends.tools_infra import release_plan
|
|
1062
|
+
return _with_next_steps("release_plan", _safe_call(release_plan, environment=environment, version=version, repository=repository, services=services))
|
|
867
1063
|
|
|
868
1064
|
|
|
869
1065
|
@_experimental_tool() # HIDDEN: stub/pass-through (LED-044)
|
|
@@ -878,15 +1074,18 @@ def delimit_release_validate(environment: str, version: str) -> Dict[str, Any]:
|
|
|
878
1074
|
return _safe_call(release_validate, environment=environment, version=version)
|
|
879
1075
|
|
|
880
1076
|
|
|
881
|
-
@
|
|
882
|
-
def delimit_release_status(environment: str) -> Dict[str, Any]:
|
|
883
|
-
"""Check
|
|
1077
|
+
@mcp.tool()
|
|
1078
|
+
def delimit_release_status(environment: str = "production") -> Dict[str, Any]:
|
|
1079
|
+
"""Check release/deploy status from file-based tracker and git state.
|
|
1080
|
+
|
|
1081
|
+
Shows latest deploy plan, current git tag, how many commits HEAD
|
|
1082
|
+
is ahead of the tag, and recent deploy history.
|
|
884
1083
|
|
|
885
1084
|
Args:
|
|
886
|
-
environment: Target environment.
|
|
1085
|
+
environment: Target environment (staging/production).
|
|
887
1086
|
"""
|
|
888
|
-
from backends.
|
|
889
|
-
return _safe_call(release_status, environment=environment)
|
|
1087
|
+
from backends.tools_infra import release_status
|
|
1088
|
+
return _with_next_steps("release_status", _safe_call(release_status, environment=environment))
|
|
890
1089
|
|
|
891
1090
|
|
|
892
1091
|
@_experimental_tool() # HIDDEN: stub/pass-through (LED-044)
|
|
@@ -918,98 +1117,112 @@ def delimit_release_history(environment: str, limit: int = 10) -> Dict[str, Any]
|
|
|
918
1117
|
|
|
919
1118
|
@mcp.tool()
|
|
920
1119
|
def delimit_cost_analyze(target: str = ".") -> Dict[str, Any]:
|
|
921
|
-
"""Analyze
|
|
1120
|
+
"""Analyze project costs by scanning Dockerfiles, dependencies, and cloud configs.
|
|
922
1121
|
|
|
923
1122
|
Args:
|
|
924
|
-
target: Project or infrastructure path.
|
|
1123
|
+
target: Project or infrastructure path to analyze.
|
|
925
1124
|
"""
|
|
926
|
-
from backends.
|
|
927
|
-
return _safe_call(cost_analyze, target=target)
|
|
1125
|
+
from backends.tools_data import cost_analyze
|
|
1126
|
+
return _with_next_steps("cost_analyze", _safe_call(cost_analyze, target=target))
|
|
928
1127
|
|
|
929
1128
|
|
|
930
|
-
@
|
|
1129
|
+
@mcp.tool()
|
|
931
1130
|
def delimit_cost_optimize(target: str = ".") -> Dict[str, Any]:
|
|
932
|
-
"""
|
|
1131
|
+
"""Find cost optimization opportunities: unused deps, oversized images, uncompressed assets.
|
|
933
1132
|
|
|
934
1133
|
Args:
|
|
935
|
-
target: Project or infrastructure path.
|
|
1134
|
+
target: Project or infrastructure path to analyze.
|
|
936
1135
|
"""
|
|
937
|
-
from backends.
|
|
938
|
-
return _safe_call(cost_optimize, target=target)
|
|
1136
|
+
from backends.tools_data import cost_optimize
|
|
1137
|
+
return _with_next_steps("cost_optimize", _safe_call(cost_optimize, target=target))
|
|
939
1138
|
|
|
940
1139
|
|
|
941
|
-
@
|
|
942
|
-
def delimit_cost_alert(action: str = "list"
|
|
943
|
-
|
|
1140
|
+
@mcp.tool()
|
|
1141
|
+
def delimit_cost_alert(action: str = "list", name: Optional[str] = None,
|
|
1142
|
+
threshold: Optional[float] = None, alert_id: Optional[str] = None) -> Dict[str, Any]:
|
|
1143
|
+
"""Manage cost alerts (file-based). CRUD operations on spending thresholds.
|
|
944
1144
|
|
|
945
1145
|
Args:
|
|
946
|
-
action: Action (list/create/delete/
|
|
1146
|
+
action: Action (list/create/delete/toggle).
|
|
1147
|
+
name: Alert name (required for create).
|
|
1148
|
+
threshold: Cost threshold in USD (required for create).
|
|
1149
|
+
alert_id: Alert ID (required for delete/toggle).
|
|
947
1150
|
"""
|
|
948
|
-
from backends.
|
|
949
|
-
return _safe_call(cost_alert, action=action)
|
|
1151
|
+
from backends.tools_data import cost_alert
|
|
1152
|
+
return _with_next_steps("cost_alert", _safe_call(cost_alert, action=action, name=name, threshold=threshold, alert_id=alert_id))
|
|
950
1153
|
|
|
951
1154
|
|
|
952
1155
|
# ─── DataSteward (Governance Primitive) ────────────────────────────────
|
|
953
1156
|
|
|
954
1157
|
@mcp.tool()
|
|
955
1158
|
def delimit_data_validate(target: str = ".") -> Dict[str, Any]:
|
|
956
|
-
"""Validate data integrity
|
|
1159
|
+
"""Validate data files: JSON parse, CSV structure, SQLite integrity check.
|
|
957
1160
|
|
|
958
1161
|
Args:
|
|
959
|
-
target:
|
|
1162
|
+
target: Directory or file path containing data files.
|
|
960
1163
|
"""
|
|
961
|
-
from backends.
|
|
962
|
-
return _safe_call(data_validate, target=target)
|
|
1164
|
+
from backends.tools_data import data_validate
|
|
1165
|
+
return _with_next_steps("data_validate", _safe_call(data_validate, target=target))
|
|
963
1166
|
|
|
964
1167
|
|
|
965
|
-
@
|
|
1168
|
+
@mcp.tool()
|
|
966
1169
|
def delimit_data_migrate(target: str = ".") -> Dict[str, Any]:
|
|
967
|
-
"""
|
|
1170
|
+
"""Check for migration files (alembic, Django, Prisma, Knex) and report status.
|
|
968
1171
|
|
|
969
1172
|
Args:
|
|
970
|
-
target:
|
|
1173
|
+
target: Project path to scan for migration files.
|
|
971
1174
|
"""
|
|
972
|
-
from backends.
|
|
973
|
-
return _safe_call(data_migrate, target=target)
|
|
1175
|
+
from backends.tools_data import data_migrate
|
|
1176
|
+
return _with_next_steps("data_migrate", _safe_call(data_migrate, target=target))
|
|
974
1177
|
|
|
975
1178
|
|
|
976
|
-
@
|
|
1179
|
+
@mcp.tool()
|
|
977
1180
|
def delimit_data_backup(target: str = ".") -> Dict[str, Any]:
|
|
978
|
-
"""
|
|
1181
|
+
"""Back up SQLite and JSON data files to ~/.delimit/backups/ with timestamp.
|
|
979
1182
|
|
|
980
1183
|
Args:
|
|
981
|
-
target:
|
|
1184
|
+
target: Directory or file to back up.
|
|
982
1185
|
"""
|
|
983
|
-
from backends.
|
|
984
|
-
return _safe_call(data_backup, target=target)
|
|
1186
|
+
from backends.tools_data import data_backup
|
|
1187
|
+
return _with_next_steps("data_backup", _safe_call(data_backup, target=target))
|
|
985
1188
|
|
|
986
1189
|
|
|
987
1190
|
# ─── ObservabilityOps (Internal OS) ────────────────────────────────────
|
|
988
1191
|
|
|
989
|
-
@
|
|
990
|
-
def delimit_obs_metrics(query: str, time_range: str = "1h", source: Optional[str] = None) -> Dict[str, Any]:
|
|
991
|
-
"""Query
|
|
1192
|
+
@mcp.tool()
|
|
1193
|
+
def delimit_obs_metrics(query: str = "system", time_range: str = "1h", source: Optional[str] = None) -> Dict[str, Any]:
|
|
1194
|
+
"""Query live system metrics (CPU, memory, disk I/O, network).
|
|
1195
|
+
|
|
1196
|
+
Query types: cpu, memory, disk, io, network, system (default), all.
|
|
1197
|
+
Reads directly from /proc for real-time data.
|
|
1198
|
+
|
|
1199
|
+
Optional: Set PROMETHEUS_URL for remote metrics.
|
|
992
1200
|
|
|
993
1201
|
Args:
|
|
994
|
-
query: Metrics query.
|
|
1202
|
+
query: Metrics query type (cpu|memory|disk|io|network|system|all).
|
|
995
1203
|
time_range: Time range (e.g. "1h", "24h", "7d").
|
|
996
|
-
source: Optional metrics source.
|
|
1204
|
+
source: Optional metrics source (prometheus, local).
|
|
997
1205
|
"""
|
|
998
|
-
from backends.
|
|
999
|
-
return _safe_call(obs_metrics, query=query, time_range=time_range, source=source)
|
|
1206
|
+
from backends.tools_infra import obs_metrics
|
|
1207
|
+
return _with_next_steps("obs_metrics", _safe_call(obs_metrics, query=query, time_range=time_range, source=source))
|
|
1000
1208
|
|
|
1001
1209
|
|
|
1002
|
-
@
|
|
1210
|
+
@mcp.tool()
|
|
1003
1211
|
def delimit_obs_logs(query: str, time_range: str = "1h", source: Optional[str] = None) -> Dict[str, Any]:
|
|
1004
|
-
"""
|
|
1212
|
+
"""Search system and application logs.
|
|
1213
|
+
|
|
1214
|
+
Searches journalctl, /var/log/*, and application log directories.
|
|
1215
|
+
Returns matching log lines with source attribution.
|
|
1216
|
+
|
|
1217
|
+
Optional: Set ELASTICSEARCH_URL or LOKI_URL for centralized log search.
|
|
1005
1218
|
|
|
1006
1219
|
Args:
|
|
1007
|
-
query: Log search query.
|
|
1008
|
-
time_range: Time range.
|
|
1009
|
-
source:
|
|
1220
|
+
query: Log search query string.
|
|
1221
|
+
time_range: Time range (5m, 15m, 1h, 6h, 24h, 7d).
|
|
1222
|
+
source: Log source path or integration name (journalctl, elasticsearch).
|
|
1010
1223
|
"""
|
|
1011
|
-
from backends.
|
|
1012
|
-
return _safe_call(obs_logs, query=query, time_range=time_range, source=source)
|
|
1224
|
+
from backends.tools_infra import obs_logs
|
|
1225
|
+
return _with_next_steps("obs_logs", _safe_call(obs_logs, query=query, time_range=time_range, source=source))
|
|
1013
1226
|
|
|
1014
1227
|
|
|
1015
1228
|
@_experimental_tool() # HIDDEN: stub/pass-through (LED-044)
|
|
@@ -1025,107 +1238,119 @@ def delimit_obs_alerts(action: str, alert_rule: Optional[Dict[str, Any]] = None,
|
|
|
1025
1238
|
return _safe_call(obs_alerts, action=action, alert_rule=alert_rule, rule_id=rule_id)
|
|
1026
1239
|
|
|
1027
1240
|
|
|
1028
|
-
@
|
|
1241
|
+
@mcp.tool()
|
|
1029
1242
|
def delimit_obs_status() -> Dict[str, Any]:
|
|
1030
|
-
"""
|
|
1031
|
-
|
|
1032
|
-
|
|
1243
|
+
"""System health check: disk space, memory, running services, uptime.
|
|
1244
|
+
|
|
1245
|
+
Checks disk usage, memory, process count, load average, and probes
|
|
1246
|
+
common service ports (Node, PostgreSQL, Redis, Nginx, etc.).
|
|
1247
|
+
No external integration needed.
|
|
1248
|
+
"""
|
|
1249
|
+
from backends.tools_infra import obs_status
|
|
1250
|
+
return _with_next_steps("obs_status", _safe_call(obs_status))
|
|
1033
1251
|
|
|
1034
1252
|
|
|
1035
1253
|
# ─── DesignSystem (UI Tooling) ──────────────────────────────────────────
|
|
1036
1254
|
|
|
1037
|
-
@
|
|
1038
|
-
def delimit_design_extract_tokens(figma_file_key: str, token_types: Optional[List[str]] = None) -> Dict[str, Any]:
|
|
1039
|
-
"""Extract design tokens from
|
|
1255
|
+
@mcp.tool()
|
|
1256
|
+
def delimit_design_extract_tokens(figma_file_key: Optional[str] = None, token_types: Optional[List[str]] = None, project_path: Optional[str] = None) -> Dict[str, Any]:
|
|
1257
|
+
"""Extract design tokens from project CSS/SCSS/Tailwind config (or Figma if FIGMA_TOKEN set).
|
|
1040
1258
|
|
|
1041
1259
|
Args:
|
|
1042
|
-
figma_file_key: Figma file key.
|
|
1043
|
-
token_types: Token types to extract (colors, typography, spacing,
|
|
1260
|
+
figma_file_key: Optional Figma file key (uses Figma API if FIGMA_TOKEN env var is set).
|
|
1261
|
+
token_types: Token types to extract (colors, typography, spacing, breakpoints).
|
|
1262
|
+
project_path: Project directory to scan. Defaults to cwd.
|
|
1044
1263
|
"""
|
|
1045
1264
|
from backends.ui_bridge import design_extract_tokens
|
|
1046
|
-
return _safe_call(design_extract_tokens, figma_file_key=figma_file_key, token_types=token_types)
|
|
1265
|
+
return _with_next_steps("design_extract_tokens", _safe_call(design_extract_tokens, figma_file_key=figma_file_key, token_types=token_types, project_path=project_path))
|
|
1047
1266
|
|
|
1048
1267
|
|
|
1049
|
-
@
|
|
1050
|
-
def delimit_design_generate_component(component_name: str, figma_node_id: Optional[str] = None, output_path: Optional[str] = None) -> Dict[str, Any]:
|
|
1051
|
-
"""Generate Next.js component
|
|
1268
|
+
@mcp.tool()
|
|
1269
|
+
def delimit_design_generate_component(component_name: str, figma_node_id: Optional[str] = None, output_path: Optional[str] = None, project_path: Optional[str] = None) -> Dict[str, Any]:
|
|
1270
|
+
"""Generate a React/Next.js component skeleton with props interface and Tailwind support.
|
|
1052
1271
|
|
|
1053
1272
|
Args:
|
|
1054
|
-
component_name: Component name.
|
|
1055
|
-
figma_node_id: Figma node ID.
|
|
1056
|
-
output_path: Output file path.
|
|
1273
|
+
component_name: Component name (PascalCase).
|
|
1274
|
+
figma_node_id: Optional Figma node ID (reserved for future use).
|
|
1275
|
+
output_path: Output file path. Defaults to components/<Name>/<Name>.tsx.
|
|
1276
|
+
project_path: Project root for Tailwind detection.
|
|
1057
1277
|
"""
|
|
1058
1278
|
from backends.ui_bridge import design_generate_component
|
|
1059
|
-
return _safe_call(design_generate_component, component_name=component_name, figma_node_id=figma_node_id, output_path=output_path)
|
|
1279
|
+
return _with_next_steps("design_generate_component", _safe_call(design_generate_component, component_name=component_name, figma_node_id=figma_node_id, output_path=output_path, project_path=project_path))
|
|
1060
1280
|
|
|
1061
1281
|
|
|
1062
|
-
@
|
|
1063
|
-
def delimit_design_generate_tailwind(figma_file_key: str, output_path: Optional[str] = None) -> Dict[str, Any]:
|
|
1064
|
-
"""
|
|
1282
|
+
@mcp.tool()
|
|
1283
|
+
def delimit_design_generate_tailwind(figma_file_key: Optional[str] = None, output_path: Optional[str] = None, project_path: Optional[str] = None) -> Dict[str, Any]:
|
|
1284
|
+
"""Read existing tailwind.config or generate one from detected CSS tokens.
|
|
1065
1285
|
|
|
1066
1286
|
Args:
|
|
1067
|
-
figma_file_key: Figma file key.
|
|
1068
|
-
output_path: Output file path.
|
|
1287
|
+
figma_file_key: Optional Figma file key (reserved for future use).
|
|
1288
|
+
output_path: Output file path for generated config.
|
|
1289
|
+
project_path: Project root to scan for existing config or CSS tokens.
|
|
1069
1290
|
"""
|
|
1070
1291
|
from backends.ui_bridge import design_generate_tailwind
|
|
1071
|
-
return _safe_call(design_generate_tailwind, figma_file_key=figma_file_key, output_path=output_path)
|
|
1292
|
+
return _with_next_steps("design_generate_tailwind", _safe_call(design_generate_tailwind, figma_file_key=figma_file_key, output_path=output_path, project_path=project_path))
|
|
1072
1293
|
|
|
1073
1294
|
|
|
1074
|
-
@
|
|
1295
|
+
@mcp.tool()
|
|
1075
1296
|
def delimit_design_validate_responsive(project_path: str, check_types: Optional[List[str]] = None) -> Dict[str, Any]:
|
|
1076
|
-
"""Validate responsive design patterns
|
|
1297
|
+
"""Validate responsive design patterns via static CSS analysis.
|
|
1298
|
+
|
|
1299
|
+
Scans for media queries, viewport meta, mobile-first patterns, fixed widths.
|
|
1077
1300
|
|
|
1078
1301
|
Args:
|
|
1079
1302
|
project_path: Project path to validate.
|
|
1080
1303
|
check_types: Check types (breakpoints, containers, fluid-type, etc.).
|
|
1081
1304
|
"""
|
|
1082
1305
|
from backends.ui_bridge import design_validate_responsive
|
|
1083
|
-
return _safe_call(design_validate_responsive, project_path=project_path, check_types=check_types)
|
|
1306
|
+
return _with_next_steps("design_validate_responsive", _safe_call(design_validate_responsive, project_path=project_path, check_types=check_types))
|
|
1084
1307
|
|
|
1085
1308
|
|
|
1086
|
-
@
|
|
1309
|
+
@mcp.tool()
|
|
1087
1310
|
def delimit_design_component_library(project_path: str, output_format: str = "json") -> Dict[str, Any]:
|
|
1088
|
-
"""
|
|
1311
|
+
"""Scan for React/Vue/Svelte components and generate a component catalog.
|
|
1089
1312
|
|
|
1090
1313
|
Args:
|
|
1091
|
-
project_path: Project path.
|
|
1314
|
+
project_path: Project path to scan.
|
|
1092
1315
|
output_format: Output format (json/markdown).
|
|
1093
1316
|
"""
|
|
1094
1317
|
from backends.ui_bridge import design_component_library
|
|
1095
|
-
return _safe_call(design_component_library, project_path=project_path, output_format=output_format)
|
|
1318
|
+
return _with_next_steps("design_component_library", _safe_call(design_component_library, project_path=project_path, output_format=output_format))
|
|
1096
1319
|
|
|
1097
1320
|
|
|
1098
|
-
# ───
|
|
1321
|
+
# ─── Story (Component Stories + Visual/A11y Testing) ────────────────────
|
|
1099
1322
|
|
|
1100
|
-
@
|
|
1323
|
+
@mcp.tool()
|
|
1101
1324
|
def delimit_story_generate(component_path: str, story_name: Optional[str] = None, variants: Optional[List[str]] = None) -> Dict[str, Any]:
|
|
1102
|
-
"""Generate
|
|
1325
|
+
"""Generate a .stories.tsx file for a component (no Storybook install required).
|
|
1103
1326
|
|
|
1104
1327
|
Args:
|
|
1105
1328
|
component_path: Path to the component file.
|
|
1106
|
-
story_name: Custom story name.
|
|
1107
|
-
variants: Variants to generate.
|
|
1329
|
+
story_name: Custom story name. Defaults to component name.
|
|
1330
|
+
variants: Variants to generate. Defaults to [Default, WithChildren].
|
|
1108
1331
|
"""
|
|
1109
1332
|
from backends.ui_bridge import story_generate
|
|
1110
|
-
return _safe_call(story_generate, component_path=component_path, story_name=story_name, variants=variants)
|
|
1333
|
+
return _with_next_steps("story_generate", _safe_call(story_generate, component_path=component_path, story_name=story_name, variants=variants))
|
|
1111
1334
|
|
|
1112
1335
|
|
|
1113
|
-
@
|
|
1336
|
+
@mcp.tool()
|
|
1114
1337
|
def delimit_story_visual_test(url: str, project_path: Optional[str] = None, threshold: float = 0.05) -> Dict[str, Any]:
|
|
1115
|
-
"""Run visual regression test with Playwright
|
|
1338
|
+
"""Run visual regression test -- screenshot with Playwright and compare to baseline.
|
|
1339
|
+
|
|
1340
|
+
Falls back to guidance if Playwright is not installed.
|
|
1116
1341
|
|
|
1117
1342
|
Args:
|
|
1118
|
-
url: URL to
|
|
1343
|
+
url: URL to screenshot.
|
|
1119
1344
|
project_path: Project path for baseline storage.
|
|
1120
1345
|
threshold: Diff threshold (0.0-1.0).
|
|
1121
1346
|
"""
|
|
1122
1347
|
from backends.ui_bridge import story_visual_test
|
|
1123
|
-
return _safe_call(story_visual_test, url=url, project_path=project_path, threshold=threshold)
|
|
1348
|
+
return _with_next_steps("story_visual_test", _safe_call(story_visual_test, url=url, project_path=project_path, threshold=threshold))
|
|
1124
1349
|
|
|
1125
1350
|
|
|
1126
|
-
@_experimental_tool() # HIDDEN:
|
|
1351
|
+
@_experimental_tool() # HIDDEN: requires Storybook installed (LED-044)
|
|
1127
1352
|
def delimit_story_build(project_path: str, output_dir: Optional[str] = None) -> Dict[str, Any]:
|
|
1128
|
-
"""Build Storybook static site (
|
|
1353
|
+
"""Build Storybook static site (requires Storybook installed).
|
|
1129
1354
|
|
|
1130
1355
|
Args:
|
|
1131
1356
|
project_path: Project path.
|
|
@@ -1135,35 +1360,36 @@ def delimit_story_build(project_path: str, output_dir: Optional[str] = None) ->
|
|
|
1135
1360
|
return _safe_call(story_build, project_path=project_path, output_dir=output_dir)
|
|
1136
1361
|
|
|
1137
1362
|
|
|
1138
|
-
@
|
|
1363
|
+
@mcp.tool()
|
|
1139
1364
|
def delimit_story_accessibility(project_path: str, standards: str = "WCAG2AA") -> Dict[str, Any]:
|
|
1140
|
-
"""Run WCAG accessibility
|
|
1365
|
+
"""Run WCAG accessibility checks by scanning HTML/JSX/TSX for common issues.
|
|
1366
|
+
|
|
1367
|
+
Checks: missing alt, missing labels, empty buttons, heading order, aria-hidden on focusable.
|
|
1141
1368
|
|
|
1142
1369
|
Args:
|
|
1143
|
-
project_path: Project path.
|
|
1370
|
+
project_path: Project path to scan.
|
|
1144
1371
|
standards: Accessibility standard (WCAG2A/WCAG2AA/WCAG2AAA).
|
|
1145
1372
|
"""
|
|
1146
1373
|
from backends.ui_bridge import story_accessibility_test
|
|
1147
|
-
return _safe_call(story_accessibility_test, project_path=project_path, standards=standards)
|
|
1374
|
+
return _with_next_steps("story_accessibility", _safe_call(story_accessibility_test, project_path=project_path, standards=standards))
|
|
1148
1375
|
|
|
1149
1376
|
|
|
1150
|
-
# ─── TestSmith (Testing)
|
|
1377
|
+
# ─── TestSmith (Testing — Real implementations) ──────────────────────
|
|
1151
1378
|
|
|
1152
|
-
@
|
|
1379
|
+
@mcp.tool()
|
|
1153
1380
|
def delimit_test_generate(project_path: str, source_files: Optional[List[str]] = None, framework: str = "jest") -> Dict[str, Any]:
|
|
1154
|
-
"""Generate
|
|
1381
|
+
"""Generate test skeletons for source code.
|
|
1382
|
+
|
|
1383
|
+
Scans source files using AST parsing (Python) or regex (JS/TS),
|
|
1384
|
+
extracts public function signatures, and generates test file skeletons.
|
|
1155
1385
|
|
|
1156
1386
|
Args:
|
|
1157
1387
|
project_path: Project path.
|
|
1158
1388
|
source_files: Specific files to generate tests for.
|
|
1159
1389
|
framework: Test framework (jest/pytest/vitest).
|
|
1160
1390
|
"""
|
|
1161
|
-
from ai.license import require_premium
|
|
1162
|
-
gate = require_premium("test_generate")
|
|
1163
|
-
if gate:
|
|
1164
|
-
return gate
|
|
1165
1391
|
from backends.ui_bridge import test_generate
|
|
1166
|
-
return _safe_call(test_generate, project_path=project_path, source_files=source_files, framework=framework)
|
|
1392
|
+
return _with_next_steps("test_generate", _safe_call(test_generate, project_path=project_path, source_files=source_files, framework=framework))
|
|
1167
1393
|
|
|
1168
1394
|
|
|
1169
1395
|
@_experimental_tool() # HIDDEN: stub/pass-through (LED-044)
|
|
@@ -1184,42 +1410,47 @@ def delimit_test_coverage(project_path: str, threshold: int = 80) -> Dict[str, A
|
|
|
1184
1410
|
|
|
1185
1411
|
@mcp.tool()
|
|
1186
1412
|
def delimit_test_smoke(project_path: str, test_suite: Optional[str] = None) -> Dict[str, Any]:
|
|
1187
|
-
"""Run smoke tests
|
|
1413
|
+
"""Run smoke tests for a project.
|
|
1414
|
+
|
|
1415
|
+
Detects the test framework (pytest/jest/vitest/mocha) from project config,
|
|
1416
|
+
runs the test suite, and parses pass/fail/error counts.
|
|
1188
1417
|
|
|
1189
1418
|
Args:
|
|
1190
1419
|
project_path: Project path.
|
|
1191
1420
|
test_suite: Specific test suite to run.
|
|
1192
1421
|
"""
|
|
1193
|
-
from ai.license import require_premium
|
|
1194
|
-
gate = require_premium("test_smoke")
|
|
1195
|
-
if gate:
|
|
1196
|
-
return gate
|
|
1197
1422
|
from backends.ui_bridge import test_smoke
|
|
1198
|
-
return _safe_call(test_smoke, project_path=project_path, test_suite=test_suite)
|
|
1423
|
+
return _with_next_steps("test_smoke", _safe_call(test_smoke, project_path=project_path, test_suite=test_suite))
|
|
1199
1424
|
|
|
1200
1425
|
|
|
1201
|
-
# ─── Docs
|
|
1426
|
+
# ─── Docs (Real implementations) ─────────────────────────────────────
|
|
1202
1427
|
|
|
1203
|
-
@
|
|
1428
|
+
@mcp.tool()
|
|
1204
1429
|
def delimit_docs_generate(target: str = ".") -> Dict[str, Any]:
|
|
1205
|
-
"""Generate documentation for a project
|
|
1430
|
+
"""Generate API reference documentation for a project.
|
|
1431
|
+
|
|
1432
|
+
Scans Python files for docstrings and JS/TS files for JSDoc comments.
|
|
1433
|
+
Produces a markdown API reference organized by source file.
|
|
1206
1434
|
|
|
1207
1435
|
Args:
|
|
1208
1436
|
target: Project path.
|
|
1209
1437
|
"""
|
|
1210
1438
|
from backends.ui_bridge import docs_generate
|
|
1211
|
-
return _safe_call(docs_generate, target=target)
|
|
1439
|
+
return _with_next_steps("docs_generate", _safe_call(docs_generate, target=target))
|
|
1212
1440
|
|
|
1213
1441
|
|
|
1214
|
-
@
|
|
1442
|
+
@mcp.tool()
|
|
1215
1443
|
def delimit_docs_validate(target: str = ".") -> Dict[str, Any]:
|
|
1216
|
-
"""Validate documentation quality and completeness
|
|
1444
|
+
"""Validate documentation quality and completeness.
|
|
1445
|
+
|
|
1446
|
+
Checks README existence, docstring coverage on public functions,
|
|
1447
|
+
and broken internal links in markdown files.
|
|
1217
1448
|
|
|
1218
1449
|
Args:
|
|
1219
1450
|
target: Project path.
|
|
1220
1451
|
"""
|
|
1221
1452
|
from backends.ui_bridge import docs_validate
|
|
1222
|
-
return _safe_call(docs_validate, target=target)
|
|
1453
|
+
return _with_next_steps("docs_validate", _safe_call(docs_validate, target=target))
|
|
1223
1454
|
|
|
1224
1455
|
|
|
1225
1456
|
|
|
@@ -1271,10 +1502,10 @@ async def delimit_sensor_github_issue(
|
|
|
1271
1502
|
timeout=30,
|
|
1272
1503
|
)
|
|
1273
1504
|
if comments_proc.returncode != 0:
|
|
1274
|
-
return {
|
|
1505
|
+
return _with_next_steps("sensor_github_issue", {
|
|
1275
1506
|
"error": f"gh api comments failed: {comments_proc.stderr.strip()}",
|
|
1276
1507
|
"has_new_activity": False,
|
|
1277
|
-
}
|
|
1508
|
+
})
|
|
1278
1509
|
|
|
1279
1510
|
all_comments = json.loads(comments_proc.stdout) if comments_proc.stdout.strip() else []
|
|
1280
1511
|
|
|
@@ -1294,10 +1525,10 @@ async def delimit_sensor_github_issue(
|
|
|
1294
1525
|
timeout=30,
|
|
1295
1526
|
)
|
|
1296
1527
|
if issue_proc.returncode != 0:
|
|
1297
|
-
return {
|
|
1528
|
+
return _with_next_steps("sensor_github_issue", {
|
|
1298
1529
|
"error": f"gh api issue failed: {issue_proc.stderr.strip()}",
|
|
1299
1530
|
"has_new_activity": False,
|
|
1300
|
-
}
|
|
1531
|
+
})
|
|
1301
1532
|
|
|
1302
1533
|
issue_info = json.loads(issue_proc.stdout) if issue_proc.stdout.strip() else {}
|
|
1303
1534
|
issue_state = issue_info.get("state", "unknown")
|
|
@@ -1321,7 +1552,7 @@ async def delimit_sensor_github_issue(
|
|
|
1321
1552
|
latest_comment_id = max((c["id"] for c in all_comments), default=since_comment_id)
|
|
1322
1553
|
|
|
1323
1554
|
repo_key = repo.replace("/", "_")
|
|
1324
|
-
return {
|
|
1555
|
+
return _with_next_steps("sensor_github_issue", {
|
|
1325
1556
|
"signal": {
|
|
1326
1557
|
"id": f"sensor:github_issue:{repo_key}:{issue_number}",
|
|
1327
1558
|
"venture": "delimit",
|
|
@@ -1335,15 +1566,15 @@ async def delimit_sensor_github_issue(
|
|
|
1335
1566
|
"latest_comment_id": latest_comment_id,
|
|
1336
1567
|
"total_comments": len(all_comments),
|
|
1337
1568
|
"has_new_activity": len(new_comments) > 0,
|
|
1338
|
-
}
|
|
1569
|
+
})
|
|
1339
1570
|
|
|
1340
1571
|
except subprocess.TimeoutExpired:
|
|
1341
|
-
return {"error": "gh command timed out after 30s", "has_new_activity": False}
|
|
1572
|
+
return _with_next_steps("sensor_github_issue", {"error": "gh command timed out after 30s", "has_new_activity": False})
|
|
1342
1573
|
except json.JSONDecodeError as e:
|
|
1343
|
-
return {"error": f"Failed to parse gh output: {e}", "has_new_activity": False}
|
|
1574
|
+
return _with_next_steps("sensor_github_issue", {"error": f"Failed to parse gh output: {e}", "has_new_activity": False})
|
|
1344
1575
|
except Exception as e:
|
|
1345
1576
|
logger.error("Sensor error: %s\n%s", e, traceback.format_exc())
|
|
1346
|
-
return {"error": str(e), "has_new_activity": False}
|
|
1577
|
+
return _with_next_steps("sensor_github_issue", {"error": str(e), "has_new_activity": False})
|
|
1347
1578
|
|
|
1348
1579
|
|
|
1349
1580
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -1385,14 +1616,14 @@ def delimit_version() -> Dict[str, Any]:
|
|
|
1385
1616
|
],
|
|
1386
1617
|
}
|
|
1387
1618
|
total = sum(len(v) for v in tiers.values()) + 1 # +1 for version itself
|
|
1388
|
-
return {
|
|
1619
|
+
return _with_next_steps("version", {
|
|
1389
1620
|
"version": VERSION,
|
|
1390
1621
|
"server": "delimit-unified",
|
|
1391
1622
|
"total_tools": total,
|
|
1392
1623
|
"tiers": tiers,
|
|
1393
1624
|
"adapter_contract": "v1.0",
|
|
1394
1625
|
"authority": "delimit-gateway",
|
|
1395
|
-
}
|
|
1626
|
+
})
|
|
1396
1627
|
|
|
1397
1628
|
|
|
1398
1629
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -1414,6 +1645,35 @@ TOOL_HELP = {
|
|
|
1414
1645
|
}
|
|
1415
1646
|
|
|
1416
1647
|
|
|
1648
|
+
STANDARD_WORKFLOWS = [
|
|
1649
|
+
{
|
|
1650
|
+
"name": "Project Onboarding",
|
|
1651
|
+
"description": "Set up governance for a new project",
|
|
1652
|
+
"steps": ["delimit_init", "delimit_gov_health", "delimit_lint", "delimit_test_coverage", "delimit_security_scan"],
|
|
1653
|
+
},
|
|
1654
|
+
{
|
|
1655
|
+
"name": "Pre-Commit Check",
|
|
1656
|
+
"description": "Validate changes before committing",
|
|
1657
|
+
"steps": ["delimit_lint", "delimit_test_coverage", "delimit_semver"],
|
|
1658
|
+
},
|
|
1659
|
+
{
|
|
1660
|
+
"name": "Security Audit",
|
|
1661
|
+
"description": "Full security scan with evidence collection",
|
|
1662
|
+
"steps": ["delimit_security_scan", "delimit_evidence_collect", "delimit_evidence_verify"],
|
|
1663
|
+
},
|
|
1664
|
+
{
|
|
1665
|
+
"name": "API Change Review",
|
|
1666
|
+
"description": "Review and document an API change",
|
|
1667
|
+
"steps": ["delimit_diff", "delimit_semver", "delimit_explain", "delimit_lint"],
|
|
1668
|
+
},
|
|
1669
|
+
{
|
|
1670
|
+
"name": "Deploy Pipeline",
|
|
1671
|
+
"description": "Build, publish, and verify a deployment",
|
|
1672
|
+
"steps": ["delimit_deploy_build", "delimit_deploy_publish", "delimit_deploy_verify"],
|
|
1673
|
+
},
|
|
1674
|
+
]
|
|
1675
|
+
|
|
1676
|
+
|
|
1417
1677
|
@mcp.tool()
|
|
1418
1678
|
def delimit_help(tool_name: str = "") -> Dict[str, Any]:
|
|
1419
1679
|
"""Get help for a Delimit tool — what it does, parameters, and examples.
|
|
@@ -1422,19 +1682,20 @@ def delimit_help(tool_name: str = "") -> Dict[str, Any]:
|
|
|
1422
1682
|
tool_name: Tool name (e.g. 'lint', 'gov_health'). Leave empty for overview.
|
|
1423
1683
|
"""
|
|
1424
1684
|
if not tool_name:
|
|
1425
|
-
return {
|
|
1685
|
+
return _with_next_steps("help", {
|
|
1426
1686
|
"message": "Delimit has 77 tools. Here are the most useful ones to start with:",
|
|
1427
1687
|
"essential_tools": {k: v["desc"] for k, v in TOOL_HELP.items()},
|
|
1688
|
+
"workflows": STANDARD_WORKFLOWS,
|
|
1428
1689
|
"tip": "Run delimit_help(tool_name='lint') for detailed help on a specific tool.",
|
|
1429
1690
|
"all_tools": "Run delimit_version() for the complete list.",
|
|
1430
|
-
}
|
|
1691
|
+
})
|
|
1431
1692
|
|
|
1432
1693
|
# Normalize name
|
|
1433
1694
|
clean = tool_name.replace("delimit_", "").replace("mcp__delimit__delimit_", "")
|
|
1434
1695
|
info = TOOL_HELP.get(clean)
|
|
1435
1696
|
if info:
|
|
1436
|
-
return {"tool": clean, **info}
|
|
1437
|
-
return {"error": f"No help for '{tool_name}'. Try: {', '.join(TOOL_HELP.keys())}"}
|
|
1697
|
+
return _with_next_steps("help", {"tool": clean, **info})
|
|
1698
|
+
return _with_next_steps("help", {"error": f"No help for '{tool_name}'. Try: {', '.join(TOOL_HELP.keys())}"})
|
|
1438
1699
|
|
|
1439
1700
|
|
|
1440
1701
|
@mcp.tool()
|
|
@@ -1495,13 +1756,19 @@ def delimit_diagnose(project_path: str = ".") -> Dict[str, Any]:
|
|
|
1495
1756
|
|
|
1496
1757
|
# Summary
|
|
1497
1758
|
status = "healthy" if not issues else "issues_found"
|
|
1498
|
-
|
|
1759
|
+
result = {
|
|
1499
1760
|
"status": status,
|
|
1500
1761
|
"checks": checks,
|
|
1501
1762
|
"issues": issues,
|
|
1502
1763
|
"issue_count": len(issues),
|
|
1503
1764
|
"tip": "If everything looks good but tools aren't working, try restarting Claude Code.",
|
|
1504
1765
|
}
|
|
1766
|
+
# Dynamic next_steps: suggest init if not initialized
|
|
1767
|
+
diagnose_next = []
|
|
1768
|
+
if not delimit_dir.is_dir():
|
|
1769
|
+
diagnose_next.append({"tool": "delimit_init", "reason": "Initialize governance for this project", "suggested_args": {"preset": "default"}, "is_premium": False})
|
|
1770
|
+
result["next_steps"] = diagnose_next
|
|
1771
|
+
return result
|
|
1505
1772
|
|
|
1506
1773
|
|
|
1507
1774
|
# ═══════════════════════════════════════════════════════════════════════
|
|
@@ -1517,14 +1784,161 @@ def delimit_activate(license_key: str) -> Dict[str, Any]:
|
|
|
1517
1784
|
license_key: The license key to activate (e.g. DELIMIT-XXXX-XXXX-XXXX).
|
|
1518
1785
|
"""
|
|
1519
1786
|
from ai.license import activate_license
|
|
1520
|
-
return activate_license(license_key)
|
|
1787
|
+
return _with_next_steps("activate", activate_license(license_key))
|
|
1521
1788
|
|
|
1522
1789
|
|
|
1523
1790
|
@mcp.tool()
|
|
1524
1791
|
def delimit_license_status() -> Dict[str, Any]:
|
|
1525
1792
|
"""Check current Delimit license status -- tier, validity, and expiry."""
|
|
1526
1793
|
from ai.license import get_license
|
|
1527
|
-
return get_license()
|
|
1794
|
+
return _with_next_steps("license_status", get_license())
|
|
1795
|
+
|
|
1796
|
+
|
|
1797
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
1798
|
+
# LEDGER (Strategy + Operational Task Tracking)
|
|
1799
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
1800
|
+
|
|
1801
|
+
|
|
1802
|
+
@mcp.tool()
|
|
1803
|
+
def delimit_ledger_add(
|
|
1804
|
+
title: str,
|
|
1805
|
+
ledger: str = "ops",
|
|
1806
|
+
type: str = "task",
|
|
1807
|
+
priority: str = "P1",
|
|
1808
|
+
description: str = "",
|
|
1809
|
+
source: str = "session",
|
|
1810
|
+
) -> Dict[str, Any]:
|
|
1811
|
+
"""Add a new item to the strategy or operational ledger.
|
|
1812
|
+
|
|
1813
|
+
The ledger tracks what needs to be done across sessions. Use "ops" for
|
|
1814
|
+
tasks/bugs/features, "strategy" for consensus decisions and direction.
|
|
1815
|
+
|
|
1816
|
+
Args:
|
|
1817
|
+
title: What needs to be done.
|
|
1818
|
+
ledger: "ops" (tasks, bugs, features) or "strategy" (decisions, direction).
|
|
1819
|
+
type: task, fix, feat, strategy, consensus.
|
|
1820
|
+
priority: P0 (urgent), P1 (important), P2 (nice to have).
|
|
1821
|
+
description: Details.
|
|
1822
|
+
source: Where this came from (session, consensus, focus-group, etc).
|
|
1823
|
+
"""
|
|
1824
|
+
from ai.ledger_manager import add_item
|
|
1825
|
+
return add_item(title=title, ledger=ledger, type=type, priority=priority,
|
|
1826
|
+
description=description, source=source)
|
|
1827
|
+
|
|
1828
|
+
|
|
1829
|
+
@mcp.tool()
|
|
1830
|
+
def delimit_ledger_done(item_id: str, note: str = "") -> Dict[str, Any]:
|
|
1831
|
+
"""Mark a ledger item as done.
|
|
1832
|
+
|
|
1833
|
+
Args:
|
|
1834
|
+
item_id: The item ID (e.g. LED-001 or STR-001).
|
|
1835
|
+
note: Optional completion note.
|
|
1836
|
+
"""
|
|
1837
|
+
from ai.ledger_manager import update_item
|
|
1838
|
+
return update_item(item_id=item_id, status="done", note=note)
|
|
1839
|
+
|
|
1840
|
+
|
|
1841
|
+
@mcp.tool()
|
|
1842
|
+
def delimit_ledger_list(
|
|
1843
|
+
ledger: str = "both",
|
|
1844
|
+
status: str = "",
|
|
1845
|
+
priority: str = "",
|
|
1846
|
+
limit: int = 20,
|
|
1847
|
+
) -> Dict[str, Any]:
|
|
1848
|
+
"""List ledger items — see what's open, done, or in progress.
|
|
1849
|
+
|
|
1850
|
+
Args:
|
|
1851
|
+
ledger: "ops", "strategy", or "both".
|
|
1852
|
+
status: Filter by status — "open", "done", "in_progress", or empty for all.
|
|
1853
|
+
priority: Filter by priority — "P0", "P1", "P2", or empty for all.
|
|
1854
|
+
limit: Max items to return.
|
|
1855
|
+
"""
|
|
1856
|
+
from ai.ledger_manager import list_items
|
|
1857
|
+
return list_items(ledger=ledger, status=status or None, priority=priority or None, limit=limit)
|
|
1858
|
+
|
|
1859
|
+
|
|
1860
|
+
@mcp.tool()
|
|
1861
|
+
def delimit_ledger_context() -> Dict[str, Any]:
|
|
1862
|
+
"""Get a quick summary of what's open in the ledger — use at session start.
|
|
1863
|
+
|
|
1864
|
+
Returns the top 5 open items by priority so the AI knows what to work on.
|
|
1865
|
+
"""
|
|
1866
|
+
from ai.ledger_manager import get_context
|
|
1867
|
+
return get_context()
|
|
1868
|
+
|
|
1869
|
+
|
|
1870
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
1871
|
+
# DELIBERATION (Multi-Round Consensus)
|
|
1872
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
1873
|
+
|
|
1874
|
+
|
|
1875
|
+
@mcp.tool()
|
|
1876
|
+
def delimit_models(action: str = "list") -> Dict[str, Any]:
|
|
1877
|
+
"""View and configure AI models for multi-model deliberation.
|
|
1878
|
+
|
|
1879
|
+
Shows which models are available for consensus runs. Models auto-detect
|
|
1880
|
+
from environment variables (XAI_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY).
|
|
1881
|
+
|
|
1882
|
+
Args:
|
|
1883
|
+
action: 'list' to show configured models.
|
|
1884
|
+
"""
|
|
1885
|
+
from ai.deliberation import configure_models
|
|
1886
|
+
return configure_models()
|
|
1887
|
+
|
|
1888
|
+
|
|
1889
|
+
@mcp.tool()
|
|
1890
|
+
def delimit_deliberate(
|
|
1891
|
+
question: str,
|
|
1892
|
+
context: str = "",
|
|
1893
|
+
mode: str = "dialogue",
|
|
1894
|
+
max_rounds: int = 3,
|
|
1895
|
+
save_path: str = "",
|
|
1896
|
+
) -> Dict[str, Any]:
|
|
1897
|
+
"""Run multi-model consensus via real AI-to-AI deliberation.
|
|
1898
|
+
|
|
1899
|
+
This is the consensus tool. Models (Grok 4, Gemini, Codex) debate each other
|
|
1900
|
+
directly until they reach unanimous agreement.
|
|
1901
|
+
|
|
1902
|
+
Modes:
|
|
1903
|
+
- "dialogue": Short conversational turns like a group chat (default, 6 rounds)
|
|
1904
|
+
- "debate": Long-form essays with full counter-arguments (3 rounds)
|
|
1905
|
+
|
|
1906
|
+
Args:
|
|
1907
|
+
question: The question to reach consensus on.
|
|
1908
|
+
context: Background context for all models.
|
|
1909
|
+
mode: "dialogue" (short turns) or "debate" (long essays).
|
|
1910
|
+
max_rounds: Maximum rounds (default 3 for debate, 6 for dialogue).
|
|
1911
|
+
save_path: Optional file path to save the full transcript.
|
|
1912
|
+
"""
|
|
1913
|
+
from ai.deliberation import deliberate
|
|
1914
|
+
result = deliberate(
|
|
1915
|
+
question=question,
|
|
1916
|
+
context=context,
|
|
1917
|
+
mode=mode,
|
|
1918
|
+
max_rounds=max_rounds,
|
|
1919
|
+
save_path=save_path or "",
|
|
1920
|
+
)
|
|
1921
|
+
|
|
1922
|
+
# Add summary for Claude to review
|
|
1923
|
+
rounds_count = len(result.get("rounds", []))
|
|
1924
|
+
unanimous = result.get("unanimous", False)
|
|
1925
|
+
|
|
1926
|
+
summary = {
|
|
1927
|
+
"status": "unanimous" if unanimous else "no_consensus",
|
|
1928
|
+
"rounds": rounds_count,
|
|
1929
|
+
"agreed_at_round": result.get("agreed_at_round"),
|
|
1930
|
+
"final_verdict": result.get("final_verdict"),
|
|
1931
|
+
"transcript_saved": result.get("saved_to", save_path),
|
|
1932
|
+
"note": "Review the full transcript. As orchestrator, provide your own analysis and final synthesis.",
|
|
1933
|
+
}
|
|
1934
|
+
|
|
1935
|
+
# Include last round responses for immediate review
|
|
1936
|
+
if result.get("rounds"):
|
|
1937
|
+
last_round = result["rounds"][-1]
|
|
1938
|
+
summary["gemini_final_response"] = last_round["responses"].get("gemini", "")[:2000]
|
|
1939
|
+
summary["grok_final_response"] = last_round["responses"].get("grok", "")[:2000]
|
|
1940
|
+
|
|
1941
|
+
return summary
|
|
1528
1942
|
|
|
1529
1943
|
|
|
1530
1944
|
# ═══════════════════════════════════════════════════════════════════════
|