@covibes/zeroshot 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +167 -0
- package/LICENSE +21 -0
- package/README.md +364 -0
- package/cli/index.js +3990 -0
- package/cluster-templates/base-templates/debug-workflow.json +181 -0
- package/cluster-templates/base-templates/full-workflow.json +455 -0
- package/cluster-templates/base-templates/single-worker.json +48 -0
- package/cluster-templates/base-templates/worker-validator.json +131 -0
- package/cluster-templates/conductor-bootstrap.json +122 -0
- package/cluster-templates/conductor-junior-bootstrap.json +69 -0
- package/docker/zeroshot-cluster/Dockerfile +132 -0
- package/lib/completion.js +174 -0
- package/lib/id-detector.js +53 -0
- package/lib/settings.js +97 -0
- package/lib/stream-json-parser.js +236 -0
- package/package.json +121 -0
- package/src/agent/agent-config.js +121 -0
- package/src/agent/agent-context-builder.js +241 -0
- package/src/agent/agent-hook-executor.js +329 -0
- package/src/agent/agent-lifecycle.js +555 -0
- package/src/agent/agent-stuck-detector.js +256 -0
- package/src/agent/agent-task-executor.js +1034 -0
- package/src/agent/agent-trigger-evaluator.js +67 -0
- package/src/agent-wrapper.js +459 -0
- package/src/agents/git-pusher-agent.json +20 -0
- package/src/attach/attach-client.js +438 -0
- package/src/attach/attach-server.js +543 -0
- package/src/attach/index.js +35 -0
- package/src/attach/protocol.js +220 -0
- package/src/attach/ring-buffer.js +121 -0
- package/src/attach/socket-discovery.js +242 -0
- package/src/claude-task-runner.js +468 -0
- package/src/config-router.js +80 -0
- package/src/config-validator.js +598 -0
- package/src/github.js +103 -0
- package/src/isolation-manager.js +1042 -0
- package/src/ledger.js +429 -0
- package/src/logic-engine.js +223 -0
- package/src/message-bus-bridge.js +139 -0
- package/src/message-bus.js +202 -0
- package/src/name-generator.js +232 -0
- package/src/orchestrator.js +1938 -0
- package/src/schemas/sub-cluster.js +156 -0
- package/src/sub-cluster-wrapper.js +545 -0
- package/src/task-runner.js +28 -0
- package/src/template-resolver.js +347 -0
- package/src/tui/CHANGES.txt +133 -0
- package/src/tui/LAYOUT.md +261 -0
- package/src/tui/README.txt +192 -0
- package/src/tui/TWO-LEVEL-NAVIGATION.md +186 -0
- package/src/tui/data-poller.js +325 -0
- package/src/tui/demo.js +208 -0
- package/src/tui/formatters.js +123 -0
- package/src/tui/index.js +193 -0
- package/src/tui/keybindings.js +383 -0
- package/src/tui/layout.js +317 -0
- package/src/tui/renderer.js +194 -0
package/cluster-templates/conductor-bootstrap.json
ADDED

@@ -0,0 +1,122 @@
{
  "name": "Two-Tier Conductor (Complexity × TaskType)",
  "description": "Cost-optimized conductor: Haiku junior for 2D classification (complexity + taskType), Sonnet senior for UNCERTAIN tasks. Routes to appropriate cluster config via helpers.getConfig().",
  "agents": [
    {
      "id": "junior-conductor",
      "role": "conductor",
      "model": "haiku",
      "outputFormat": "json",
      "jsonSchema": {
        "type": "object",
        "properties": {
          "complexity": {
            "type": "string",
            "enum": ["TRIVIAL", "SIMPLE", "STANDARD", "CRITICAL", "UNCERTAIN"],
            "description": "Task complexity level"
          },
          "taskType": {
            "type": "string",
            "enum": ["INQUIRY", "TASK", "DEBUG"],
            "description": "Type of work: INQUIRY (questions/exploration), TASK (implement new), DEBUG (fix broken)"
          },
          "reasoning": {
            "type": "string",
            "description": "Why this classification (1-2 sentences)"
          }
        },
        "required": ["complexity", "taskType", "reasoning"]
      },
      "prompt": {
        "system": "You are the JUNIOR CONDUCTOR (Haiku) - fast, cost-efficient task classification.\n\n## Your Job\nClassify tasks on TWO dimensions: COMPLEXITY and TASK TYPE.\n\n## COMPLEXITY (how hard/risky)\n\n**TRIVIAL** - One command, one file, mechanical\n**SIMPLE** - One concern, straightforward\n**STANDARD** - Multi-file, needs planning\n**CRITICAL** - High risk (auth, payments, security, production)\n**UNCERTAIN** - Escalate to senior conductor\n\n## TASK TYPE (what kind of action)\n\n**INQUIRY** - Questions, exploration, understanding\n- \"How does X work?\", \"What files handle Y?\", \"Explain Z\"\n- NO changes made, just gathering information\n\n**TASK** - Implement something new\n- \"Add feature X\", \"Create Y\", \"Implement Z\"\n- Building new functionality\n\n**DEBUG** - Fix something broken\n- \"Why is X failing?\", \"Fix bug in Y\", \"Debug Z\"\n- Investigating and fixing existing issues\n\n## Output Format\n\n```json\n{\n \"complexity\": \"TRIVIAL|SIMPLE|STANDARD|CRITICAL|UNCERTAIN\",\n \"taskType\": \"INQUIRY|TASK|DEBUG\",\n \"reasoning\": \"Brief explanation\"\n}\n```\n\n## Examples\n\n\"How does auth work?\" → SIMPLE, INQUIRY\n\"Add rate limiting\" → STANDARD, TASK\n\"Fix null pointer bug\" → SIMPLE, DEBUG\n\"Why is the API slow?\" → STANDARD, DEBUG\n\"Deploy to AWS\" → CRITICAL, TASK\n\"terraform plan\" → SIMPLE, TASK\n\"What files handle routing?\" → TRIVIAL, INQUIRY\n\n## Critical Rules\n\n1. ALWAYS output both dimensions\n2. INQUIRY = read-only, TASK = create new, DEBUG = fix broken\n3. complexity=UNCERTAIN only when truly ambiguous\n\nTask: {{ISSUE_OPENED.content.text}}"
      },
      "contextStrategy": {
        "sources": [{ "topic": "ISSUE_OPENED", "limit": 1 }],
        "format": "chronological",
        "maxTokens": 100000
      },
      "triggers": [
        {
          "topic": "ISSUE_OPENED",
          "logic": {
            "engine": "javascript",
            "script": "return message.sender === 'system';"
          },
          "action": "execute_task"
        }
      ],
      "hooks": {
        "onComplete": {
          "action": "publish_message",
          "transform": {
            "engine": "javascript",
            "script": "const { complexity, taskType, reasoning } = result;\nconst taskText = triggeringMessage.content?.text || '';\n\nif (complexity === 'UNCERTAIN') {\n  return {\n    topic: 'CONDUCTOR_ESCALATE',\n    content: {\n      text: reasoning,\n      data: { complexity, taskType, reasoning, taskText }\n    }\n  };\n}\n\nconst config = helpers.getConfig(complexity, taskType);\n\nreturn {\n  topic: 'CLUSTER_OPERATIONS',\n  content: {\n    text: `[${complexity}:${taskType}] ${reasoning}`,\n    data: {\n      complexity,\n      taskType,\n      operations: [\n        { action: 'load_config', config },\n        { action: 'publish', topic: 'ISSUE_OPENED', content: { text: taskText } }\n      ]\n    }\n  }\n};"
          }
        }
      }
    },
    {
      "id": "senior-conductor",
      "role": "conductor",
      "model": "sonnet",
      "outputFormat": "json",
      "jsonSchema": {
        "type": "object",
        "properties": {
          "complexity": {
            "type": "string",
            "enum": ["TRIVIAL", "SIMPLE", "STANDARD", "CRITICAL"],
            "description": "Task complexity (no UNCERTAIN - must decide)"
          },
          "taskType": {
            "type": "string",
            "enum": ["INQUIRY", "TASK", "DEBUG"],
            "description": "Type of work after analysis"
          },
          "reasoning": {
            "type": "string",
            "description": "Detailed explanation"
          }
        },
        "required": ["complexity", "taskType", "reasoning"]
      },
      "prompt": {
        "system": "You are the SENIOR CONDUCTOR - expert task analyzer for ambiguous tasks.\n\nThe junior conductor was uncertain. Analyze deeply and make a definitive classification.\n\n## COMPLEXITY\n\n**TRIVIAL** - One command/file, mechanical, no risk\n**SIMPLE** - One concern, straightforward\n**STANDARD** - Multi-file, needs planning\n**CRITICAL** - High risk (auth/payments/security/production)\n\n## TASK TYPE\n\n**INQUIRY** - Questions, exploration (read-only)\n**TASK** - Implement something new (create)\n**DEBUG** - Fix something broken (investigate + fix)\n\n## Decision Rules\n\n1. YOU MUST DECIDE - no UNCERTAIN output\n2. When in doubt about complexity, go ONE LEVEL HIGHER\n3. INQUIRY vs TASK vs DEBUG based on intent, not keywords\n4. Consider: Is user asking a question? Building new? Fixing broken?\n\nJunior classified as UNCERTAIN. Original task and reasoning follow."
      },
      "contextStrategy": {
        "sources": [
          { "topic": "ISSUE_OPENED", "limit": 1 },
          {
            "topic": "CONDUCTOR_ESCALATE",
            "since": "last_agent_start",
            "limit": 1
          },
          {
            "topic": "CLUSTER_OPERATIONS_VALIDATION_FAILED",
            "since": "cluster_start",
            "limit": 3
          }
        ],
        "format": "chronological",
        "maxTokens": 100000
      },
      "maxRetries": 3,
      "triggers": [
        { "topic": "CONDUCTOR_ESCALATE", "action": "execute_task" },
        {
          "topic": "CLUSTER_OPERATIONS_VALIDATION_FAILED",
          "action": "execute_task"
        }
      ],
      "hooks": {
        "onComplete": {
          "action": "publish_message",
          "transform": {
            "engine": "javascript",
            "script": "const { complexity, taskType, reasoning } = result;\n\nlet taskText = triggeringMessage.content?.data?.taskText || '';\nif (!taskText) {\n  taskText = triggeringMessage.content?.text || '';\n}\n\nconst config = helpers.getConfig(complexity, taskType);\n\nreturn {\n  topic: 'CLUSTER_OPERATIONS',\n  content: {\n    text: `Senior: [${complexity}:${taskType}] ${reasoning}`,\n    data: {\n      complexity,\n      taskType,\n      operations: [\n        { action: 'load_config', config },\n        { action: 'publish', topic: 'ISSUE_OPENED', content: { text: taskText } }\n      ]\n    }\n  }\n};"
          }
        }
      }
    }
  ]
}
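Note on the "transform" hooks above: each is a JavaScript function body evaluated with result (the agent's JSON output), triggeringMessage, and helpers in scope, returning the message to publish. A minimal sketch of how such a hook could be driven, with helpers.getConfig stubbed; the harness is illustrative only (the package's actual executor appears to live in package/src/agent/agent-hook-executor.js, which is not excerpted here):

    // Illustrative harness; the binding names match the hook scripts above,
    // but the Function-based execution mechanism is an assumption.
    function runTransform(script, result, triggeringMessage, helpers) {
      const fn = new Function('result', 'triggeringMessage', 'helpers', script);
      return fn(result, triggeringMessage, helpers);
    }

    const helpers = {
      // Stub: map (complexity, taskType) to a config name; the real resolver
      // presumably lives in package/src/template-resolver.js.
      getConfig: (complexity, taskType) => `${complexity}-${taskType}.json`.toLowerCase(),
    };

    const script =
      "if (result.complexity === 'UNCERTAIN') { return { topic: 'CONDUCTOR_ESCALATE', content: { text: result.reasoning } }; } " +
      "return { topic: 'CLUSTER_OPERATIONS', content: { text: `[${result.complexity}:${result.taskType}] ${result.reasoning}`, data: { config: helpers.getConfig(result.complexity, result.taskType) } } };";

    console.log(runTransform(script, { complexity: 'STANDARD', taskType: 'TASK', reasoning: 'Multi-file change' }, {}, helpers));
    // -> { topic: 'CLUSTER_OPERATIONS', content: { text: '[STANDARD:TASK] Multi-file change', data: { config: 'standard-task.json' } } }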
package/cluster-templates/conductor-junior-bootstrap.json
ADDED

@@ -0,0 +1,69 @@
{
  "name": "Junior Conductor Bootstrap",
  "description": "Cost-optimized haiku conductor for fast task classification - spawns agents directly for TRIVIAL/SIMPLE/STANDARD/CRITICAL, escalates only for UNCERTAIN tasks",
  "agents": [
    {
      "id": "junior-conductor",
      "role": "conductor",
      "model": "haiku",
      "outputFormat": "json",
      "jsonSchema": {
        "type": "object",
        "properties": {
          "classification": {
            "type": "string",
            "enum": ["TRIVIAL", "SIMPLE", "STANDARD", "CRITICAL", "UNCERTAIN"],
            "description": "Task complexity category"
          },
          "reasoning": {
            "type": "string",
            "description": "Why this classification (1-2 sentences)"
          },
          "operations": {
            "type": "array",
            "description": "CLUSTER_OPERATIONS to spawn agents (empty if UNCERTAIN)"
          }
        },
        "required": ["classification", "reasoning", "operations"]
      },
      "prompt": {
        "system": "You are the JUNIOR CONDUCTOR (Haiku) - fast, cost-efficient task classification.\n\n## Your Job\nClassify tasks into complexity categories and spawn appropriate agent clusters.\nYou handle 95% of tasks. Only escalate when the DECISION ITSELF is hard.\n\n## Categories\n\n**TRIVIAL** (haiku worker only):\n- Typos, docs, simple renames, config changes\n- Simple scripts (hello world, count to 100)\n- No planning needed, mechanical implementation\n- Examples: \"fix typo in README\", \"create Python script that counts to 100\"\n\n**SIMPLE** (sonnet worker + haiku validator):\n- Single-file bug fixes\n- Small feature additions\n- Basic refactoring (one function)\n- Examples: \"fix null pointer in user service\", \"add validation to form\"\n\n**STANDARD** (planner + worker + 2 validators):\n- Multi-file features\n- Cross-component changes\n- Non-trivial refactoring\n- Examples: \"add rate limiting to API\", \"implement search filtering\"\n\n**CRITICAL** (planner(opus) + worker + security + reviewer + tester):\n- Auth, payments, security\n- Data migrations\n- Production-critical changes\n- Examples: \"implement OAuth2 login\", \"add payment processing\"\n\n**UNCERTAIN** (escalate to senior conductor):\n- Ambiguous requirements (can't determine scope)\n- Unknown unknowns (\"refactor auth\" - how big?)\n- Edge cases not covered by examples\n- Contradictory requirements\n\n## Output Format\n\n```json\n{\n \"classification\": \"TRIVIAL|SIMPLE|STANDARD|CRITICAL|UNCERTAIN\",\n \"reasoning\": \"Brief explanation (1-2 sentences)\",\n \"operations\": [/* CLUSTER_OPERATIONS array or empty if UNCERTAIN */]\n}\n```\n\n## Agent Primitives - Copy these exactly\n\n### WORKER\n{\"id\":\"worker\",\"role\":\"implementation\",\"model\":\"haiku or sonnet\",\"verify\":true,\"maxIterations\":30,\"prompt\":{\"system\":\"Implement the task. On iteration 1: follow the plan or issue. On iteration 2+: fix ALL issues from validators.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1},{\"topic\":\"PLAN_READY\",\"limit\":1},{\"topic\":\"VALIDATION_RESULT\",\"since\":\"last_task_end\",\"limit\":10}],\"format\":\"chronological\",\"maxTokens\":100000},\"triggers\":[{\"topic\":\"PLAN_READY\",\"action\":\"execute_task\"},{\"topic\":\"ISSUE_OPENED\",\"action\":\"execute_task\"},{\"topic\":\"VALIDATION_RESULT\",\"logic\":{\"engine\":\"javascript\",\"script\":\"const validators = cluster.getAgentsByRole('validator');\\nconst lastPush = ledger.findLast({ topic: 'IMPLEMENTATION_READY' });\\nif (!lastPush) return false;\\nconst responses = ledger.query({ topic: 'VALIDATION_RESULT', since: lastPush.timestamp });\\nif (responses.length < validators.length) return false;\\nreturn responses.some(r => r.content?.data?.approved === false || r.content?.data?.approved === 'false');\"},\"action\":\"execute_task\"},{\"topic\":\"CLUSTER_RESUMED\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"IMPLEMENTATION_READY\",\"content\":{\"text\":\"Implementation complete.\"}}}}}\n\n### PLANNER\n{\"id\":\"planner\",\"role\":\"planning\",\"model\":\"sonnet\",\"outputFormat\":\"json\",\"jsonSchema\":{\"type\":\"object\",\"properties\":{\"plan\":{\"type\":\"string\"},\"summary\":{\"type\":\"string\"},\"filesAffected\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"plan\",\"summary\",\"filesAffected\"]},\"prompt\":{\"system\":\"Create a clear implementation plan. List files to modify and concrete steps.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1}],\"format\":\"chronological\",\"maxTokens\":100000},\"triggers\":[{\"topic\":\"ISSUE_OPENED\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"PLAN_READY\",\"content\":{\"text\":\"{{result.plan}}\",\"data\":{\"summary\":\"{{result.summary}}\",\"filesAffected\":\"{{result.filesAffected}}\"}}}}}}\n\n### VALIDATOR\n{\"id\":\"validator\",\"role\":\"validator\",\"model\":\"haiku or sonnet\",\"outputFormat\":\"json\",\"jsonSchema\":{\"type\":\"object\",\"properties\":{\"approved\":{\"type\":\"boolean\"},\"summary\":{\"type\":\"string\"},\"errors\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"approved\",\"summary\",\"errors\"]},\"prompt\":{\"system\":\"Verify implementation meets requirements. APPROVE if core functionality works. REJECT only for missing/broken functionality.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1},{\"topic\":\"PLAN_READY\",\"limit\":1},{\"topic\":\"IMPLEMENTATION_READY\",\"limit\":1}],\"format\":\"chronological\",\"maxTokens\":50000},\"triggers\":[{\"topic\":\"IMPLEMENTATION_READY\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"VALIDATION_RESULT\",\"content\":{\"text\":\"{{result.summary}}\",\"data\":{\"approved\":\"{{result.approved}}\",\"errors\":\"{{result.errors}}\"}}}}}}\n\n### CODE REVIEWER\n{\"id\":\"reviewer\",\"role\":\"validator\",\"model\":\"sonnet\",\"outputFormat\":\"json\",\"jsonSchema\":{\"type\":\"object\",\"properties\":{\"approved\":{\"type\":\"boolean\"},\"summary\":{\"type\":\"string\"},\"issues\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"approved\",\"summary\",\"issues\"]},\"prompt\":{\"system\":\"Code review for security and quality. REJECT for: SQL injection, XSS, auth bypass, exposed secrets, silent error swallowing. APPROVE if code is functional and safe.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1},{\"topic\":\"PLAN_READY\",\"limit\":1},{\"topic\":\"IMPLEMENTATION_READY\",\"limit\":1}],\"format\":\"chronological\",\"maxTokens\":50000},\"triggers\":[{\"topic\":\"IMPLEMENTATION_READY\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"VALIDATION_RESULT\",\"content\":{\"text\":\"{{result.summary}}\",\"data\":{\"approved\":\"{{result.approved}}\",\"issues\":\"{{result.issues}}\"}}}}}}\n\n### SECURITY VALIDATOR\n{\"id\":\"security-validator\",\"role\":\"validator\",\"model\":\"sonnet\",\"outputFormat\":\"json\",\"jsonSchema\":{\"type\":\"object\",\"properties\":{\"approved\":{\"type\":\"boolean\"},\"summary\":{\"type\":\"string\"},\"vulnerabilities\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"approved\",\"summary\",\"vulnerabilities\"]},\"prompt\":{\"system\":\"Deep security review. Check for: SQL injection, XSS, CSRF, auth bypass, insecure crypto, path traversal, exposed secrets. REJECT if any vulnerability found.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1},{\"topic\":\"PLAN_READY\",\"limit\":1},{\"topic\":\"IMPLEMENTATION_READY\",\"limit\":1}],\"format\":\"chronological\",\"maxTokens\":50000},\"triggers\":[{\"topic\":\"IMPLEMENTATION_READY\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"VALIDATION_RESULT\",\"content\":{\"text\":\"{{result.summary}}\",\"data\":{\"approved\":\"{{result.approved}}\",\"vulnerabilities\":\"{{result.vulnerabilities}}\"}}}}}}\n\n### TESTER\n{\"id\":\"tester\",\"role\":\"validator\",\"model\":\"sonnet\",\"outputFormat\":\"json\",\"jsonSchema\":{\"type\":\"object\",\"properties\":{\"approved\":{\"type\":\"boolean\"},\"summary\":{\"type\":\"string\"},\"bugs\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"approved\",\"summary\",\"bugs\"]},\"prompt\":{\"system\":\"QA testing. Test basic usage and common edge cases. REJECT if core functionality broken or crashes. APPROVE if it basically works.\"},\"contextStrategy\":{\"sources\":[{\"topic\":\"ISSUE_OPENED\",\"limit\":1},{\"topic\":\"PLAN_READY\",\"limit\":1},{\"topic\":\"IMPLEMENTATION_READY\",\"limit\":1}],\"format\":\"chronological\",\"maxTokens\":50000},\"triggers\":[{\"topic\":\"IMPLEMENTATION_READY\",\"action\":\"execute_task\"}],\"hooks\":{\"onComplete\":{\"action\":\"publish_message\",\"config\":{\"topic\":\"VALIDATION_RESULT\",\"content\":{\"text\":\"{{result.summary}}\",\"data\":{\"approved\":\"{{result.approved}}\",\"bugs\":\"{{result.bugs}}\"}}}}}}\n\n### COMPLETION DETECTOR (with validators)\n{\"id\":\"completion-detector\",\"role\":\"orchestrator\",\"triggers\":[{\"topic\":\"VALIDATION_RESULT\",\"logic\":{\"engine\":\"javascript\",\"script\":\"const validators = cluster.getAgentsByRole('validator');\\nconst lastPush = ledger.findLast({ topic: 'IMPLEMENTATION_READY' });\\nif (!lastPush) return false;\\nconst responses = ledger.query({ topic: 'VALIDATION_RESULT', since: lastPush.timestamp });\\nif (responses.length < validators.length) return false;\\nreturn responses.every(r => r.content?.data?.approved === true || r.content?.data?.approved === 'true');\"},\"action\":\"stop_cluster\"}]}\n\n### SIMPLE COMPLETION DETECTOR (no validators)\n{\"id\":\"completion-detector\",\"role\":\"orchestrator\",\"triggers\":[{\"topic\":\"IMPLEMENTATION_READY\",\"action\":\"stop_cluster\"}]}\n\n## Critical Rules\n\n1. Use categorical classification - NO confidence scores\n2. Only output UNCERTAIN when decision itself is hard\n3. Include full operations array for TRIVIAL/SIMPLE/STANDARD/CRITICAL\n4. Fail fast - if task is malformed/unclear, classify as UNCERTAIN\n5. Follow exact patterns above for operations\n6. For TRIVIAL: use haiku worker + simple completion detector\n7. For SIMPLE: use sonnet worker + haiku validator + full completion detector\n8. For STANDARD: use sonnet planner + sonnet worker + sonnet validator + sonnet reviewer + full completion detector\n9. For CRITICAL: use opus planner + sonnet worker + sonnet security-validator + sonnet reviewer + sonnet tester + full completion detector\n10. ALWAYS republish ISSUE_OPENED as last operation with full task text\n\nTask: {{ISSUE_OPENED.content.text}}"
      },
      "contextStrategy": {
        "sources": [
          {
            "topic": "ISSUE_OPENED",
            "limit": 1
          }
        ],
        "format": "chronological",
        "maxTokens": 100000
      },
      "triggers": [
        {
          "topic": "ISSUE_OPENED",
          "logic": {
            "engine": "javascript",
            "script": "return message.sender === 'system';"
          },
          "action": "execute_task"
        }
      ],
      "hooks": {
        "onComplete": {
          "action": "publish_message",
          "config": {
            "topic": "JUNIOR_CLASSIFICATION",
            "content": {
              "text": "{{result.reasoning}}",
              "data": {
                "classification": "{{result.classification}}",
                "operations": "{{result.operations}}"
              }
            }
          }
        }
      }
    }
  ]
}
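The WORKER and COMPLETION DETECTOR primitives embedded in the prompt above share one non-obvious piece of logic: a validator quorum check that only fires once every validator has responded to the latest IMPLEMENTATION_READY message. A runnable sketch of that check against stubbed cluster and ledger objects (the real implementations live in package/src/ledger.js and the orchestrator; the stub shapes and the strict ">" timestamp comparison are assumptions):

    // Stubs standing in for the real cluster registry and message ledger.
    const cluster = { getAgentsByRole: (role) => (role === 'validator' ? ['validator', 'reviewer'] : []) };
    const messages = [
      { topic: 'IMPLEMENTATION_READY', timestamp: 100 },
      { topic: 'VALIDATION_RESULT', timestamp: 110, content: { data: { approved: true } } },
      { topic: 'VALIDATION_RESULT', timestamp: 120, content: { data: { approved: 'true' } } },
    ];
    const ledger = {
      findLast: ({ topic }) => [...messages].reverse().find((m) => m.topic === topic) || null,
      query: ({ topic, since }) => messages.filter((m) => m.topic === topic && m.timestamp > since),
    };

    // Same logic as the completion-detector trigger script: wait for a full
    // quorum of validator responses after the latest push, then require that
    // all of them approved (boolean true or the string 'true').
    function allValidatorsApproved() {
      const validators = cluster.getAgentsByRole('validator');
      const lastPush = ledger.findLast({ topic: 'IMPLEMENTATION_READY' });
      if (!lastPush) return false;
      const responses = ledger.query({ topic: 'VALIDATION_RESULT', since: lastPush.timestamp });
      if (responses.length < validators.length) return false;
      return responses.every((r) => r.content?.data?.approved === true || r.content?.data?.approved === 'true');
    }

    console.log(allValidatorsApproved()); // true

The worker's own VALIDATION_RESULT trigger is the mirror image: same quorum gate, but it re-executes when any response has approved === false.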
package/docker/zeroshot-cluster/Dockerfile
ADDED

@@ -0,0 +1,132 @@
# Base image for vibe cluster isolation mode
# Provides: Node.js, Python, Git, Chromium, Claude CLI, Playwright deps, Infrastructure tools
#
# Build: docker build -t vibe-cluster-base vibe/cluster/docker/vibe-cluster/
# Usage: vibe run <task> --isolation

FROM node:20-slim

# Version pinning for infrastructure tools
ARG AWS_CLI_VERSION=2.15.10
ARG TERRAFORM_VERSION=1.6.6
ARG KUBECTL_VERSION=1.29.0
ARG HELM_VERSION=3.13.3
ARG INFRACOST_VERSION=0.10.32
ARG TFLINT_VERSION=0.50.0
ARG TFSEC_VERSION=1.28.4

# Install system dependencies for e2e testing and development
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    curl \
    ca-certificates \
    gnupg \
    unzip \
    # Docker for starting services
    docker.io \
    docker-compose \
    # Python for general development
    python3 \
    python3-pip \
    python3-venv \
    # Chromium dependencies
    chromium \
    fonts-liberation \
    libasound2 \
    libatk-bridge2.0-0 \
    libatk1.0-0 \
    libcups2 \
    libdbus-1-3 \
    libdrm2 \
    libgbm1 \
    libgtk-3-0 \
    libnspr4 \
    libnss3 \
    libx11-xcb1 \
    libxcomposite1 \
    libxdamage1 \
    libxfixes3 \
    libxrandr2 \
    xdg-utils \
    && rm -rf /var/lib/apt/lists/* \
    # Create python symlink for compatibility
    && ln -sf /usr/bin/python3 /usr/bin/python

# Install GitHub CLI for git authentication
RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
    && chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
    && apt-get update && apt-get install -y gh \
    && rm -rf /var/lib/apt/lists/*

# Install infrastructure tools
# AWS CLI v2
RUN curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${AWS_CLI_VERSION}.zip" -o /tmp/awscliv2.zip \
    && unzip -q /tmp/awscliv2.zip -d /tmp \
    && /tmp/aws/install \
    && rm -rf /tmp/awscliv2.zip /tmp/aws

# Terraform
RUN curl -fsSL "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" -o /tmp/terraform.zip \
    && unzip -q /tmp/terraform.zip -d /usr/local/bin \
    && chmod +x /usr/local/bin/terraform \
    && rm /tmp/terraform.zip

# kubectl
RUN curl -fsSL "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl" -o /usr/local/bin/kubectl \
    && chmod +x /usr/local/bin/kubectl

# Helm
RUN curl -fsSL "https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz" -o /tmp/helm.tar.gz \
    && tar -xzf /tmp/helm.tar.gz -C /tmp \
    && mv /tmp/linux-amd64/helm /usr/local/bin/helm \
    && chmod +x /usr/local/bin/helm \
    && rm -rf /tmp/helm.tar.gz /tmp/linux-amd64

# infracost (cost estimation)
RUN curl -fsSL "https://github.com/infracost/infracost/releases/download/v${INFRACOST_VERSION}/infracost-linux-amd64.tar.gz" -o /tmp/infracost.tar.gz \
    && tar -xzf /tmp/infracost.tar.gz -C /tmp \
    && mv /tmp/infracost-linux-amd64 /usr/local/bin/infracost \
    && chmod +x /usr/local/bin/infracost \
    && rm /tmp/infracost.tar.gz

# tflint (terraform linter)
RUN curl -fsSL "https://github.com/terraform-linters/tflint/releases/download/v${TFLINT_VERSION}/tflint_linux_amd64.zip" -o /tmp/tflint.zip \
    && unzip -q /tmp/tflint.zip -d /usr/local/bin \
    && chmod +x /usr/local/bin/tflint \
    && rm /tmp/tflint.zip

# tfsec (security scanner)
RUN curl -fsSL "https://github.com/aquasecurity/tfsec/releases/download/v${TFSEC_VERSION}/tfsec-linux-amd64" -o /usr/local/bin/tfsec \
    && chmod +x /usr/local/bin/tfsec

# Set AWS_PAGER to empty to disable paging in AWS CLI
ENV AWS_PAGER=""

# Set Chromium path for Playwright
ENV CHROME_BIN=/usr/bin/chromium
ENV PLAYWRIGHT_CHROMIUM_EXECUTABLE_PATH=/usr/bin/chromium

# Install Claude CLI globally
RUN npm install -g @anthropic-ai/claude-code

# Install Playwright (uses system Chromium)
RUN npx playwright install-deps chromium 2>/dev/null || true

# Add node user to docker group for Docker socket access
RUN groupadd -f docker && usermod -aG docker node

# Use existing 'node' user from base image (uid 1000)
# Create directories with proper ownership for Claude CLI
RUN mkdir -p /home/node/.claude /home/node/.config/gh \
    && chown -R node:node /home/node

# Create workspace directory with node ownership
RUN mkdir -p /workspace && chown node:node /workspace
WORKDIR /workspace

# Switch to non-root user (required for --dangerously-skip-permissions)
USER node

# Default command (overridden by vibe)
CMD ["bash"]
package/lib/completion.js
ADDED

@@ -0,0 +1,174 @@
const omelette = require('omelette');
const fs = require('fs');
const path = require('path');

function setupCompletion() {
  const complete = omelette('zeroshot');

  complete.on('start', ({ reply }) => {
    reply(['--issue', '--text', '--config']);
  });

  complete.on('list', ({ reply }) => {
    reply([]);
  });

  complete.on('status', ({ reply }) => {
    // Complete with cluster IDs
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply(clusterIds);
      } else {
        reply([]);
      }
    } catch {
      reply([]);
    }
  });

  complete.on('logs', ({ reply }) => {
    // Complete with cluster IDs or flags
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      const completions = ['--follow', '-f', '--limit', '-n'];

      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply([...clusterIds, ...completions]);
      } else {
        reply(completions);
      }
    } catch {
      reply(['--follow', '-f', '--limit', '-n']);
    }
  });

  complete.on('stop', ({ reply }) => {
    // Complete with active cluster IDs
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply(clusterIds);
      } else {
        reply([]);
      }
    } catch {
      reply([]);
    }
  });

  complete.on('kill', ({ reply }) => {
    // Complete with active cluster IDs
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply(clusterIds);
      } else {
        reply([]);
      }
    } catch {
      reply([]);
    }
  });

  complete.on('export', ({ reply }) => {
    // Complete with cluster IDs
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply(clusterIds);
      } else {
        reply([]);
      }
    } catch {
      reply([]);
    }
  });

  complete.on('finish', ({ reply }) => {
    // Complete with cluster IDs and flags
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      const completions = ['--merge', '--pr', '--push'];

      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply([...clusterIds, ...completions]);
      } else {
        reply(completions);
      }
    } catch {
      reply(['--merge', '--pr', '--push']);
    }
  });

  complete.on('resume', ({ reply }) => {
    // Complete with cluster IDs
    try {
      const clustersDir = path.join(process.env.HOME, '.zeroshot', 'clusters');
      if (fs.existsSync(clustersDir)) {
        const clusterIds = fs
          .readdirSync(clustersDir)
          .filter((f) => f.endsWith('.json'))
          .map((f) => f.replace('.json', ''));
        reply(clusterIds);
      } else {
        reply([]);
      }
    } catch {
      reply([]);
    }
  });

  complete.on('ui', ({ reply }) => {
    reply(['--port']);
  });

  complete.on('watch', ({ reply }) => {
    reply(['--filter', '--refresh-rate', 'running', 'stopped', 'all']);
  });

  // Default completion - show commands
  complete.on('', ({ reply }) => {
    reply([
      'start',
      'list',
      'status',
      'logs',
      'stop',
      'kill',
      'finish',
      'resume',
      'export',
      'ui',
      'watch',
    ]);
  });

  complete.init();
}

module.exports = { setupCompletion };
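setupCompletion() registers the handlers and calls complete.init(), which is how omelette intercepts shell completion requests; on a regular invocation init() falls through and normal command handling proceeds. A sketch of the expected wiring in the CLI entry point (cli/index.js ships in this release but is not excerpted here, so the details are assumptions):

    #!/usr/bin/env node
    // Hypothetical entry-point wiring: register completions before argv parsing,
    // so a completion request from the shell short-circuits command dispatch.
    const { setupCompletion } = require('../lib/completion');

    setupCompletion();

    // One-time shell setup could be exposed behind a flag; omelette's
    // setupShellInitFile() appends the completion hook to the user's shell rc.
    // ... normal command dispatch (start, list, status, ...) continues here.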
package/lib/id-detector.js
ADDED

@@ -0,0 +1,53 @@
/**
 * ID Detector - Determines if an ID is a task or cluster
 *
 * Strategy:
 * 1. Check if ID exists in cluster storage
 * 2. If not, check if ID exists in task storage
 * 3. Return type: 'cluster', 'task', or null
 */

const path = require('path');
const fs = require('fs');
const os = require('os');

// Storage paths
const CLUSTER_DIR = path.join(os.homedir(), '.zeroshot');
const TASK_DIR = path.join(os.homedir(), '.claude-zeroshot');

/**
 * Detect if ID is a cluster or task
 * @param {string} id - The ID to check
 * @returns {'cluster'|'task'|null} - Type of ID or null if not found
 */
function detectIdType(id) {
  // Check clusters
  const clusterFile = path.join(CLUSTER_DIR, 'clusters.json');
  if (fs.existsSync(clusterFile)) {
    try {
      const clusters = JSON.parse(fs.readFileSync(clusterFile, 'utf8'));
      if (clusters[id]) {
        return 'cluster';
      }
    } catch {
      // Ignore parse errors
    }
  }

  // Check tasks
  const taskFile = path.join(TASK_DIR, 'tasks.json');
  if (fs.existsSync(taskFile)) {
    try {
      const tasks = JSON.parse(fs.readFileSync(taskFile, 'utf8'));
      if (tasks[id]) {
        return 'task';
      }
    } catch {
      // Ignore parse errors
    }
  }

  return null;
}

module.exports = { detectIdType };
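A short usage sketch: detectIdType lets a single command accept either kind of ID. The dispatch below is hypothetical (the CLI's real handlers are in cli/index.js, not shown), but it only calls the exported function above:

    const { detectIdType } = require('./lib/id-detector');

    // Hypothetical `zeroshot status <id>` handler accepting task or cluster IDs.
    function showStatus(id) {
      switch (detectIdType(id)) {
        case 'cluster':
          console.log(`${id} is a cluster (tracked in ~/.zeroshot/clusters.json)`);
          break;
        case 'task':
          console.log(`${id} is a task (tracked in ~/.claude-zeroshot/tasks.json)`);
          break;
        default:
          console.error(`Unknown ID: ${id}`);
          process.exitCode = 1;
      }
    }

    showStatus(process.argv[2] || 'example-id');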
package/lib/settings.js
ADDED

@@ -0,0 +1,97 @@
/**
 * Settings management for zeroshot
 * Persistent user preferences stored in ~/.zeroshot/settings.json
 */

const fs = require('fs');
const path = require('path');
const os = require('os');

// Settings file path
const SETTINGS_FILE = path.join(os.homedir(), '.zeroshot', 'settings.json');

// Default settings
const DEFAULT_SETTINGS = {
  defaultModel: 'sonnet',
  defaultConfig: 'conductor-bootstrap',
  defaultIsolation: false,
  strictSchema: false, // false = live streaming (default), true = guaranteed schema compliance (no streaming)
  logLevel: 'normal',
};

/**
 * Load settings from disk, merging with defaults
 */
function loadSettings() {
  if (!fs.existsSync(SETTINGS_FILE)) {
    return { ...DEFAULT_SETTINGS };
  }
  try {
    const data = fs.readFileSync(SETTINGS_FILE, 'utf8');
    return { ...DEFAULT_SETTINGS, ...JSON.parse(data) };
  } catch {
    console.error('Warning: Could not load settings, using defaults');
    return { ...DEFAULT_SETTINGS };
  }
}

/**
 * Save settings to disk
 */
function saveSettings(settings) {
  const dir = path.dirname(SETTINGS_FILE);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
  fs.writeFileSync(SETTINGS_FILE, JSON.stringify(settings, null, 2), 'utf8');
}

/**
 * Validate a setting value
 * @returns {string|null} Error message if invalid, null if valid
 */
function validateSetting(key, value) {
  if (!(key in DEFAULT_SETTINGS)) {
    return `Unknown setting: ${key}`;
  }

  if (key === 'defaultModel' && !['opus', 'sonnet', 'haiku'].includes(value)) {
    return `Invalid model: ${value}. Valid models: opus, sonnet, haiku`;
  }

  if (key === 'logLevel' && !['quiet', 'normal', 'verbose'].includes(value)) {
    return `Invalid log level: ${value}. Valid levels: quiet, normal, verbose`;
  }

  return null;
}

/**
 * Coerce value to correct type based on default value type
 */
function coerceValue(key, value) {
  const defaultValue = DEFAULT_SETTINGS[key];

  if (typeof defaultValue === 'boolean') {
    return value === 'true' || value === '1' || value === 'yes' || value === true;
  }

  if (typeof defaultValue === 'number') {
    const parsed = parseInt(value);
    if (isNaN(parsed)) {
      throw new Error(`Invalid number: ${value}`);
    }
    return parsed;
  }

  return value;
}

module.exports = {
  loadSettings,
  saveSettings,
  validateSetting,
  coerceValue,
  DEFAULT_SETTINGS,
  SETTINGS_FILE,
};
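The intended flow is validate, then coerce, then persist. A sketch of a `zeroshot config set <key> <value>` style handler built only on the helpers above (the command surface itself is an assumption; only the library calls are taken from the file):

    const { loadSettings, saveSettings, validateSetting, coerceValue } = require('./lib/settings');

    // Hypothetical config-set handler; raw CLI values arrive as strings.
    function setSetting(key, rawValue) {
      const error = validateSetting(key, rawValue);
      if (error) throw new Error(error);
      const settings = loadSettings();
      settings[key] = coerceValue(key, rawValue); // 'true'/'1'/'yes' -> boolean, digits -> number
      saveSettings(settings);
    }

    setSetting('strictSchema', 'true');
    console.log(loadSettings().strictSchema); // true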