wogiflow 2.11.0 → 2.12.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/docs/explore-agents.md +35 -0
- package/lib/workspace.js +42 -11
- package/package.json +1 -1
- package/scripts/flow-mcp-capabilities.js +617 -0
- package/scripts/hooks/core/manager-boundary-gate.js +388 -0
- package/scripts/hooks/core/task-completed.js +60 -2
- package/scripts/hooks/entry/claude-code/pre-tool-use.js +25 -0
- package/scripts/postinstall.js +15 -4
|
@@ -226,6 +226,41 @@ Return:
|
|
|
226
226
|
|
|
227
227
|
**Blast-radius artifact**: Results are persisted to `.workflow/state/blast-radius-{taskId}.json` for use by downstream gates (context estimator, standards compliance, workspace dispatch).
|
|
228
228
|
|
|
229
|
+
## MCP Capability Injection (Pre-Launch)
|
|
230
|
+
|
|
231
|
+
Before launching agents, inject MCP capability hints so agents can leverage available MCP tools (CC 2.1.101+ — sub-agents inherit MCP tools from parent session).
|
|
232
|
+
|
|
233
|
+
**Step 1**: Check if capabilities are classified:
|
|
234
|
+
```bash
|
|
235
|
+
node scripts/flow-mcp-capabilities.js check-cache
|
|
236
|
+
```
|
|
237
|
+
|
|
238
|
+
**Step 2**: If `cache-miss` AND you have MCP tools available (tools starting with `mcp__` in your tool catalog):
|
|
239
|
+
1. Inspect your available `mcp__*` tools
|
|
240
|
+
2. For each tool, classify it into a capability category using the category definitions:
|
|
241
|
+
```bash
|
|
242
|
+
node scripts/flow-mcp-capabilities.js categories
|
|
243
|
+
```
|
|
244
|
+
3. Cache the classifications:
|
|
245
|
+
```bash
|
|
246
|
+
node scripts/flow-mcp-capabilities.js cache '<json>'
|
|
247
|
+
```
|
|
248
|
+
Format: `{ "server-name": { "tools": [{ "name": "mcp__server__tool", "description": "...", "category": "category-id" }] } }`
|
|
249
|
+
|
|
250
|
+
**Step 3**: For each agent, get its role-specific hint and append to the agent prompt:
|
|
251
|
+
```bash
|
|
252
|
+
node scripts/flow-mcp-capabilities.js hint explore-codebase # Agent 1
|
|
253
|
+
node scripts/flow-mcp-capabilities.js hint explore-practices # Agent 2
|
|
254
|
+
node scripts/flow-mcp-capabilities.js hint explore-versions # Agent 3
|
|
255
|
+
node scripts/flow-mcp-capabilities.js hint explore-risk # Agent 4
|
|
256
|
+
node scripts/flow-mcp-capabilities.js hint explore-standards # Agent 5
|
|
257
|
+
node scripts/flow-mcp-capabilities.js hint explore-impact # Agent 6
|
|
258
|
+
```
|
|
259
|
+
|
|
260
|
+
If the hint is non-empty, append it to the agent's prompt. If empty (no relevant MCP tools for that role), skip — the agent works fine without them.
|
|
261
|
+
|
|
262
|
+
**Skip when**: No MCP servers configured (`node scripts/flow-mcp-capabilities.js discover` returns empty), or `config.mcpCapabilities.enabled` is false.
|
|
263
|
+
|
|
229
264
|
## Launching
|
|
230
265
|
|
|
231
266
|
All agents launch in parallel as `Agent(subagent_type=Explore)` calls in a single message. When `config.hybrid.enabled`, use the `model` parameter on each Agent call to route by task type:
|
package/lib/workspace.js
CHANGED
|
@@ -717,20 +717,25 @@ grep -l '"from": "<repo-name>"' .workspace/messages/*.json 2>/dev/null
|
|
|
717
717
|
4. If no message after 30s, check the worker's \`ready.json\` for task status
|
|
718
718
|
5. Once message arrives, read it and present the results to the user
|
|
719
719
|
|
|
720
|
-
**Message format** (what workers write automatically):
|
|
720
|
+
**Message format** (what workers write automatically via the task-completed hook):
|
|
721
721
|
\`\`\`json
|
|
722
722
|
{
|
|
723
|
-
"id": "msg
|
|
723
|
+
"id": "msg-<taskId>-<timestamp>",
|
|
724
724
|
"from": "<repo-name>",
|
|
725
725
|
"to": "manager",
|
|
726
726
|
"type": "task-complete",
|
|
727
727
|
"subject": "Task completed: <title>",
|
|
728
|
-
"body": "**Task**: ...\\n**Files changed**: ...\\n**
|
|
728
|
+
"body": "**Task**: ...\\n**Files changed**: ...\\n**Verification evidence**: ...",
|
|
729
729
|
"taskId": "wf-XXXXXXXX",
|
|
730
|
-
"status": "pending"
|
|
730
|
+
"status": "pending",
|
|
731
|
+
"verified": true,
|
|
732
|
+
"evidenceTier": "Tier 3 (INTERACTIVE)|Tier 2 (OBSERVATIONAL)|unknown",
|
|
733
|
+
"timestamp": "ISO-8601"
|
|
731
734
|
}
|
|
732
735
|
\`\`\`
|
|
733
736
|
|
|
737
|
+
**Trust model**: Messages with \`"verified": true\` went through WogiFlow's quality gates (gate latch check). Freeform curl messages from workers are progress reports, not verified completions — investigate if a worker reports "done" via curl but no structured task-complete message arrives.
|
|
738
|
+
|
|
734
739
|
**After reading a result**: Present the findings to the user. If the task requires follow-up (e.g., bug investigation found the issue in the other repo), dispatch the fix to the appropriate worker.
|
|
735
740
|
|
|
736
741
|
**You are the SINGLE interface for the user.** They should never need to look at worker terminals. Read the messages, synthesize, and present.
|
|
@@ -1128,17 +1133,21 @@ This is NON-OPTIONAL. Every channel-dispatched task MUST end with a reply to the
|
|
|
1128
1133
|
**Talk to PEERS directly** (do NOT go through the manager) when you need:
|
|
1129
1134
|
- API shape/endpoint details from the other repo ("What does POST /customers return?")
|
|
1130
1135
|
- Data model clarifications ("Does the Invoice entity have a lineItems relation?")
|
|
1136
|
+
- **Test credentials or accounts** ("Do you have E2E login credentials?" / "Can you create a test admin user?")
|
|
1137
|
+
- **Shared resources** ("Can you expose a test API token?" / "What's the connection string for staging?")
|
|
1131
1138
|
- Coordination on shared types or contracts
|
|
1132
1139
|
- Confirmation that the other side is ready for integration
|
|
1133
1140
|
- Any technical question that the other repo can answer from its codebase
|
|
1134
1141
|
|
|
1142
|
+
Peers can not only ANSWER questions — they can CREATE resources you need (test accounts, API tokens, seed data, environment configs).
|
|
1143
|
+
|
|
1135
1144
|
**Talk to the MANAGER** (which means the user) only when:
|
|
1136
1145
|
- You have **results to report** (task completed, investigation findings)
|
|
1137
1146
|
- You need a **user decision** (design choice, product question, approval)
|
|
1138
|
-
- You are **stuck** and no peer can help
|
|
1147
|
+
- You are **stuck** and no peer can help (after trying Steps 1-2 below)
|
|
1139
1148
|
- You need to **escalate** something the user should know about
|
|
1140
1149
|
|
|
1141
|
-
**Rule**: If a peer can answer your question,
|
|
1150
|
+
**Rule**: If a peer can answer your question or create what you need, go to the peer. Do NOT route through the manager — the manager doesn't read source code and will just relay your question anyway. Cut out the middleman.
|
|
1142
1151
|
|
|
1143
1152
|
**Peer channels:**
|
|
1144
1153
|
${peerNames.map(p => `- **${p}**: \`curl -s -X POST http://localhost:${channelMembers[p]?.port || '???'} -H "X-Wogi-From: ${name}" -d "your question"\``).join('\n')}
|
|
@@ -1152,15 +1161,37 @@ You are a workspace worker. There is NO human watching your terminal. You MUST o
|
|
|
1152
1161
|
- **Auto-approve all reviews**: If /wogi-review asks for fix options, choose Option 1 (fix all) automatically.
|
|
1153
1162
|
- **Never ask clarifying questions**: If something is ambiguous, make a reasonable decision and note it in your reply to the manager.
|
|
1154
1163
|
|
|
1155
|
-
###
|
|
1164
|
+
### CRITICAL: Stop, Don't Degrade
|
|
1165
|
+
|
|
1166
|
+
**If you cannot verify your work to the required evidence tier, you may NOT mark the task as complete.** Report it as BLOCKED with the specific verification gap.
|
|
1167
|
+
|
|
1168
|
+
- If browser verification is required but you can't log in → BLOCKED (not done)
|
|
1169
|
+
- If API testing is required but the server is unreachable → BLOCKED (not done)
|
|
1170
|
+
- If you need credentials you don't have → resolve via Steps 1-2 below, then continue. If unresolvable → BLOCKED.
|
|
1171
|
+
|
|
1172
|
+
**Overnight runs STOP when verification is impossible.** Never degrade quality to keep the queue moving. A blocked task with honest status is infinitely better than a "completed" task that doesn't work.
|
|
1173
|
+
|
|
1174
|
+
### When You're Blocked — Resolution Protocol
|
|
1175
|
+
|
|
1176
|
+
**Step 1: Self-resolve** — check \`.workspace/state/\` for credentials, configs, tokens, test accounts, and any other shared resources. Also check \`.workspace/messages/\` for prior conversations where the resource may have been mentioned.
|
|
1177
|
+
|
|
1178
|
+
**Step 2: Ask peers** — peers can CREATE what you need (test accounts, API tokens, seed data). Send a direct request:
|
|
1179
|
+
\`curl -s -X POST http://localhost:{peer_port} -H "X-Wogi-From: ${name}" -d "I need E2E test credentials. Do you have them, or can you create a test admin account?"\`
|
|
1180
|
+
|
|
1181
|
+
**Step 3: ONLY THEN escalate** to the manager, including what you already tried:
|
|
1182
|
+
|
|
1183
|
+
To escalate: \`curl -s -X POST http://localhost:${config.channels.managerPort || (config.channels.basePort - 1)} -H "X-Wogi-From: ${name}" -d "## Need Decision: [problem]
|
|
1184
|
+
Checked .workspace/state/: [what was found]
|
|
1185
|
+
Asked peers: [who, what response]
|
|
1186
|
+
Why this needs the owner: [explanation]"\`
|
|
1187
|
+
|
|
1188
|
+
### When to Escalate (After Steps 1-2)
|
|
1156
1189
|
|
|
1157
1190
|
Only send a question to the manager (instead of results) when:
|
|
1158
|
-
- The task requires a **design decision** that could go multiple ways
|
|
1191
|
+
- The task requires a **design decision** that could go multiple ways
|
|
1159
1192
|
- The task would **break an API contract** that other repos depend on
|
|
1160
1193
|
- The task requires **deleting user data** or making irreversible changes
|
|
1161
|
-
-
|
|
1162
|
-
|
|
1163
|
-
To escalate: \`curl -s -X POST http://localhost:${config.channels.managerPort || (config.channels.basePort - 1)} -H "X-Wogi-From: ${name}" -d "## Need Decision: [describe the choice and options]"\`
|
|
1194
|
+
- Steps 1-2 failed and you are genuinely **stuck**
|
|
1164
1195
|
|
|
1165
1196
|
For everything else — just do the work and report results.
|
|
1166
1197
|
|
package/package.json
CHANGED
|
@@ -0,0 +1,617 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Wogi Flow - MCP Capability Discovery for Sub-Agents
|
|
5
|
+
*
|
|
6
|
+
* Discovers available MCP servers, classifies their tools into generic
|
|
7
|
+
* capability categories, and generates role-specific prompt fragments
|
|
8
|
+
* so sub-agents know what MCP tools they have and when to use them.
|
|
9
|
+
*
|
|
10
|
+
* Design: The script handles discovery, taxonomy, caching, and formatting.
|
|
11
|
+
* The AI orchestrator handles classification (only it can see tool catalogs
|
|
12
|
+
* at runtime). Classifications are cached per session.
|
|
13
|
+
*
|
|
14
|
+
* Source: CC 2.1.101 — sub-agents now inherit MCP tools from parent session,
|
|
15
|
+
* but need awareness of what's available and when to use each tool.
|
|
16
|
+
*
|
|
17
|
+
* Usage:
|
|
18
|
+
* node flow-mcp-capabilities.js check-cache
|
|
19
|
+
* node flow-mcp-capabilities.js categories
|
|
20
|
+
* node flow-mcp-capabilities.js roles
|
|
21
|
+
* node flow-mcp-capabilities.js hint <role>
|
|
22
|
+
* node flow-mcp-capabilities.js cache '<json>'
|
|
23
|
+
* node flow-mcp-capabilities.js clear
|
|
24
|
+
* node flow-mcp-capabilities.js discover
|
|
25
|
+
* node flow-mcp-capabilities.js classify-prompt
|
|
26
|
+
*
|
|
27
|
+
* Programmatic:
|
|
28
|
+
* const { getCapabilityCategories, getRoleCapabilities, generateHint } = require('./flow-mcp-capabilities');
|
|
29
|
+
*/
|
|
30
|
+
|
|
31
|
+
'use strict';
|
|
32
|
+
|
|
33
|
+
const fs = require('node:fs');
|
|
34
|
+
const path = require('node:path');
|
|
35
|
+
const { PATHS, getConfig, safeJsonParse, readJson, writeJson, fileExists } = require('./flow-utils');
|
|
36
|
+
|
|
37
|
+
// Prototype pollution protection — same pattern as flow-plugin-registry.js
const DANGEROUS_KEYS = new Set(['__proto__', 'constructor', 'prototype']);

// ============================================================
// Constants
// ============================================================

// Session-scoped cache file: written by `cache`, read by `check-cache` / `hint`.
const CACHE_PATH = path.join(PATHS.state, 'mcp-capabilities.json');

/**
 * Generic capability categories.
 * Each category is defined by its PURPOSE, not by any specific MCP server.
 * The `keywords` array is used by the AI orchestrator as guidance when
 * classifying MCP tools — tools whose names or descriptions match these
 * keywords likely belong to this category.
 */
const DEFAULT_CATEGORIES = {
  'documentation-lookup': {
    description: 'Fetch library, framework, or API documentation',
    keywords: ['docs', 'library', 'resolve', 'reference', 'documentation', 'get-library', 'api-docs', 'man-page'],
    agentGuidance: 'When you need current API docs, migration guides, or framework-specific patterns. Prefer over web search for library documentation — results are more accurate and structured.'
  },
  'browser-interaction': {
    description: 'Navigate web pages, take screenshots, evaluate DOM, interact with UI elements',
    keywords: ['navigate', 'screenshot', 'browser', 'evaluate', 'click', 'page', 'dom', 'tab', 'scroll', 'type'],
    agentGuidance: 'When you need to verify UI behavior, inspect rendered output, or test user interactions in a browser.'
  },
  'design-files': {
    description: 'Read or interact with design tools and design systems',
    keywords: ['figma', 'design', 'component', 'frame', 'style', 'layout', 'variant', 'token', 'sketch'],
    agentGuidance: 'When you need to inspect design specifications, extract design tokens, or verify UI implementations against design files.'
  },
  'code-execution': {
    description: 'Execute or evaluate code in a sandboxed environment',
    keywords: ['execute', 'eval', 'run', 'sandbox', 'repl', 'notebook', 'kernel', 'interpret'],
    agentGuidance: 'When you need to test code snippets, evaluate expressions, or run scripts in an isolated environment.'
  },
  'data-query': {
    description: 'Query databases, data stores, or structured data sources',
    keywords: ['query', 'sql', 'database', 'table', 'schema', 'select', 'collection', 'index', 'record'],
    agentGuidance: 'When you need to inspect database schemas, run queries, or verify data integrity.'
  },
  'communication': {
    description: 'Send messages or notifications to external services',
    keywords: ['send', 'message', 'slack', 'email', 'notify', 'post', 'channel', 'webhook', 'chat'],
    agentGuidance: 'When you need to notify team members, post updates, or send messages to external communication channels.'
  },
  'file-management': {
    description: 'Manage files in external storage or cloud systems',
    keywords: ['upload', 'download', 'storage', 'bucket', 's3', 'blob', 'drive', 'sync', 'transfer'],
    agentGuidance: 'When you need to upload, download, or manage files in cloud storage or external file systems.'
  },
  'code-analysis': {
    description: 'Static analysis, AST inspection, linting, or code intelligence',
    keywords: ['lint', 'ast', 'analyze', 'parse', 'syntax', 'diagnostic', 'symbol', 'definition', 'reference'],
    agentGuidance: 'When you need deeper code analysis beyond grep — AST-level queries, cross-reference lookups, or structured code intelligence.'
  },
  'project-management': {
    description: 'Interact with project management tools (issues, boards, sprints)',
    keywords: ['issue', 'ticket', 'sprint', 'board', 'jira', 'linear', 'project', 'backlog', 'assignee', 'transition'],
    agentGuidance: 'When you need to read or update project management state — issues, sprint boards, or task tracking.'
  },
  'version-control': {
    description: 'Interact with version control platforms beyond local git',
    keywords: ['pull-request', 'pr', 'merge', 'branch', 'commit', 'review', 'diff', 'release', 'tag'],
    agentGuidance: 'When you need to interact with remote version control — PRs, code reviews, or release management.'
  }
};

/**
 * Role-to-capability mapping.
 * Each agent role lists which capability categories would enhance its work.
 * The orchestrator uses this to filter relevant MCP tools for each sub-agent.
 * Unknown roles fall back to 'general' (see getRoleCapabilities).
 */
const DEFAULT_ROLE_CAPABILITIES = {
  'explore-codebase': ['code-analysis', 'documentation-lookup'],
  'explore-practices': ['documentation-lookup'],
  'explore-versions': ['documentation-lookup'],
  'explore-risk': ['code-analysis'],
  'explore-standards': ['code-analysis'],
  'explore-impact': ['code-analysis'],
  'review-code': ['code-analysis', 'browser-interaction'],
  'review-security': ['code-analysis'],
  'review-architecture': ['code-analysis', 'documentation-lookup'],
  'review-performance': ['code-analysis'],
  'verify-ui': ['browser-interaction', 'design-files'],
  'verify-api': ['data-query'],
  'skeptical-evaluator': ['code-analysis', 'browser-interaction'],
  'bug-investigation': ['code-analysis', 'browser-interaction', 'data-query'],
  'onboard-stack': ['documentation-lookup'],
  'general': ['documentation-lookup', 'code-analysis']
};
|
|
129
|
+
|
|
130
|
+
// ============================================================
|
|
131
|
+
// Configuration
|
|
132
|
+
// ============================================================
|
|
133
|
+
|
|
134
|
+
/**
 * Get MCP capabilities config, merging defaults with user overrides.
 *
 * @returns {{ enabled: boolean, categoryOverrides: Object, roleOverrides: Object }}
 */
function getMcpCapabilitiesConfig() {
  const user = getConfig().mcpCapabilities || {};
  // Feature is on unless explicitly disabled with `enabled: false`.
  const enabled = user.enabled !== false;
  const categoryOverrides = user.categoryOverrides || {};
  const roleOverrides = user.roleOverrides || {};
  return { enabled, categoryOverrides, roleOverrides };
}
|
|
146
|
+
|
|
147
|
+
/**
 * Get capability categories with user overrides applied.
 *
 * @returns {Object} Category id → { description, keywords, agentGuidance }
 */
function getCapabilityCategories() {
  const { categoryOverrides } = getMcpCapabilitiesConfig();
  // User overrides win over the built-in taxonomy, key by key.
  return Object.assign({}, DEFAULT_CATEGORIES, categoryOverrides);
}
|
|
154
|
+
|
|
155
|
+
/**
 * Get role-to-capability mapping with user overrides applied.
 *
 * @param {string} role - Agent role id (e.g. 'explore-codebase')
 * @returns {string[]} Capability category ids for the role; falls back to
 *   'general', then to an empty array when neither is defined.
 */
function getRoleCapabilities(role) {
  const { roleOverrides } = getMcpCapabilitiesConfig();
  const merged = Object.assign({}, DEFAULT_ROLE_CAPABILITIES, roleOverrides);
  return merged[role] || merged['general'] || [];
}
|
|
163
|
+
|
|
164
|
+
/**
 * Get all role definitions (defaults merged with user overrides).
 *
 * @returns {Object} Role id → capability category ids
 */
function getAllRoles() {
  const { roleOverrides } = getMcpCapabilitiesConfig();
  return Object.assign({}, DEFAULT_ROLE_CAPABILITIES, roleOverrides);
}
|
|
171
|
+
|
|
172
|
+
// ============================================================
|
|
173
|
+
// MCP Server Discovery
|
|
174
|
+
// ============================================================
|
|
175
|
+
|
|
176
|
+
/**
 * Discover all configured MCP servers from settings files and .mcp.json.
 * Returns server names only — never includes config (may contain API keys).
 *
 * NOTE: This intentionally duplicates some discovery logic from flow-plugin-registry.js
 * (scanUnregisteredMcpServers). The divergences are deliberate:
 * - This function includes .mcp.json (CC 2.1.50+ canonical location); the registry doesn't
 * - This function includes ~/.claude/settings.json (user-level); the registry is project-only
 * - This function skips the internalPatterns filter (we want ALL servers for capability hints)
 * If these divergences cause issues, extract shared logic into flow-utils.js.
 *
 * @returns {string[]} Array of MCP server names
 */
function discoverMcpServers() {
  const servers = new Set();

  // Fold the `mcpServers` keys of one JSON config file into `servers`.
  // Best-effort: missing, unreadable, or malformed files are silently
  // skipped — same tolerance the per-location try/catch blocks had before
  // this logic was deduplicated into a single helper.
  const collectFrom = (configPath) => {
    if (!fileExists(configPath)) return;
    try {
      const parsed = safeJsonParse(configPath, {});
      for (const name of Object.keys(parsed.mcpServers || {})) {
        servers.add(name);
      }
    } catch (_err) { /* silently skip */ }
  };

  const locations = [
    // Project-level MCP config (CC 2.1.50+)
    path.join(PATHS.root, '.mcp.json'),
    // Project-level Claude settings
    path.join(PATHS.root, '.claude', 'settings.local.json'),
    path.join(PATHS.root, '.claude', 'settings.json')
  ];

  // User-level settings (~/.claude/settings.json), when a home dir is known.
  const homePath = process.env.HOME || process.env.USERPROFILE;
  if (homePath) {
    locations.push(path.join(homePath, '.claude', 'settings.json'));
  }

  for (const location of locations) {
    collectFrom(location);
  }

  return [...servers];
}
|
|
238
|
+
|
|
239
|
+
// ============================================================
|
|
240
|
+
// Cache Management
|
|
241
|
+
// ============================================================
|
|
242
|
+
|
|
243
|
+
/**
 * Read cached MCP capability classifications.
 *
 * @returns {{ classifications: Object, cachedAt: string, sessionId: string } | null}
 *   The cache payload, or null when the cache is absent, unreadable, or
 *   missing its `classifications` field.
 */
function getCachedClassifications() {
  if (!fileExists(CACHE_PATH)) return null;
  try {
    const payload = readJson(CACHE_PATH, null);
    const valid = payload && payload.classifications;
    return valid ? payload : null;
  } catch (_err) {
    return null;
  }
}
|
|
259
|
+
|
|
260
|
+
/**
 * Cache MCP capability classifications.
 *
 * Expected input format:
 * {
 *   "server-name": {
 *     "tools": [
 *       { "name": "mcp__server__tool_name", "description": "What it does", "category": "documentation-lookup" }
 *     ]
 *   }
 * }
 *
 * Validates input for prototype pollution and enforces length limits on tool
 * name/description to prevent prompt injection via cache poisoning.
 *
 * @param {Object} classifications - Server-to-tool classifications
 * @returns {boolean} true when the cache was written, false on invalid input or write failure
 */
function cacheClassifications(classifications) {
  if (typeof classifications !== 'object' || classifications === null || Array.isArray(classifications)) {
    return false;
  }

  // Sanitize: reject dangerous keys, enforce length limits on tool fields
  const sanitized = {};
  for (const [serverName, serverData] of Object.entries(classifications)) {
    if (DANGEROUS_KEYS.has(serverName)) continue;
    if (typeof serverData !== 'object' || serverData === null) continue;

    const tools = Array.isArray(serverData.tools) ? serverData.tools : [];
    sanitized[serverName] = {
      tools: tools
        // Fix: a malformed entry (e.g. null or a bare string inside `tools`)
        // previously threw a TypeError on `tool.name` access; skip non-object
        // entries instead of crashing the whole cache write.
        .filter(tool => typeof tool === 'object' && tool !== null)
        .map(tool => ({
          name: String(tool.name || '').slice(0, 120),
          // Backticks are stripped so a poisoned description can't break out
          // of the inline-code span generateHint() wraps tool names in.
          description: String(tool.description || '').slice(0, 200).replace(/`/g, "'"),
          category: String(tool.category || '').slice(0, 50)
        }))
        .filter(t => t.name && t.category)
    };
  }

  const data = {
    version: 1,
    cachedAt: new Date().toISOString(),
    classifications: sanitized
  };

  try {
    writeJson(CACHE_PATH, data);
    return true;
  } catch (_err) {
    return false;
  }
}
|
|
311
|
+
|
|
312
|
+
/**
 * Clear the classification cache.
 *
 * @returns {boolean} true when the cache is gone (or never existed), false on deletion failure
 */
function clearCache() {
  try {
    if (fileExists(CACHE_PATH)) {
      fs.unlinkSync(CACHE_PATH);
    }
    return true;
  } catch (_err) {
    // e.g. permission error — report failure rather than throwing
    return false;
  }
}
|
|
325
|
+
|
|
326
|
+
// ============================================================
|
|
327
|
+
// Prompt Generation
|
|
328
|
+
// ============================================================
|
|
329
|
+
|
|
330
|
+
/**
 * Generate a capability-aware prompt fragment for a specific agent role.
 *
 * @param {string} role - Agent role (e.g., 'explore-codebase', 'review-code')
 * @param {Object} [classifications] - Cached classifications (auto-loaded if omitted)
 * @returns {string} Prompt fragment to append to agent prompt, or empty string if no relevant capabilities
 */
function generateHint(role, classifications) {
  if (!getMcpCapabilitiesConfig().enabled) return '';

  const cached = classifications || getCachedClassifications();
  if (!cached) return '';

  // Accept either a full cache payload or a bare classifications map.
  const classificationData = cached.classifications || cached;

  const wanted = getRoleCapabilities(role);
  if (!wanted || wanted.length === 0) return '';

  const categories = getCapabilityCategories();
  const wantedSet = new Set(wanted);

  // Group the role-relevant tools by capability category.
  const grouped = {};
  for (const serverData of Object.values(classificationData)) {
    for (const tool of serverData.tools || []) {
      if (!tool.category || !wantedSet.has(tool.category)) continue;
      if (!grouped[tool.category]) {
        grouped[tool.category] = [];
      }
      grouped[tool.category].push(tool);
    }
  }

  // Nothing relevant for this role — the agent works fine without a hint.
  if (Object.keys(grouped).length === 0) return '';

  const out = [
    '',
    '## Available MCP Capabilities',
    '',
    'You have access to specialized MCP tools beyond the standard toolset. Use them when they help accomplish your task more effectively.',
    ''
  ];

  for (const [category, tools] of Object.entries(grouped)) {
    // 'documentation-lookup' → 'Documentation Lookup'
    const title = category
      .split('-')
      .map(word => word.charAt(0).toUpperCase() + word.slice(1))
      .join(' ');

    out.push(`### ${title}`);
    for (const tool of tools) {
      out.push(`- \`${tool.name}\` — ${tool.description}`);
    }
    const guidance = categories[category]?.agentGuidance;
    if (guidance) {
      out.push(`**When to use**: ${guidance}`);
    }
    out.push('');
  }

  return out.join('\n');
}
|
|
394
|
+
|
|
395
|
+
/**
 * Generate the classification prompt for the orchestrator AI.
 * This prompt tells the orchestrator how to classify its available MCP tools.
 *
 * @returns {string} Instructions for the orchestrator to classify MCP tools,
 *   or empty string when no MCP servers are configured
 */
function generateClassificationPrompt() {
  const servers = discoverMcpServers();
  if (servers.length === 0) return '';

  // One bullet per category: id, description, and the classification keywords.
  const categoryList = Object.entries(getCapabilityCategories())
    .map(([name, def]) => ` - \`${name}\`: ${def.description} (keywords: ${def.keywords.join(', ')})`)
    .join('\n');

  return `## MCP Capability Classification Required

${servers.length} MCP server(s) detected: ${servers.join(', ')}

You have MCP tools available in your tool catalog (they appear as \`mcp__<server>__<tool>\` in your available tools). Classify each one into capability categories so sub-agents know what's available.

### Capability Categories
${categoryList}

### Instructions
1. Inspect your available tools — look for any starting with \`mcp__\`
2. For each MCP tool, determine which category best fits based on its name and what it does
3. If a tool doesn't fit any category, skip it (don't force-classify)
4. Cache the results by running:

\`\`\`bash
node scripts/flow-mcp-capabilities.js cache '<json>'
\`\`\`

Where \`<json>\` follows this format:
\`\`\`json
{
  "<server-name>": {
    "tools": [
      { "name": "mcp__server__tool_name", "description": "Brief description", "category": "<category-id>" }
    ]
  }
}
\`\`\`

Only include tools that match a category. Skip internal/utility tools that aren't useful for sub-agents.`;
}
|
|
445
|
+
|
|
446
|
+
// ============================================================
|
|
447
|
+
// Exports
|
|
448
|
+
// ============================================================
|
|
449
|
+
|
|
450
|
+
// Public API — also consumed programmatically, e.g.
// const { getCapabilityCategories, getRoleCapabilities, generateHint } = require('./flow-mcp-capabilities');
module.exports = {
  // Configuration
  getMcpCapabilitiesConfig,
  getCapabilityCategories,
  getRoleCapabilities,
  getAllRoles,

  // Discovery
  discoverMcpServers,

  // Cache
  getCachedClassifications,
  cacheClassifications,
  clearCache,
  CACHE_PATH,

  // Prompt generation
  generateHint,
  generateClassificationPrompt
};
|
|
470
|
+
|
|
471
|
+
// ============================================================
|
|
472
|
+
// CLI Interface
|
|
473
|
+
// ============================================================
|
|
474
|
+
|
|
475
|
+
// CLI entry point — runs only when this file is executed directly
// (`node flow-mcp-capabilities.js <command>`), not when require()d as a module.
if (require.main === module) {
  const args = process.argv.slice(2);
  const command = args[0];

  switch (command) {
    // check-cache: machine-readable probe for downstream tooling.
    // Prints one JSON line: { status: 'cache-hit', cachedAt, servers, tools }
    // or { status: 'cache-miss' }.
    case 'check-cache': {
      const cached = getCachedClassifications();
      if (cached) {
        const serverCount = Object.keys(cached.classifications || {}).length;
        // Total tool count across all servers; per-server `tools` may be absent.
        const toolCount = Object.values(cached.classifications || {})
          .reduce((sum, s) => sum + (s.tools?.length || 0), 0);
        console.log(JSON.stringify({
          status: 'cache-hit',
          cachedAt: cached.cachedAt,
          servers: serverCount,
          tools: toolCount
        }));
      } else {
        console.log(JSON.stringify({ status: 'cache-miss' }));
      }
      break;
    }

    // categories: human-readable listing of every capability category
    // (name, description, classification keywords).
    case 'categories': {
      const categories = getCapabilityCategories();
      console.log('\nCapability Categories:\n');
      for (const [name, def] of Object.entries(categories)) {
        console.log(`  ${name}`);
        console.log(`    ${def.description}`);
        console.log(`    Keywords: ${def.keywords.join(', ')}`);
        console.log('');
      }
      break;
    }

    // roles: show which capability categories each agent role may use.
    case 'roles': {
      const roles = getAllRoles();
      console.log('\nRole-to-Capability Mapping:\n');
      for (const [role, capabilities] of Object.entries(roles)) {
        console.log(`  ${role}: ${capabilities.join(', ')}`);
      }
      break;
    }

    // hint <role>: print the capability hint block for one agent role.
    // Prints an empty line (not an error) when no hint applies, so callers
    // can splice the output into prompts unconditionally.
    case 'hint': {
      const role = args[1];
      if (!role) {
        console.error('Usage: flow-mcp-capabilities.js hint <role>');
        process.exit(1);
      }
      const hint = generateHint(role);
      if (hint) {
        console.log(hint);
      } else {
        console.log('');
      }
      break;
    }

    // cache '<json>': persist tool classifications supplied as a JSON object
    // keyed by server name. Exits 1 on missing/invalid input or write failure.
    case 'cache': {
      const jsonStr = args[1];
      if (!jsonStr) {
        console.error('Usage: flow-mcp-capabilities.js cache \'<json>\'');
        process.exit(1);
      }
      try {
        const data = JSON.parse(jsonStr);
        // Must be a plain JSON object (not an array or scalar).
        if (typeof data !== 'object' || data === null || Array.isArray(data)) {
          console.error('Invalid input: expected a JSON object');
          process.exit(1);
        }
        // cacheClassifications handles sanitization (dangerous keys, length limits)
        const success = cacheClassifications(data);
        if (success) {
          const serverCount = Object.keys(data).length;
          const toolCount = Object.values(data).reduce((sum, s) => sum + (s.tools?.length || 0), 0);
          console.log(JSON.stringify({ status: 'cached', servers: serverCount, tools: toolCount }));
        } else {
          console.error('Failed to write cache');
          process.exit(1);
        }
      } catch (err) {
        // Covers both JSON.parse failures and unexpected shapes inside the try.
        console.error(`Invalid JSON: ${err.message}`);
        process.exit(1);
      }
      break;
    }

    // clear: delete the classification cache (forces re-classification).
    case 'clear': {
      clearCache();
      console.log('Cache cleared');
      break;
    }

    // discover: list MCP server names found by discoverMcpServers().
    case 'discover': {
      const servers = discoverMcpServers();
      if (servers.length === 0) {
        console.log('No MCP servers found');
      } else {
        console.log(`\nDiscovered ${servers.length} MCP server(s):\n`);
        for (const name of servers) {
          console.log(`  - ${name}`);
        }
      }
      break;
    }

    // classify-prompt: emit the orchestrator-facing classification
    // instructions, or a friendly no-op message when no servers exist.
    case 'classify-prompt': {
      const prompt = generateClassificationPrompt();
      if (prompt) {
        console.log(prompt);
      } else {
        console.log('No MCP servers detected — classification not needed.');
      }
      break;
    }

    // Unknown/no command: print usage. Exits 0 — NOTE(review): consider
    // whether an unknown command should exit non-zero for script callers.
    default: {
      console.log(`
Wogi Flow - MCP Capability Discovery

Usage:
  node flow-mcp-capabilities.js <command> [args]

Commands:
  check-cache        Check if classification cache exists (JSON output)
  categories         List all capability categories
  roles              List all role-to-capability mappings
  hint <role>        Generate capability hint for a specific agent role
  cache '<json>'     Cache tool classifications (JSON input)
  clear              Clear the classification cache
  discover           List all discovered MCP servers
  classify-prompt    Generate classification instructions for the orchestrator

Examples:
  node flow-mcp-capabilities.js check-cache
  node flow-mcp-capabilities.js hint explore-codebase
  node flow-mcp-capabilities.js discover
  node flow-mcp-capabilities.js classify-prompt
`);
    }
  }
}
|
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Wogi Flow - Manager Role Boundary Gate
|
|
5
|
+
*
|
|
6
|
+
* Mechanically enforces the manager's role boundaries in workspace mode.
|
|
7
|
+
* The manager is an orchestrator — it dispatches work to workers via channels.
|
|
8
|
+
* It must NOT directly modify files in member repos or run commands there.
|
|
9
|
+
*
|
|
10
|
+
* Activation: WOGI_REPO_NAME === 'manager'
|
|
11
|
+
*
|
|
12
|
+
* Rules:
|
|
13
|
+
* - Edit/Write: BLOCKED on any file inside a member repo
|
|
14
|
+
* - Read/Glob/Grep: ALLOWED for .workflow/state/, package.json; BLOCKED for source code
|
|
15
|
+
* - Bash: If command contains a member repo path, must match a read-only allowlist
|
|
16
|
+
* Otherwise BLOCKED with a dispatch redirect message
|
|
17
|
+
*
|
|
18
|
+
* Design: Allowlist-based (not blocklist). New tools/commands are blocked by default.
|
|
19
|
+
* Only explicitly whitelisted read patterns are allowed in member repos.
|
|
20
|
+
*
|
|
21
|
+
* Source: Workspace manager repeatedly violated role boundaries despite prompt rules
|
|
22
|
+
* (cd into worker repos, npm install, bridge sync). Prompt-only enforcement failed
|
|
23
|
+
* 3 times — mechanical gate required.
|
|
24
|
+
*/
|
|
25
|
+
|
|
26
|
+
'use strict';
|
|
27
|
+
|
|
28
|
+
const path = require('node:path');
|
|
29
|
+
const fs = require('node:fs');
|
|
30
|
+
|
|
31
|
+
// ============================================================
|
|
32
|
+
// Member Path Resolution
|
|
33
|
+
// ============================================================
|
|
34
|
+
|
|
35
|
+
// Memoized member-path list; populated on first successful manifest parse.
let _cachedMemberPaths = null;

/**
 * Resolve member repo paths from the workspace manifest, caching the result
 * for subsequent calls within the same process.
 *
 * Returns an empty array when WOGI_WORKSPACE_ROOT is unset/relative, the
 * manifest file is missing, or the manifest cannot be parsed.
 *
 * @returns {Array<{ name: string, resolvedPath: string, allPaths: string[], port: (number|undefined) }>}
 */
function getMemberPaths() {
  if (_cachedMemberPaths) return _cachedMemberPaths;

  const root = process.env.WOGI_WORKSPACE_ROOT;
  if (!root || !path.isAbsolute(root)) return [];

  const manifestFile = path.join(root, '.workspace', 'state', 'workspace-manifest.json');
  if (!fs.existsSync(manifestFile)) return [];

  try {
    const manifest = JSON.parse(fs.readFileSync(manifestFile, 'utf-8'));
    const result = [];

    for (const [name, member] of Object.entries(manifest.members || {})) {
      if (typeof name !== 'string' || !member) continue;

      const rawPath = member.path || member.root;
      if (typeof rawPath !== 'string') continue;

      // Keep both the lexically-normalized path AND the symlink-resolved
      // path: on macOS /tmp → /private/tmp, so commands may use either form.
      const normalized = path.resolve(root, rawPath);
      let resolved = normalized;
      try {
        resolved = fs.realpathSync(normalized);
      } catch (_err) {
        // Target doesn't exist on disk yet — fall back to the normalized form.
      }

      // Deduplicated list of candidate paths to match against.
      const allPaths = normalized === resolved ? [resolved] : [resolved, normalized];

      result.push({
        name,
        resolvedPath: resolved,
        allPaths,
        port: member.port || member.channelPort,
      });
    }

    _cachedMemberPaths = result;
    return result;
  } catch (_err) {
    return [];
  }
}
|
|
85
|
+
|
|
86
|
+
/**
 * Locate the member repo (if any) that contains the given absolute path.
 *
 * @param {string} targetPath - Absolute path to check
 * @returns {{ name: string, resolvedPath: string } | null} The matching
 *   member entry, or null when the path is outside every member repo.
 */
function findMemberForPath(targetPath) {
  if (!targetPath || !path.isAbsolute(targetPath)) return null;

  const members = getMemberPaths();

  // Compare using both the lexically-normalized form and a symlink-resolved
  // form (macOS: /tmp → /private/tmp may appear either way in tool input).
  const normalized = path.resolve(targetPath);
  let realForm = normalized;
  try {
    // Walk up to the nearest existing ancestor, resolve symlinks from there,
    // and re-attach the non-existent tail.
    for (let ancestor = normalized; ancestor !== path.dirname(ancestor); ancestor = path.dirname(ancestor)) {
      if (fs.existsSync(ancestor)) {
        realForm = fs.realpathSync(ancestor) + normalized.slice(ancestor.length);
        break;
      }
    }
  } catch (_err) {
    // Resolution failed — compare with the normalized form only.
  }

  const candidates = [realForm, normalized];
  const isInside = (base, p) => p === base || p.startsWith(base + path.sep);

  const hit = members.find((member) =>
    (member.allPaths || [member.resolvedPath]).some((base) =>
      candidates.some((candidate) => isInside(base, candidate))
    )
  );
  return hit ?? null;
}
|
|
131
|
+
|
|
132
|
+
/**
 * Look up the channel port for a member repo (used in dispatch redirect
 * messages). Reads the workspace manifest fresh on every call.
 *
 * @param {string} memberName
 * @returns {number|null} The member's port, or null when the workspace root
 *   is unset, the manifest is unreadable, or the member has no port.
 */
function getMemberPort(memberName) {
  const root = process.env.WOGI_WORKSPACE_ROOT;
  if (!root) return null;

  let entry;
  try {
    const manifestFile = path.join(root, '.workspace', 'state', 'workspace-manifest.json');
    entry = JSON.parse(fs.readFileSync(manifestFile, 'utf-8')).members?.[memberName];
  } catch (_err) {
    return null;
  }
  // `port` wins over the legacy `channelPort` field.
  return entry?.port ?? entry?.channelPort ?? null;
}
|
|
151
|
+
|
|
152
|
+
// ============================================================
|
|
153
|
+
// Read-Only Allowlist for Bash Commands
|
|
154
|
+
// ============================================================
|
|
155
|
+
|
|
156
|
+
/**
 * Read-only patterns permitted when a Bash command references a member repo.
 * A command matching ANY of these is treated as a permitted read operation;
 * everything else is blocked by default (allowlist, not blocklist).
 */

// Reading workflow state files and package.json.
const STATE_READ_PATTERNS = [
  /\bcat\s+.*\.workflow\b/,
  /\bls\s+.*\.workflow\b/,
  /\bhead\s+.*\.workflow\b/,
  /\btail\s+.*\.workflow\b/,
  /\bwc\s+.*\.workflow\b/,
  /\bcat\s+.*package\.json\b/,
];

// Git read-only operations; the optional `-C <dir>` group covers
// cross-directory invocations from the workspace root.
const GIT_READ_PATTERNS = [
  /\bgit\s+(-C\s+\S+\s+)?log\b/,
  /\bgit\s+(-C\s+\S+\s+)?status\b/,
  /\bgit\s+(-C\s+\S+\s+)?diff\b/,
  /\bgit\s+(-C\s+\S+\s+)?show\b/,
  /\bgit\s+(-C\s+\S+\s+)?blame\b/,
  /\bgit\s+(-C\s+\S+\s+)?rev-parse\b/,
  /\bgit\s+(-C\s+\S+\s+)?branch\b/,
  /\bgit\s+(-C\s+\S+\s+)?tag\s+-l\b/,
  /\bgit\s+(-C\s+\S+\s+)?ls-files\b/,
  /\bgit\s+(-C\s+\S+\s+)?describe\b/,
  /\bgit\s+(-C\s+\S+\s+)?remote\s+-v\b/,
];

// Search tools, curl (the dispatch mechanism itself), and health checks.
const MISC_READ_PATTERNS = [
  /\bgrep\s+/,
  /\bfind\s+.*-name\b/,
  /\bcurl\s+/,
  /\bwget\s+/,
];

const ALLOWED_BASH_PATTERNS = [
  ...STATE_READ_PATTERNS,
  ...GIT_READ_PATTERNS,
  ...MISC_READ_PATTERNS,
];

/**
 * Check whether a Bash command that references a member repo matches the
 * read-only allowlist above.
 *
 * @param {string} command - The Bash command string
 * @returns {boolean} True if the command is an allowed read-only operation
 */
function isAllowedReadCommand(command) {
  const normalized = command.trim();
  return ALLOWED_BASH_PATTERNS.some((pattern) => pattern.test(normalized));
}
|
|
206
|
+
|
|
207
|
+
// ============================================================
|
|
208
|
+
// Gate Logic
|
|
209
|
+
// ============================================================
|
|
210
|
+
|
|
211
|
+
/**
 * Check if a tool call violates manager role boundaries.
 *
 * Active only when WOGI_REPO_NAME === 'manager'. Fails open (never blocks)
 * when the workspace manifest is missing or has no members.
 *
 * @param {string} toolName - Tool being called (Bash, Edit, Write, Read, etc.)
 * @param {Object} toolInput - Tool input parameters
 * @returns {{ blocked: boolean, message?: string, reason?: string }}
 */
function checkManagerBoundary(toolName, toolInput) {
  // Only active in workspace manager mode
  if (process.env.WOGI_REPO_NAME !== 'manager') {
    return { blocked: false };
  }

  const members = getMemberPaths();
  if (members.length === 0) {
    // No manifest or no members — can't enforce, fail open
    return { blocked: false };
  }

  // ── Edit / Write: check file_path ────────────────────────
  // NOTE(review): only Edit/Write are matched here — confirm whether other
  // file-mutating tools (e.g. MultiEdit, NotebookEdit) can reach this gate.
  if (toolName === 'Edit' || toolName === 'Write') {
    const filePath = toolInput.file_path;
    if (!filePath) return { blocked: false };

    const member = findMemberForPath(filePath);
    if (member) {
      // Prefer the port cached on the member entry; fall back to a fresh
      // manifest lookup.
      const port = member.port || getMemberPort(member.name);
      const portHint = port ? ` (port ${port})` : '';
      return {
        blocked: true,
        reason: 'manager-boundary-write',
        message: [
          `MANAGER BOUNDARY: Cannot modify files in worker repo "${member.name}" directly.`,
          `Blocked: ${toolName} on ${path.basename(filePath)}`,
          '',
          `Dispatch to the worker instead:`,
          `  curl -s -X POST http://localhost:${port || '{port}'} -H "X-Wogi-From: manager" -d "<describe what needs to change>"`,
          '',
          `You are an orchestrator — workers make changes, you coordinate.${portHint}`
        ].join('\n')
      };
    }
    return { blocked: false };
  }

  // ── Read / Glob / Grep: allow .workflow/state/ and package.json ──
  if (toolName === 'Read' || toolName === 'Glob' || toolName === 'Grep') {
    const targetPath = toolInput.file_path || toolInput.path;
    if (!targetPath) return { blocked: false };

    const member = findMemberForPath(targetPath);
    if (member) {
      // Compute relative path using the same form that matched.
      // Try each member path form to find one that produces a clean relative.
      const resolved = path.resolve(targetPath);
      const memberPaths = member.allPaths || [member.resolvedPath];
      let relative = path.relative(member.resolvedPath, resolved);
      for (const mp of memberPaths) {
        const rel = path.relative(mp, resolved);
        if (!rel.startsWith('..')) { relative = rel; break; }
      }

      // Allowed paths: .workflow/, .workspace/, .claude/ prefixes, plus a
      // small set of top-level metadata files anywhere in the repo.
      const allowedPrefixes = ['.workflow', '.workspace', '.claude'];
      const allowedFiles = ['package.json', 'tsconfig.json', '.env.example'];
      const baseName = path.basename(resolved);

      const isAllowed = allowedPrefixes.some(prefix => relative.startsWith(prefix)) ||
        allowedFiles.includes(baseName);

      if (!isAllowed) {
        return {
          blocked: true,
          reason: 'manager-boundary-read',
          message: [
            `MANAGER BOUNDARY: Cannot read source code in worker repo "${member.name}".`,
            `Blocked: ${toolName} on ${relative}`,
            '',
            `You may read: .workflow/state/*, package.json, .claude/ (state files)`,
            `For source code investigation, dispatch to the worker.`
          ].join('\n')
        };
      }
    }
    return { blocked: false };
  }

  // ── Bash: check for member repo paths in the command ──────
  if (toolName === 'Bash') {
    const command = toolInput.command;
    if (!command) return { blocked: false };

    // Find if any member repo path appears in the command (check all path forms)
    for (const member of members) {
      const memberPaths = member.allPaths || [member.resolvedPath];
      const matchedPath = memberPaths.find(mp => command.includes(mp));
      if (matchedPath) {
        // Member path found in command — check allowlist.
        // NOTE(review): matching ANY allowlist pattern permits the whole
        // command, so a chained command like `git status && npm install`
        // would pass — confirm this trade-off is acceptable.
        if (isAllowedReadCommand(command)) {
          return { blocked: false };
        }

        const port = member.port || getMemberPort(member.name);
        return {
          blocked: true,
          reason: 'manager-boundary-bash',
          message: [
            `MANAGER BOUNDARY: Cannot run commands in worker repo "${member.name}".`,
            `Blocked: ${command.length > 100 ? command.slice(0, 100) + '...' : command}`,
            '',
            `Dispatch to the worker instead:`,
            `  curl -s -X POST http://localhost:${port || '{port}'} -H "X-Wogi-From: manager" -d "<your command>"`,
            '',
            `Allowed in member repos: read .workflow/state/, git log/status/diff, curl to ports.`,
            `Everything else must be dispatched to the worker.`
          ].join('\n')
        };
      }
    }

    // Check for cd into member repos (handles cd with various chaining operators)
    const cdPattern = /\bcd\s+["']?([^\s"';&|]+)/g;
    let match;
    while ((match = cdPattern.exec(command)) !== null) {
      const cdTarget = match[1];
      // Try to resolve the cd target (absolute paths and simple relative paths).
      // NOTE(review): relative targets resolve against this hook process's
      // cwd, which is assumed to equal the shell's cwd — confirm.
      let resolvedCd;
      try {
        resolvedCd = path.isAbsolute(cdTarget)
          ? path.resolve(cdTarget)
          : path.resolve(process.cwd(), cdTarget);
      } catch (_err) {
        continue;
      }

      const member = findMemberForPath(resolvedCd);
      if (member) {
        const port = member.port || getMemberPort(member.name);
        return {
          blocked: true,
          reason: 'manager-boundary-cd',
          message: [
            `MANAGER BOUNDARY: Cannot cd into worker repo "${member.name}".`,
            '',
            `Dispatch to the worker instead:`,
            `  curl -s -X POST http://localhost:${port || '{port}'} -H "X-Wogi-From: manager" -d "<your command>"`,
            '',
            `The manager stays in the workspace root. Workers execute in their own repos.`
          ].join('\n')
        };
      }
    }

    return { blocked: false };
  }

  // Any other tool is allowed by default.
  return { blocked: false };
}
|
|
369
|
+
|
|
370
|
+
/**
 * Clear the cached member paths (for testing or re-initialization).
 * The next getMemberPaths() call will re-read the workspace manifest.
 */
function clearCache() {
  _cachedMemberPaths = null;
}
|
|
376
|
+
|
|
377
|
+
// ============================================================
|
|
378
|
+
// Exports
|
|
379
|
+
// ============================================================
|
|
380
|
+
|
|
381
|
+
module.exports = {
|
|
382
|
+
checkManagerBoundary,
|
|
383
|
+
getMemberPaths,
|
|
384
|
+
findMemberForPath,
|
|
385
|
+
getMemberPort,
|
|
386
|
+
isAllowedReadCommand,
|
|
387
|
+
clearCache
|
|
388
|
+
};
|
|
@@ -349,8 +349,66 @@ async function handleTaskCompleted(input) {
|
|
|
349
349
|
} catch (_err) {
|
|
350
350
|
// Non-critical - registry manager may not be available
|
|
351
351
|
}
|
|
352
|
-
// Workspace
|
|
353
|
-
//
|
|
352
|
+
// Workspace: write structured task-complete message to .workspace/messages/
|
|
353
|
+
// The Stop hook sends a freeform curl to the manager as a fallback, but this
|
|
354
|
+
// structured message is the VERIFIED completion signal — it went through quality
|
|
355
|
+
// gates (gate latch check above). The manager should trust these over freeform reports.
|
|
356
|
+
if (result.completed && process.env.WOGI_WORKSPACE_ROOT) {
|
|
357
|
+
try {
|
|
358
|
+
const workspaceRoot = process.env.WOGI_WORKSPACE_ROOT;
|
|
359
|
+
|
|
360
|
+
// Validate workspace root — must be absolute and exist (mirrors stop.js pattern)
|
|
361
|
+
if (!path.isAbsolute(workspaceRoot) || !fs.existsSync(workspaceRoot)) {
|
|
362
|
+
throw new Error(`Invalid WOGI_WORKSPACE_ROOT: ${workspaceRoot}`);
|
|
363
|
+
}
|
|
364
|
+
|
|
365
|
+
const messagesDir = path.join(workspaceRoot, '.workspace', 'messages');
|
|
366
|
+
const repoName = process.env.WOGI_REPO_NAME || 'unknown';
|
|
367
|
+
|
|
368
|
+
if (fs.existsSync(messagesDir)) {
|
|
369
|
+
const msgId = `msg-${completedTask.id}-${Date.now()}`;
|
|
370
|
+
// Sanitize changedFiles: limit count and path length, strip newlines
|
|
371
|
+
const rawFiles = input.changedFiles || [];
|
|
372
|
+
const changedFiles = rawFiles.slice(0, 20).map(f =>
|
|
373
|
+
String(f).replace(/[\n\r]/g, '').slice(0, 200)
|
|
374
|
+
);
|
|
375
|
+
const qualityGates = input.qualityGateResults || [];
|
|
376
|
+
const evidenceTier = input.evidenceTier || 'unknown';
|
|
377
|
+
|
|
378
|
+
const message = {
|
|
379
|
+
id: msgId,
|
|
380
|
+
from: repoName,
|
|
381
|
+
to: 'manager',
|
|
382
|
+
type: 'task-complete',
|
|
383
|
+
subject: `Task completed: ${completedTask.title || completedTask.id}`,
|
|
384
|
+
body: [
|
|
385
|
+
`**Task**: ${completedTask.id} — ${completedTask.title || ''}`,
|
|
386
|
+
`**Type**: ${completedTask.type || 'unknown'}`,
|
|
387
|
+
changedFiles.length > 0 ? `**Files changed**: ${changedFiles.join(', ')}` : null,
|
|
388
|
+
qualityGates.length > 0 ? `**Quality gates**: ${qualityGates.map(g => `${g.name}: ${g.passed ? 'PASS' : 'FAIL'}`).join(', ')}` : null,
|
|
389
|
+
`**Verification evidence**: ${evidenceTier}`,
|
|
390
|
+
].filter(Boolean).join('\n'),
|
|
391
|
+
taskId: completedTask.id,
|
|
392
|
+
status: 'pending',
|
|
393
|
+
verified: true,
|
|
394
|
+
evidenceTier,
|
|
395
|
+
timestamp: new Date().toISOString()
|
|
396
|
+
};
|
|
397
|
+
|
|
398
|
+
fs.writeFileSync(
|
|
399
|
+
path.join(messagesDir, `${msgId}.json`),
|
|
400
|
+
JSON.stringify(message, null, 2),
|
|
401
|
+
{ mode: 0o644 }
|
|
402
|
+
);
|
|
403
|
+
}
|
|
404
|
+
} catch (_err) {
|
|
405
|
+
// Non-critical — workspace message is defense-in-depth.
|
|
406
|
+
// The Stop hook curl remains as fallback.
|
|
407
|
+
if (process.env.DEBUG) {
|
|
408
|
+
console.error(`[Task Completed] Workspace message write failed: ${_err.message}`);
|
|
409
|
+
}
|
|
410
|
+
}
|
|
411
|
+
}
|
|
354
412
|
|
|
355
413
|
// Compound from success — capture positive patterns (fire-and-forget)
|
|
356
414
|
if (result.completed) {
|
|
@@ -30,6 +30,8 @@ let checkScopeMutation = _noop;
|
|
|
30
30
|
try { checkScopeMutation = require('../../core/scope-mutation-gate').checkScopeMutation; } catch (_err) { if (process.env.DEBUG) console.error(`[Hook] Scope mutation gate not loaded: ${_err.message}`); }
|
|
31
31
|
let checkGitSafety = _noop;
|
|
32
32
|
try { checkGitSafety = require('../../core/git-safety-gate').checkGitSafety; } catch (_err) { if (process.env.DEBUG) console.error(`[Hook] Git safety gate not loaded: ${_err.message}`); }
|
|
33
|
+
let checkManagerBoundary = _noop;
|
|
34
|
+
try { checkManagerBoundary = require('../../core/manager-boundary-gate').checkManagerBoundary; } catch (_err) { if (process.env.DEBUG) console.error(`[Hook] Manager boundary gate not loaded: ${_err.message}`); }
|
|
33
35
|
const { claudeCodeAdapter } = require('../../adapters/claude-code');
|
|
34
36
|
const { markSkillPending } = require('../../../flow-durable-session');
|
|
35
37
|
const { getConfig } = require('../../../flow-utils');
|
|
@@ -224,6 +226,29 @@ runHook('PreToolUse', async ({ input, parsedInput }) => {
|
|
|
224
226
|
}
|
|
225
227
|
}
|
|
226
228
|
|
|
229
|
+
// Manager role boundary gate — blocks modifications in worker repos
|
|
230
|
+
// Runs early: role boundaries should be enforced before other gates
|
|
231
|
+
if (process.env.WOGI_REPO_NAME === 'manager') {
|
|
232
|
+
try {
|
|
233
|
+
const boundaryResult = checkManagerBoundary(toolName, toolInput);
|
|
234
|
+
if (boundaryResult.blocked) {
|
|
235
|
+
coreResult = {
|
|
236
|
+
allowed: false,
|
|
237
|
+
blocked: true,
|
|
238
|
+
reason: boundaryResult.reason,
|
|
239
|
+
message: boundaryResult.message
|
|
240
|
+
};
|
|
241
|
+
const output = claudeCodeAdapter.transformResult('PreToolUse', coreResult);
|
|
242
|
+
return { __raw: true, ...output };
|
|
243
|
+
}
|
|
244
|
+
} catch (err) {
|
|
245
|
+
// Fail-open: manager boundary errors should not block normal work
|
|
246
|
+
if (process.env.DEBUG) {
|
|
247
|
+
console.error(`[Hook] Manager boundary gate error (fail-open): ${err.message}`);
|
|
248
|
+
}
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
227
252
|
// Commit log gate check
|
|
228
253
|
if (toolName === 'Bash' && toolInput.command) {
|
|
229
254
|
try {
|
package/scripts/postinstall.js
CHANGED
|
@@ -389,16 +389,27 @@ function rewriteHookPaths(settings) {
|
|
|
389
389
|
// In self-development, hooks should use local paths (node scripts/hooks/...)
|
|
390
390
|
// not package paths (node node_modules/wogiflow/scripts/hooks/...) which don't exist.
|
|
391
391
|
if (path.resolve(PROJECT_ROOT) === path.resolve(PACKAGE_ROOT)) return;
|
|
392
|
+
|
|
393
|
+
// Use absolute path to PACKAGE_ROOT/scripts/ instead of relative node_modules/ path.
|
|
394
|
+
// This fixes monorepo setups where npm hoists wogiflow to the workspace root
|
|
395
|
+
// node_modules/ but Claude Code runs hooks from a package subdirectory (e.g.,
|
|
396
|
+
// packages/portal/). Relative paths like 'node node_modules/wogiflow/scripts/...'
|
|
397
|
+
// fail because the package doesn't exist at the subdirectory level.
|
|
398
|
+
// Absolute paths work regardless of where Claude Code's cwd is.
|
|
399
|
+
const absoluteScriptsDir = path.resolve(PACKAGE_ROOT, 'scripts');
|
|
400
|
+
|
|
392
401
|
for (const hookList of Object.values(settings.hooks)) {
|
|
393
402
|
if (!Array.isArray(hookList)) continue;
|
|
394
403
|
for (const entry of hookList) {
|
|
395
404
|
if (!entry.hooks || !Array.isArray(entry.hooks)) continue;
|
|
396
405
|
for (const hook of entry.hooks) {
|
|
397
406
|
if (hook.command && typeof hook.command === 'string') {
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
)
|
|
407
|
+
// Extract the relative script path, join with absolute base, wrap in quotes.
|
|
408
|
+
// Simpler and more robust than regex-based open/close quoting.
|
|
409
|
+
const match = hook.command.match(/^node scripts\/(.+)$/);
|
|
410
|
+
if (match) {
|
|
411
|
+
hook.command = `node "${path.join(absoluteScriptsDir, match[1])}"`;
|
|
412
|
+
}
|
|
402
413
|
}
|
|
403
414
|
}
|
|
404
415
|
}
|