@agent-relay/bridge 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +8 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -0
- package/dist/multi-project-client.d.ts +99 -0
- package/dist/multi-project-client.d.ts.map +1 -0
- package/dist/multi-project-client.js +389 -0
- package/dist/multi-project-client.js.map +1 -0
- package/dist/shadow-cli.d.ts +17 -0
- package/dist/shadow-cli.d.ts.map +1 -0
- package/dist/shadow-cli.js +75 -0
- package/dist/shadow-cli.js.map +1 -0
- package/dist/spawner.d.ts +210 -0
- package/dist/spawner.d.ts.map +1 -0
- package/dist/spawner.js +1276 -0
- package/dist/spawner.js.map +1 -0
- package/dist/types.d.ts +131 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +6 -0
- package/dist/types.js.map +1 -0
- package/dist/utils.d.ts +15 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +60 -0
- package/dist/utils.js.map +1 -0
- package/package.json +40 -0
package/dist/spawner.js
ADDED
|
@@ -0,0 +1,1276 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent Spawner
|
|
3
|
+
* Handles spawning and releasing worker agents via relay-pty.
|
|
4
|
+
* Workers run headlessly with output capture for logs.
|
|
5
|
+
*/
|
|
6
|
+
import fs from 'node:fs';
|
|
7
|
+
import { execFile } from 'node:child_process';
|
|
8
|
+
import path from 'node:path';
|
|
9
|
+
import { fileURLToPath } from 'node:url';
|
|
10
|
+
import { sleep } from './utils.js';
|
|
11
|
+
import { getProjectPaths, getAgentOutboxTemplate } from '@agent-relay/config';
|
|
12
|
+
import { resolveCommand } from '@agent-relay/utils/command-resolver';
|
|
13
|
+
import { createTraceableError } from '@agent-relay/utils/error-tracking';
|
|
14
|
+
import { createLogger } from '@agent-relay/utils/logger';
|
|
15
|
+
import { mapModelToCli } from '@agent-relay/utils/model-mapping';
|
|
16
|
+
import { RelayPtyOrchestrator } from '@agent-relay/wrapper';
|
|
17
|
+
import { selectShadowCli } from './shadow-cli.js';
|
|
18
|
+
// Get the directory where this module is located (for binary path resolution)
|
|
19
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
20
|
+
const __dirname = path.dirname(__filename);
|
|
21
|
+
import { AgentPolicyService } from '@agent-relay/policy';
|
|
22
|
+
import { buildClaudeArgs, findAgentConfig } from '@agent-relay/config/agent-config';
|
|
23
|
+
import { composeForAgent } from '@agent-relay/wrapper';
|
|
24
|
+
import { getUserDirectoryService } from '@agent-relay/user-directory';
|
|
25
|
+
// Logger instance for spawner (uses daemon log system instead of console)
|
|
26
|
+
const log = createLogger('spawner');
|
|
27
|
+
/**
 * CLI command mapping for providers.
 * Maps a provider name to the actual executable name to spawn; providers not
 * listed here use their own name as the command (claude, codex, etc. — see
 * the lookup fallback `CLI_COMMAND_MAP[raw] || raw` in spawn()).
 */
const CLI_COMMAND_MAP = {
    cursor: 'agent', // Cursor CLI installs as 'agent'
    google: 'gemini', // Google provider uses 'gemini' CLI
    // Other providers use their name as the command (claude, codex, etc.)
};
|
|
36
|
+
/**
 * Extract a GitHub token for github.com from the contents of a gh CLI
 * hosts.yml file, using a minimal line-based scan instead of a YAML parser.
 *
 * A top-level (unindented) line names a host section; inside the
 * `github.com` section, the first non-empty `oauth_token:` or `token:` value
 * wins. Trailing `#` comments and one pair of surrounding quotes are stripped.
 *
 * @param content - Raw text of hosts.yml.
 * @returns The token string, or null when no github.com token is present.
 */
function extractGhTokenFromHosts(content) {
    let withinGithubHost = false;
    for (const rawLine of content.split(/\r?\n/)) {
        const stripped = rawLine.trim();
        if (!stripped) continue;
        // Unindented lines start a new host section, e.g. "github.com:".
        const isHostLine = !rawLine.startsWith(' ') && !rawLine.startsWith('\t');
        if (isHostLine) {
            withinGithubHost = stripped.replace(/:$/, '') === 'github.com';
            continue;
        }
        if (!withinGithubHost) continue;
        const keyValue = rawLine.match(/^\s*(oauth_token|token):\s*(.+)$/);
        if (!keyValue) continue;
        // Drop inline comment, surrounding whitespace, then one pair of quotes.
        const candidate = keyValue[2].split('#')[0].trim().replace(/^['"]|['"]$/g, '');
        if (candidate) return candidate;
    }
    return null;
}
|
|
64
|
+
/**
 * Ensure MCP permissions are pre-configured for Claude Code.
 * This prevents MCP approval prompts from blocking agent initialization.
 *
 * Creates/updates .claude/settings.local.json with:
 * - enableAllProjectMcpServers: true (auto-approve project MCP servers)
 * - permissions.allow: ["mcp__agent-relay"] (pre-approve agent-relay MCP tools)
 *
 * Best-effort: any filesystem or JSON error is logged and swallowed.
 *
 * @param projectRoot - The project root directory
 * @param debug - Whether to log debug information
 */
function ensureMcpPermissions(projectRoot, debug = false) {
    const claudeDir = path.join(projectRoot, '.claude');
    const settingsFile = path.join(claudeDir, 'settings.local.json');
    try {
        // recursive mkdir is a no-op when the directory already exists.
        fs.mkdirSync(claudeDir, { recursive: true });
        // Load existing settings; malformed JSON is treated as empty.
        let settings = {};
        if (fs.existsSync(settingsFile)) {
            try {
                settings = JSON.parse(fs.readFileSync(settingsFile, 'utf-8'));
            }
            catch {
                settings = {};
            }
        }
        // Auto-approve MCP servers declared in the project's .mcp.json.
        if (settings.enableAllProjectMcpServers !== true) {
            settings.enableAllProjectMcpServers = true;
            if (debug)
                log.debug('Setting enableAllProjectMcpServers: true');
        }
        // "mcp__<server>" in permissions.allow approves all tools on that server.
        if (!settings.permissions || typeof settings.permissions !== 'object') {
            settings.permissions = {};
        }
        if (!Array.isArray(settings.permissions.allow)) {
            settings.permissions.allow = [];
        }
        const relayPermission = 'mcp__agent-relay';
        if (!settings.permissions.allow.includes(relayPermission)) {
            settings.permissions.allow.push(relayPermission);
            if (debug)
                log.debug(`Added MCP permission: ${relayPermission}`);
        }
        fs.writeFileSync(settingsFile, JSON.stringify(settings, null, 2) + '\n');
        if (debug)
            log.debug(`MCP permissions configured at ${settingsFile}`);
    }
    catch (err) {
        // Log but don't fail - this is a best-effort optimization
        log.warn('Failed to pre-configure MCP permissions', {
            error: err instanceof Error ? err.message : String(err),
        });
    }
}
|
|
130
|
+
/**
 * Get MCP tools reference for spawned agents.
 * Only included when MCP is configured for the project.
 *
 * @returns A markdown fragment listing the relay MCP tools, terminated by a
 *   trailing newline (the final empty entry) so it concatenates cleanly.
 */
function getMcpToolsReference() {
    const referenceLines = [
        '## MCP Tools Available',
        '',
        'You have access to MCP tools for agent communication (recommended over file protocol):',
        '- `relay_send(to, message)` - Send message to agent/channel',
        '- `relay_spawn(name, cli, task)` - Create worker agent',
        '- `relay_inbox()` - Check your messages',
        '- `relay_who()` - List online agents',
        '- `relay_release(name)` - Stop a worker agent',
        '- `relay_status()` - Check connection status',
        '',
    ];
    return referenceLines.join('\n');
}
|
|
148
|
+
/**
 * Get relay protocol instructions for a spawned agent.
 * This provides the agent with the communication protocol it needs to work with the relay.
 *
 * Uses the legacy outbox path (/tmp/relay-outbox/) which is symlinked to workspace paths.
 * This keeps agent instructions simple while supporting workspace isolation.
 *
 * @param agentName - Name of the agent
 * @param hasMcp - Whether MCP tools are available (based on .mcp.json existence)
 * @returns Markdown instructions intended to be injected as a system prompt.
 */
function getRelayInstructions(agentName, hasMcp = false) {
    // Get the outbox path template and replace variable with actual agent name
    const outboxBase = getAgentOutboxTemplate(agentName);
    const parts = [
        '# Agent Relay Protocol',
        '',
        `You are agent "${agentName}" connected to Agent Relay for multi-agent coordination.`,
        '',
    ];
    // Add MCP tools reference if available
    if (hasMcp) {
        parts.push(getMcpToolsReference());
    }
    // NOTE(review): the `->relay-file:<name>` triggers and `TO:`/`THREAD:`
    // headers below are presumably parsed verbatim by the relay runtime —
    // do not reword these literals; confirm against the wrapper's parser.
    parts.push('## Sending Messages', '', 'Write a file to your outbox, then output the trigger:', '', '```bash', `cat > ${outboxBase}/msg << 'EOF'`, 'TO: TargetAgent', '', 'Your message here.', 'EOF', '```', '', 'Then output: `->relay-file:msg`', '', '## Communication Rules', '', '1. **ACK immediately** - When you receive a task:', '```bash', `cat > ${outboxBase}/ack << 'EOF'`, 'TO: Sender', '', 'ACK: Brief description of task received', 'EOF', '```', 'Then: `->relay-file:ack`', '', '2. **Report completion** - When done:', '```bash', `cat > ${outboxBase}/done << 'EOF'`, 'TO: Sender', '', 'DONE: Brief summary of what was completed', 'EOF', '```', 'Then: `->relay-file:done`', '', '## Message Format', '', '```', 'TO: Target', 'THREAD: optional-thread', '', 'Message body (everything after blank line)', '```', '', '| TO Value | Behavior |', '|----------|----------|', '| `AgentName` | Direct message |', '| `*` | Broadcast to all |', '| `#channel` | Channel message |');
    return parts.join('\n');
}
|
|
174
|
+
/**
 * Check if the relay-pty binary is available.
 * Returns the path to the binary if found, null otherwise.
 *
 * Search order:
 * 1. bin/relay-pty in package root (installed by postinstall)
 * 2. relay-pty/target/release/relay-pty (local Rust build, then debug build)
 * 3. relay-pty build under the current working directory (development)
 * 4. /usr/local/bin/relay-pty (global install)
 * 5. node_modules/agent-relay/bin/relay-pty (installed as a dependency)
 *
 * @returns Absolute path to the first existing candidate, or null.
 */
function findRelayPtyBinary() {
    // Project root is three levels up from packages/bridge/dist/:
    // packages/bridge/dist/ -> packages/bridge -> packages -> project root
    const projectRoot = path.join(__dirname, '..', '..', '..');
    const searchPaths = [
        path.join(projectRoot, 'bin', 'relay-pty'),
        path.join(projectRoot, 'relay-pty', 'target', 'release', 'relay-pty'),
        path.join(projectRoot, 'relay-pty', 'target', 'debug', 'relay-pty'),
        path.join(process.cwd(), 'relay-pty', 'target', 'release', 'relay-pty'),
        '/usr/local/bin/relay-pty',
        path.join(process.cwd(), 'node_modules', 'agent-relay', 'bin', 'relay-pty'),
    ];
    return searchPaths.find((candidate) => fs.existsSync(candidate)) ?? null;
}
|
|
207
|
+
/** Cached result of relay-pty binary check (null = checked, not found). */
let relayPtyBinaryPath;
/** Whether the binary lookup has already run for this process. */
let relayPtyBinaryChecked = false;
/**
 * Check if relay-pty binary is available (cached).
 * The filesystem search runs at most once per process; subsequent calls
 * reuse the cached result.
 *
 * @returns true if the binary exists, false otherwise.
 */
function hasRelayPtyBinary() {
    if (relayPtyBinaryChecked) {
        return relayPtyBinaryPath !== null;
    }
    relayPtyBinaryPath = findRelayPtyBinary();
    relayPtyBinaryChecked = true;
    if (process.env.DEBUG_SPAWN === '1') {
        log.debug(relayPtyBinaryPath
            ? `relay-pty binary found: ${relayPtyBinaryPath}`
            : 'relay-pty binary not found, will use PtyWrapper fallback');
    }
    return relayPtyBinaryPath !== null;
}
|
|
229
|
+
export class AgentSpawner {
|
|
230
|
+
static ONLINE_THRESHOLD_MS = 30_000;
|
|
231
|
+
activeWorkers = new Map();
|
|
232
|
+
agentsPath;
|
|
233
|
+
registryPath;
|
|
234
|
+
projectRoot;
|
|
235
|
+
socketPath;
|
|
236
|
+
logsDir;
|
|
237
|
+
workersPath;
|
|
238
|
+
dashboardPort;
|
|
239
|
+
onAgentDeath;
|
|
240
|
+
cloudPersistence;
|
|
241
|
+
policyService;
|
|
242
|
+
policyEnforcementEnabled = false;
|
|
243
|
+
onMarkSpawning;
|
|
244
|
+
onClearSpawning;
|
|
245
|
+
/**
 * @param projectRootOrOptions - Either an options object, or (legacy
 *   signature) the project root string followed by tmux session and port.
 * @param _tmuxSession - Legacy positional tmux session (only used when the
 *   first argument is a string).
 * @param dashboardPort - Legacy positional dashboard port.
 */
constructor(projectRootOrOptions, _tmuxSession, dashboardPort) {
    // Handle both old (positional) and new (options-object) constructor signatures
    const options = typeof projectRootOrOptions === 'string'
        ? { projectRoot: projectRootOrOptions, tmuxSession: _tmuxSession, dashboardPort }
        : projectRootOrOptions;
    const paths = getProjectPaths(options.projectRoot);
    this.projectRoot = paths.projectRoot;
    // Use connected-agents.json (live socket connections) instead of agents.json (historical registry)
    // This ensures spawned agents have actual daemon connections for channel message delivery
    this.agentsPath = path.join(paths.teamDir, 'connected-agents.json');
    this.registryPath = path.join(paths.teamDir, 'agents.json');
    this.socketPath = paths.socketPath;
    this.logsDir = path.join(paths.teamDir, 'worker-logs');
    this.workersPath = path.join(paths.teamDir, 'workers.json');
    this.dashboardPort = options.dashboardPort;
    // Store spawn tracking callbacks (invoked around spawn attempts by the owner)
    this.onMarkSpawning = options.onMarkSpawning;
    this.onClearSpawning = options.onClearSpawning;
    // Ensure logs directory exists before any worker output is captured
    fs.mkdirSync(this.logsDir, { recursive: true });
    // Initialize policy service only when enforcement is opted in via env
    if (process.env.AGENT_POLICY_ENFORCEMENT === '1') {
        this.policyEnforcementEnabled = true;
        this.policyService = new AgentPolicyService({
            projectRoot: this.projectRoot,
            workspaceId: process.env.WORKSPACE_ID,
            strictMode: process.env.AGENT_POLICY_STRICT === '1',
        });
        log.info('Policy enforcement enabled');
    }
}
|
|
276
|
+
/**
 * Set cloud policy fetcher for workspace-level policies.
 * Recreates the policy service with the given fetcher. No-op when policy
 * enforcement is disabled (this.policyService unset).
 *
 * @param fetcher - Passed through to AgentPolicyService as `cloudFetcher`.
 */
setCloudPolicyFetcher(fetcher) {
    if (this.policyService) {
        // Recreate policy service with cloud fetcher
        this.policyService = new AgentPolicyService({
            projectRoot: this.projectRoot,
            workspaceId: process.env.WORKSPACE_ID,
            cloudFetcher: fetcher,
            strictMode: process.env.AGENT_POLICY_STRICT === '1',
        });
    }
}
|
|
290
|
+
/**
 * Get the policy service (for external access to policy checks).
 *
 * @returns The AgentPolicyService instance, or undefined when policy
 *   enforcement is disabled (AGENT_POLICY_ENFORCEMENT !== '1').
 */
getPolicyService() {
    return this.policyService;
}
|
|
296
|
+
async fetchGhTokenFromCloud() {
|
|
297
|
+
const cloudApiUrl = process.env.CLOUD_API_URL || process.env.AGENT_RELAY_CLOUD_URL;
|
|
298
|
+
const workspaceId = process.env.WORKSPACE_ID;
|
|
299
|
+
const workspaceToken = process.env.WORKSPACE_TOKEN;
|
|
300
|
+
if (!cloudApiUrl || !workspaceId || !workspaceToken) {
|
|
301
|
+
return null;
|
|
302
|
+
}
|
|
303
|
+
const normalizedUrl = cloudApiUrl.replace(/\/$/, '');
|
|
304
|
+
const url = `${normalizedUrl}/api/git/token?workspaceId=${encodeURIComponent(workspaceId)}`;
|
|
305
|
+
try {
|
|
306
|
+
// Use AbortController for timeout (5 seconds - don't block spawning)
|
|
307
|
+
const controller = new AbortController();
|
|
308
|
+
const timeoutId = setTimeout(() => controller.abort(), 5000);
|
|
309
|
+
const response = await fetch(url, {
|
|
310
|
+
headers: {
|
|
311
|
+
Authorization: `Bearer ${workspaceToken}`,
|
|
312
|
+
},
|
|
313
|
+
signal: controller.signal,
|
|
314
|
+
});
|
|
315
|
+
clearTimeout(timeoutId);
|
|
316
|
+
if (!response.ok) {
|
|
317
|
+
log.warn(`Failed to fetch GH token from cloud: ${response.status} ${response.statusText}`);
|
|
318
|
+
return null;
|
|
319
|
+
}
|
|
320
|
+
const data = await response.json();
|
|
321
|
+
return data.userToken || data.token || null;
|
|
322
|
+
}
|
|
323
|
+
catch (err) {
|
|
324
|
+
// Don't log timeout errors loudly - this is expected when cloud is unreachable
|
|
325
|
+
const message = err instanceof Error ? err.message : String(err);
|
|
326
|
+
if (message.includes('abort')) {
|
|
327
|
+
log.info('Cloud API timeout (5s) - using local auth');
|
|
328
|
+
}
|
|
329
|
+
else {
|
|
330
|
+
log.warn('Failed to fetch GH token from cloud', { error: message });
|
|
331
|
+
}
|
|
332
|
+
return null;
|
|
333
|
+
}
|
|
334
|
+
}
|
|
335
|
+
resolveGhTokenFromHostsFile(homeDir) {
|
|
336
|
+
const resolvedHome = homeDir || process.env.HOME;
|
|
337
|
+
const configHome = process.env.XDG_CONFIG_HOME || (resolvedHome ? path.join(resolvedHome, '.config') : undefined);
|
|
338
|
+
const candidates = new Set();
|
|
339
|
+
if (configHome) {
|
|
340
|
+
candidates.add(path.join(configHome, 'gh', 'hosts.yml'));
|
|
341
|
+
}
|
|
342
|
+
if (resolvedHome) {
|
|
343
|
+
candidates.add(path.join(resolvedHome, '.config', 'gh', 'hosts.yml'));
|
|
344
|
+
}
|
|
345
|
+
for (const hostPath of candidates) {
|
|
346
|
+
if (!hostPath || !fs.existsSync(hostPath)) {
|
|
347
|
+
continue;
|
|
348
|
+
}
|
|
349
|
+
try {
|
|
350
|
+
const content = fs.readFileSync(hostPath, 'utf8');
|
|
351
|
+
const token = extractGhTokenFromHosts(content);
|
|
352
|
+
if (token) {
|
|
353
|
+
return token;
|
|
354
|
+
}
|
|
355
|
+
}
|
|
356
|
+
catch {
|
|
357
|
+
continue;
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
return null;
|
|
361
|
+
}
|
|
362
|
+
async resolveGhTokenFromGhCli() {
|
|
363
|
+
// Check common gh CLI installation paths across platforms
|
|
364
|
+
const ghPathCandidates = [
|
|
365
|
+
'/usr/bin/gh', // Linux package managers
|
|
366
|
+
'/usr/local/bin/gh', // Homebrew (Intel Mac), manual install
|
|
367
|
+
'/opt/homebrew/bin/gh', // Homebrew (Apple Silicon Mac)
|
|
368
|
+
'/home/linuxbrew/.linuxbrew/bin/gh', // Linuxbrew
|
|
369
|
+
];
|
|
370
|
+
const ghPath = ghPathCandidates.find((p) => fs.existsSync(p));
|
|
371
|
+
if (!ghPath) {
|
|
372
|
+
return null;
|
|
373
|
+
}
|
|
374
|
+
return await new Promise((resolve) => {
|
|
375
|
+
execFile(ghPath, ['auth', 'token', '--hostname', 'github.com'], { timeout: 5000 }, (err, stdout) => {
|
|
376
|
+
if (err) {
|
|
377
|
+
resolve(null);
|
|
378
|
+
return;
|
|
379
|
+
}
|
|
380
|
+
const token = stdout.trim();
|
|
381
|
+
resolve(token || null);
|
|
382
|
+
});
|
|
383
|
+
});
|
|
384
|
+
}
|
|
385
|
+
/**
|
|
386
|
+
* Resolve GitHub token using multiple fallback sources.
|
|
387
|
+
*
|
|
388
|
+
* Fallback order (same as git-credential-relay for consistency):
|
|
389
|
+
* 1. Environment - GH_TOKEN or GITHUB_TOKEN (fastest, set by entrypoint)
|
|
390
|
+
* 2. hosts.yml - gh CLI config file (~/.config/gh/hosts.yml)
|
|
391
|
+
* 3. gh CLI - execute `gh auth token` command
|
|
392
|
+
* 4. Cloud API - workspace-scoped token from Nango (requires network)
|
|
393
|
+
*
|
|
394
|
+
* Environment is checked first because:
|
|
395
|
+
* - It's the fastest (no I/O or network)
|
|
396
|
+
* - The entrypoint pre-fetches and caches GH_TOKEN at startup
|
|
397
|
+
* - This avoids delays when cloud API is slow/unreachable
|
|
398
|
+
*/
|
|
399
|
+
async resolveGhToken(homeDir) {
|
|
400
|
+
// 1. Check environment variables first (fastest - set by entrypoint at startup)
|
|
401
|
+
const envToken = process.env.GH_TOKEN || process.env.GITHUB_TOKEN;
|
|
402
|
+
if (envToken) {
|
|
403
|
+
return envToken;
|
|
404
|
+
}
|
|
405
|
+
// 2. Parse gh CLI hosts.yml config file
|
|
406
|
+
const hostsToken = this.resolveGhTokenFromHostsFile(homeDir);
|
|
407
|
+
if (hostsToken) {
|
|
408
|
+
return hostsToken;
|
|
409
|
+
}
|
|
410
|
+
// 3. Execute gh CLI if available
|
|
411
|
+
const cliToken = await this.resolveGhTokenFromGhCli();
|
|
412
|
+
if (cliToken) {
|
|
413
|
+
return cliToken;
|
|
414
|
+
}
|
|
415
|
+
// 4. Try cloud API as last resort (may be slow or unreachable)
|
|
416
|
+
return await this.fetchGhTokenFromCloud();
|
|
417
|
+
}
|
|
418
|
+
/**
 * Set the dashboard port (for nested spawn API calls).
 * Called after the dashboard server starts and we know the actual port;
 * until then, spawned agents cannot spawn sub-workers.
 *
 * @param port - Port the dashboard server is listening on.
 */
setDashboardPort(port) {
    log.info(`Dashboard port set to ${port} - nested spawns now enabled`);
    this.dashboardPort = port;
}
|
|
426
|
+
/**
 * Set callback for agent death notifications.
 * Called when an agent exits unexpectedly (non-zero exit code).
 *
 * @param callback - Stored on this.onAgentDeath; invoked by the spawner's
 *   exit handling (signature defined by the caller).
 */
setOnAgentDeath(callback) {
    this.onAgentDeath = callback;
}
|
|
433
|
+
/**
 * Set cloud persistence handler for forwarding RelayPtyOrchestrator events.
 * When set, 'summary' and 'session-end' events from spawned agents
 * are forwarded to the handler for cloud persistence (PostgreSQL/Redis).
 *
 * Note: Enable via RELAY_CLOUD_ENABLED=true environment variable.
 *
 * @param handler - Object exposing onSummary(name, event) and
 *   onSessionEnd(name, event), as consumed by bindCloudPersistenceEvents.
 */
setCloudPersistence(handler) {
    this.cloudPersistence = handler;
    log.info('Cloud persistence handler set');
}
|
|
444
|
+
/**
|
|
445
|
+
* Bind cloud persistence event handlers to a RelayPtyOrchestrator.
|
|
446
|
+
* Returns the listener references for cleanup.
|
|
447
|
+
*/
|
|
448
|
+
bindCloudPersistenceEvents(name, pty) {
|
|
449
|
+
if (!this.cloudPersistence)
|
|
450
|
+
return {};
|
|
451
|
+
const summaryListener = async (event) => {
|
|
452
|
+
try {
|
|
453
|
+
await this.cloudPersistence.onSummary(name, event);
|
|
454
|
+
}
|
|
455
|
+
catch (err) {
|
|
456
|
+
log.error(`Cloud persistence summary error for ${name}`, { error: err instanceof Error ? err.message : String(err) });
|
|
457
|
+
}
|
|
458
|
+
};
|
|
459
|
+
const sessionEndListener = async (event) => {
|
|
460
|
+
try {
|
|
461
|
+
await this.cloudPersistence.onSessionEnd(name, event);
|
|
462
|
+
}
|
|
463
|
+
catch (err) {
|
|
464
|
+
log.error(`Cloud persistence session-end error for ${name}`, { error: err instanceof Error ? err.message : String(err) });
|
|
465
|
+
}
|
|
466
|
+
};
|
|
467
|
+
pty.on('summary', summaryListener);
|
|
468
|
+
pty.on('session-end', sessionEndListener);
|
|
469
|
+
return { summary: summaryListener, sessionEnd: sessionEndListener };
|
|
470
|
+
}
|
|
471
|
+
/**
|
|
472
|
+
* Unbind all tracked listeners from a RelayPtyOrchestrator.
|
|
473
|
+
*/
|
|
474
|
+
unbindListeners(pty, listeners) {
|
|
475
|
+
if (!listeners)
|
|
476
|
+
return;
|
|
477
|
+
if (listeners.output) {
|
|
478
|
+
pty.off('output', listeners.output);
|
|
479
|
+
}
|
|
480
|
+
if (listeners.summary) {
|
|
481
|
+
pty.off('summary', listeners.summary);
|
|
482
|
+
}
|
|
483
|
+
if (listeners.sessionEnd) {
|
|
484
|
+
pty.off('session-end', listeners.sessionEnd);
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
/**
|
|
488
|
+
* Spawn a new worker agent using relay-pty
|
|
489
|
+
*/
|
|
490
|
+
async spawn(request) {
|
|
491
|
+
const { name, cli, task, team, spawnerName, userId } = request;
|
|
492
|
+
const debug = process.env.DEBUG_SPAWN === '1';
|
|
493
|
+
// Check if worker already exists in this spawner
|
|
494
|
+
if (this.activeWorkers.has(name)) {
|
|
495
|
+
return {
|
|
496
|
+
success: false,
|
|
497
|
+
name,
|
|
498
|
+
error: `Agent "${name}" is already running. Use a different name or release the existing agent first.`,
|
|
499
|
+
};
|
|
500
|
+
}
|
|
501
|
+
// Check if agent is already connected to daemon (prevents duplicate connection storms)
|
|
502
|
+
if (this.isAgentConnected(name)) {
|
|
503
|
+
return {
|
|
504
|
+
success: false,
|
|
505
|
+
name,
|
|
506
|
+
error: `Agent "${name}" is already connected to the daemon. Use a different name or wait for the existing agent to disconnect.`,
|
|
507
|
+
};
|
|
508
|
+
}
|
|
509
|
+
// Enforce agent limit based on plan (MAX_AGENTS is set by provisioner based on plan)
|
|
510
|
+
const maxAgents = parseInt(process.env.MAX_AGENTS || '10', 10);
|
|
511
|
+
const currentAgentCount = this.activeWorkers.size;
|
|
512
|
+
if (currentAgentCount >= maxAgents) {
|
|
513
|
+
log.warn(`Agent limit reached: ${currentAgentCount}/${maxAgents}`);
|
|
514
|
+
return {
|
|
515
|
+
success: false,
|
|
516
|
+
name,
|
|
517
|
+
error: `Agent limit reached (${currentAgentCount}/${maxAgents}). Upgrade your plan for more agents.`,
|
|
518
|
+
};
|
|
519
|
+
}
|
|
520
|
+
// Policy enforcement: check if the spawner is authorized to spawn this agent
|
|
521
|
+
if (this.policyEnforcementEnabled && this.policyService && spawnerName) {
|
|
522
|
+
const decision = await this.policyService.canSpawn(spawnerName, name, cli);
|
|
523
|
+
if (!decision.allowed) {
|
|
524
|
+
log.warn(`Policy blocked spawn: ${spawnerName} -> ${name}: ${decision.reason}`);
|
|
525
|
+
return {
|
|
526
|
+
success: false,
|
|
527
|
+
name,
|
|
528
|
+
error: `Policy denied: ${decision.reason}`,
|
|
529
|
+
policyDecision: decision,
|
|
530
|
+
};
|
|
531
|
+
}
|
|
532
|
+
if (debug) {
|
|
533
|
+
log.debug(`Policy allowed spawn: ${spawnerName} -> ${name} (source: ${decision.policySource})`);
|
|
534
|
+
}
|
|
535
|
+
}
|
|
536
|
+
try {
|
|
537
|
+
// Parse CLI command and apply mapping (e.g., cursor -> agent)
|
|
538
|
+
const cliParts = cli.split(' ');
|
|
539
|
+
const rawCommandName = cliParts[0];
|
|
540
|
+
const commandName = CLI_COMMAND_MAP[rawCommandName] || rawCommandName;
|
|
541
|
+
const args = cliParts.slice(1);
|
|
542
|
+
if (commandName !== rawCommandName && debug) {
|
|
543
|
+
log.debug(`Mapped CLI '${rawCommandName}' -> '${commandName}'`);
|
|
544
|
+
}
|
|
545
|
+
// Resolve full path to avoid posix_spawnp failures
|
|
546
|
+
const command = resolveCommand(commandName);
|
|
547
|
+
if (debug)
|
|
548
|
+
log.debug(`Resolved '${commandName}' -> '${command}'`);
|
|
549
|
+
if (command === commandName && !commandName.startsWith('/')) {
|
|
550
|
+
// Command wasn't resolved - it might not exist
|
|
551
|
+
log.warn(`Could not resolve path for '${commandName}', spawn may fail`);
|
|
552
|
+
}
|
|
553
|
+
// Add --dangerously-skip-permissions for Claude agents
|
|
554
|
+
const isClaudeCli = commandName.startsWith('claude');
|
|
555
|
+
if (isClaudeCli) {
|
|
556
|
+
// Pre-configure MCP permissions to avoid approval prompts blocking initialization
|
|
557
|
+
// This creates/updates .claude/settings.local.json with:
|
|
558
|
+
// - enableAllProjectMcpServers: true
|
|
559
|
+
// - permissions.allow: ["mcp__agent-relay"]
|
|
560
|
+
ensureMcpPermissions(this.projectRoot, debug);
|
|
561
|
+
if (!args.includes('--dangerously-skip-permissions')) {
|
|
562
|
+
args.push('--dangerously-skip-permissions');
|
|
563
|
+
}
|
|
564
|
+
}
|
|
565
|
+
// Add --force for Cursor agents (CLI is 'agent', may be passed as 'cursor')
|
|
566
|
+
const isCursorCli = commandName === 'agent' || rawCommandName === 'cursor';
|
|
567
|
+
if (isCursorCli && !args.includes('--force')) {
|
|
568
|
+
args.push('--force');
|
|
569
|
+
}
|
|
570
|
+
// Apply agent config (model, --agent flag) from .claude/agents/ if available
|
|
571
|
+
// This ensures spawned agents respect their profile settings
|
|
572
|
+
if (isClaudeCli) {
|
|
573
|
+
// Get agent config for model tracking and CLI variant selection
|
|
574
|
+
const agentConfig = findAgentConfig(name, this.projectRoot);
|
|
575
|
+
const modelFromProfile = agentConfig?.model?.trim();
|
|
576
|
+
// Map model to CLI variant (e.g., 'opus' -> 'claude:opus')
|
|
577
|
+
// This allows agent profiles to specify model preferences
|
|
578
|
+
const cliVariant = modelFromProfile
|
|
579
|
+
? mapModelToCli(modelFromProfile)
|
|
580
|
+
: mapModelToCli(); // defaults to claude:sonnet
|
|
581
|
+
// Extract effective model name for logging
|
|
582
|
+
const effectiveModel = modelFromProfile || 'sonnet';
|
|
583
|
+
const configuredArgs = buildClaudeArgs(name, args, this.projectRoot);
|
|
584
|
+
// Replace args with configured version (includes --model and --agent if found)
|
|
585
|
+
args.length = 0;
|
|
586
|
+
args.push(...configuredArgs);
|
|
587
|
+
// Cost tracking: log which model is being used
|
|
588
|
+
log.info(`Agent ${name}: model=${effectiveModel}, cli=${cli}, variant=${cliVariant}`);
|
|
589
|
+
if (debug)
|
|
590
|
+
log.debug(`Applied agent config for ${name}: ${args.join(' ')}`);
|
|
591
|
+
}
|
|
592
|
+
// Add --dangerously-bypass-approvals-and-sandbox for Codex agents
|
|
593
|
+
const isCodexCli = commandName.startsWith('codex');
|
|
594
|
+
if (isCodexCli && !args.includes('--dangerously-bypass-approvals-and-sandbox')) {
|
|
595
|
+
args.push('--dangerously-bypass-approvals-and-sandbox');
|
|
596
|
+
}
|
|
597
|
+
// Add --yolo for Gemini agents (auto-accept all prompts)
|
|
598
|
+
const isGeminiCli = commandName === 'gemini';
|
|
599
|
+
if (isGeminiCli && !args.includes('--yolo')) {
|
|
600
|
+
args.push('--yolo');
|
|
601
|
+
}
|
|
602
|
+
// Check if MCP tools are available
|
|
603
|
+
// Must verify BOTH conditions (matching inbox hook behavior from commit 18bab59):
|
|
604
|
+
// 1. .mcp.json config exists in project
|
|
605
|
+
// 2. Relay daemon socket is accessible (daemon must be running)
|
|
606
|
+
// Without both, MCP context would be shown but tools wouldn't work
|
|
607
|
+
const mcpConfigPath = path.join(this.projectRoot, '.mcp.json');
|
|
608
|
+
const relaySocket = process.env.RELAY_SOCKET || '/tmp/agent-relay.sock';
|
|
609
|
+
let hasMcp = false;
|
|
610
|
+
if (fs.existsSync(mcpConfigPath)) {
|
|
611
|
+
try {
|
|
612
|
+
hasMcp = fs.statSync(relaySocket).isSocket();
|
|
613
|
+
}
|
|
614
|
+
catch {
|
|
615
|
+
// Socket doesn't exist or isn't accessible - daemon not running
|
|
616
|
+
hasMcp = false;
|
|
617
|
+
}
|
|
618
|
+
}
|
|
619
|
+
if (debug && hasMcp)
|
|
620
|
+
log.debug(`MCP tools available for ${name} (found ${mcpConfigPath} and socket ${relaySocket})`);
|
|
621
|
+
// Inject relay protocol instructions via CLI-specific system prompt
|
|
622
|
+
let relayInstructions = getRelayInstructions(name, hasMcp);
|
|
623
|
+
// Compose role-specific prompts if agent has a role defined in .claude/agents/
|
|
624
|
+
const agentConfigForRole = isClaudeCli ? findAgentConfig(name, this.projectRoot) : null;
|
|
625
|
+
if (agentConfigForRole?.role) {
|
|
626
|
+
const validRoles = ['planner', 'worker', 'reviewer', 'lead', 'shadow'];
|
|
627
|
+
const role = agentConfigForRole.role.toLowerCase();
|
|
628
|
+
if (validRoles.includes(role)) {
|
|
629
|
+
try {
|
|
630
|
+
const composed = await composeForAgent({ name, role }, this.projectRoot, { taskDescription: task });
|
|
631
|
+
if (composed.content) {
|
|
632
|
+
relayInstructions = `${composed.content}\n\n---\n\n${relayInstructions}`;
|
|
633
|
+
if (debug)
|
|
634
|
+
log.debug(`Composed role prompt for ${name} (role: ${role})`);
|
|
635
|
+
}
|
|
636
|
+
}
|
|
637
|
+
catch (err) {
|
|
638
|
+
log.warn(`Failed to compose role prompt for ${name}: ${err.message}`);
|
|
639
|
+
}
|
|
640
|
+
}
|
|
641
|
+
}
|
|
642
|
+
if (isClaudeCli && !args.includes('--append-system-prompt')) {
|
|
643
|
+
args.push('--append-system-prompt', relayInstructions);
|
|
644
|
+
}
|
|
645
|
+
else if (isCodexCli && !args.some(a => a.includes('developer_instructions'))) {
|
|
646
|
+
args.push('--config', `developer_instructions=${relayInstructions}`);
|
|
647
|
+
}
|
|
648
|
+
// Codex requires an initial prompt in TTY mode (unlike Claude which waits for input)
|
|
649
|
+
// Pass the task as the initial prompt, or a generic "ready" message if no task
|
|
650
|
+
if (isCodexCli) {
|
|
651
|
+
const initialPrompt = task || 'You are ready. Wait for messages from the relay system.';
|
|
652
|
+
args.push(initialPrompt);
|
|
653
|
+
}
|
|
654
|
+
if (debug)
|
|
655
|
+
log.debug(`Spawning ${name} with: ${command} ${args.join(' ')}`);
|
|
656
|
+
// Create PtyWrapper config
|
|
657
|
+
// Use dashboardPort for nested spawns (API-based, works in non-TTY contexts)
|
|
658
|
+
// Fall back to callbacks only if no dashboardPort is not set
|
|
659
|
+
// Note: Spawned agents CAN spawn sub-workers intentionally - the parser is strict enough
|
|
660
|
+
// to avoid accidental spawns from documentation text (requires line start, PascalCase, known CLI)
|
|
661
|
+
// Use request.cwd if specified, otherwise use projectRoot
|
|
662
|
+
const agentCwd = request.cwd || this.projectRoot;
|
|
663
|
+
// Log whether nested spawning will be enabled for this agent
|
|
664
|
+
log.info(`Spawning ${name}: dashboardPort=${this.dashboardPort || 'none'} (${this.dashboardPort ? 'nested spawns enabled' : 'nested spawns disabled'})`);
|
|
665
|
+
let userEnv;
|
|
666
|
+
if (userId) {
|
|
667
|
+
try {
|
|
668
|
+
const userDirService = getUserDirectoryService();
|
|
669
|
+
userEnv = userDirService.getUserEnvironment(userId);
|
|
670
|
+
}
|
|
671
|
+
catch (err) {
|
|
672
|
+
log.warn('Failed to resolve user environment, using default', {
|
|
673
|
+
userId,
|
|
674
|
+
error: err instanceof Error ? err.message : String(err),
|
|
675
|
+
});
|
|
676
|
+
}
|
|
677
|
+
}
|
|
678
|
+
const mergedUserEnv = { ...(userEnv ?? {}) };
|
|
679
|
+
if (!mergedUserEnv.GH_TOKEN) {
|
|
680
|
+
const ghToken = await this.resolveGhToken(userEnv?.HOME);
|
|
681
|
+
if (ghToken) {
|
|
682
|
+
mergedUserEnv.GH_TOKEN = ghToken;
|
|
683
|
+
}
|
|
684
|
+
}
|
|
685
|
+
if (Object.keys(mergedUserEnv).length > 0) {
|
|
686
|
+
userEnv = mergedUserEnv;
|
|
687
|
+
}
|
|
688
|
+
if (debug)
|
|
689
|
+
log.debug(`Socket path for ${name}: ${this.socketPath ?? 'undefined'}`);
|
|
690
|
+
// Require relay-pty binary
|
|
691
|
+
if (!hasRelayPtyBinary()) {
|
|
692
|
+
const tracedError = createTraceableError('relay-pty binary not found', {
|
|
693
|
+
agentName: name,
|
|
694
|
+
cli,
|
|
695
|
+
hint: 'Install with: npm run build:relay-pty',
|
|
696
|
+
});
|
|
697
|
+
log.error(tracedError.logMessage);
|
|
698
|
+
return {
|
|
699
|
+
success: false,
|
|
700
|
+
name,
|
|
701
|
+
error: tracedError.userMessage,
|
|
702
|
+
errorId: tracedError.errorId,
|
|
703
|
+
};
|
|
704
|
+
}
|
|
705
|
+
// Common exit handler for both wrapper types
|
|
706
|
+
const onExitHandler = (code) => {
|
|
707
|
+
if (debug)
|
|
708
|
+
log.debug(`Worker ${name} exited with code ${code}`);
|
|
709
|
+
// Get the agentId and clean up listeners before removing from active workers
|
|
710
|
+
const worker = this.activeWorkers.get(name);
|
|
711
|
+
const agentId = worker?.pty?.getAgentId?.();
|
|
712
|
+
if (worker?.listeners) {
|
|
713
|
+
this.unbindListeners(worker.pty, worker.listeners);
|
|
714
|
+
}
|
|
715
|
+
this.activeWorkers.delete(name);
|
|
716
|
+
try {
|
|
717
|
+
this.saveWorkersMetadata();
|
|
718
|
+
}
|
|
719
|
+
catch (err) {
|
|
720
|
+
log.error('Failed to save metadata on exit', { error: err instanceof Error ? err.message : String(err) });
|
|
721
|
+
}
|
|
722
|
+
// Notify if agent died unexpectedly (non-zero exit)
|
|
723
|
+
if (code !== 0 && code !== null && this.onAgentDeath) {
|
|
724
|
+
const crashError = createTraceableError('Agent crashed unexpectedly', {
|
|
725
|
+
agentName: name,
|
|
726
|
+
exitCode: code,
|
|
727
|
+
cli,
|
|
728
|
+
agentId,
|
|
729
|
+
});
|
|
730
|
+
log.error(crashError.logMessage);
|
|
731
|
+
this.onAgentDeath({
|
|
732
|
+
name,
|
|
733
|
+
exitCode: code,
|
|
734
|
+
agentId,
|
|
735
|
+
errorId: crashError.errorId,
|
|
736
|
+
resumeInstructions: agentId
|
|
737
|
+
? `To resume this agent's work, use: --resume ${agentId}`
|
|
738
|
+
: undefined,
|
|
739
|
+
});
|
|
740
|
+
}
|
|
741
|
+
};
|
|
742
|
+
// Common spawn/release handlers
|
|
743
|
+
const onSpawnHandler = this.dashboardPort ? undefined : async (workerName, workerCli, workerTask) => {
|
|
744
|
+
if (debug)
|
|
745
|
+
log.debug(`Nested spawn: ${workerName}`);
|
|
746
|
+
await this.spawn({
|
|
747
|
+
name: workerName,
|
|
748
|
+
cli: workerCli,
|
|
749
|
+
task: workerTask,
|
|
750
|
+
userId,
|
|
751
|
+
});
|
|
752
|
+
};
|
|
753
|
+
const onReleaseHandler = this.dashboardPort ? undefined : async (workerName) => {
|
|
754
|
+
if (debug)
|
|
755
|
+
log.debug(`Release request: ${workerName}`);
|
|
756
|
+
await this.release(workerName);
|
|
757
|
+
};
|
|
758
|
+
// Create RelayPtyOrchestrator (relay-pty Rust binary)
|
|
759
|
+
const ptyConfig = {
|
|
760
|
+
name,
|
|
761
|
+
command,
|
|
762
|
+
args,
|
|
763
|
+
socketPath: this.socketPath,
|
|
764
|
+
cwd: agentCwd,
|
|
765
|
+
dashboardPort: this.dashboardPort,
|
|
766
|
+
env: {
|
|
767
|
+
...userEnv,
|
|
768
|
+
...(spawnerName ? { AGENT_RELAY_SPAWNER: spawnerName } : {}),
|
|
769
|
+
},
|
|
770
|
+
streamLogs: true,
|
|
771
|
+
shadowOf: request.shadowOf,
|
|
772
|
+
shadowSpeakOn: request.shadowSpeakOn,
|
|
773
|
+
skipContinuity: true,
|
|
774
|
+
onSpawn: onSpawnHandler,
|
|
775
|
+
onRelease: onReleaseHandler,
|
|
776
|
+
onExit: onExitHandler,
|
|
777
|
+
headless: true, // Force headless mode for spawned agents to enable task injection via stdin
|
|
778
|
+
};
|
|
779
|
+
const pty = new RelayPtyOrchestrator(ptyConfig);
|
|
780
|
+
if (debug)
|
|
781
|
+
log.debug(`Using RelayPtyOrchestrator for ${name}`);
|
|
782
|
+
// Track listener references for proper cleanup
|
|
783
|
+
const listeners = {};
|
|
784
|
+
// Hook up output events for live log streaming
|
|
785
|
+
const outputListener = (data) => {
|
|
786
|
+
// Broadcast to any connected WebSocket clients via global function
|
|
787
|
+
const broadcast = global.__broadcastLogOutput;
|
|
788
|
+
if (broadcast) {
|
|
789
|
+
broadcast(name, data);
|
|
790
|
+
}
|
|
791
|
+
};
|
|
792
|
+
pty.on('output', outputListener);
|
|
793
|
+
listeners.output = outputListener;
|
|
794
|
+
// Bind cloud persistence events (if enabled) and store references
|
|
795
|
+
const cloudListeners = this.bindCloudPersistenceEvents(name, pty);
|
|
796
|
+
if (cloudListeners.summary)
|
|
797
|
+
listeners.summary = cloudListeners.summary;
|
|
798
|
+
if (cloudListeners.sessionEnd)
|
|
799
|
+
listeners.sessionEnd = cloudListeners.sessionEnd;
|
|
800
|
+
// Mark agent as spawning BEFORE starting PTY
|
|
801
|
+
// This allows messages sent to this agent to be queued until HELLO completes
|
|
802
|
+
if (this.onMarkSpawning) {
|
|
803
|
+
this.onMarkSpawning(name);
|
|
804
|
+
if (debug)
|
|
805
|
+
log.debug(`Marked ${name} as spawning`);
|
|
806
|
+
}
|
|
807
|
+
await pty.start();
|
|
808
|
+
if (debug)
|
|
809
|
+
log.debug(`PTY started, pid: ${pty.pid}`);
|
|
810
|
+
// Wait for the agent to register with the daemon
|
|
811
|
+
const registered = await this.waitForAgentRegistration(name, 30_000, 500);
|
|
812
|
+
if (!registered) {
|
|
813
|
+
const tracedError = createTraceableError('Agent registration timeout', {
|
|
814
|
+
agentName: name,
|
|
815
|
+
cli,
|
|
816
|
+
pid: pty.pid,
|
|
817
|
+
timeoutMs: 30_000,
|
|
818
|
+
});
|
|
819
|
+
log.error(tracedError.logMessage);
|
|
820
|
+
// Clear spawning flag since spawn failed
|
|
821
|
+
if (this.onClearSpawning) {
|
|
822
|
+
this.onClearSpawning(name);
|
|
823
|
+
}
|
|
824
|
+
await pty.kill();
|
|
825
|
+
return {
|
|
826
|
+
success: false,
|
|
827
|
+
name,
|
|
828
|
+
error: tracedError.userMessage,
|
|
829
|
+
errorId: tracedError.errorId,
|
|
830
|
+
};
|
|
831
|
+
}
|
|
832
|
+
// Send task to the newly spawned agent if provided
|
|
833
|
+
// We do this AFTER registration AND after the orchestrator is FULLY ready for messages
|
|
834
|
+
// This includes: CLI started, CLI idle, socket connected, readyForMessages flag set
|
|
835
|
+
if (task && task.trim()) {
|
|
836
|
+
const maxRetries = 3;
|
|
837
|
+
const retryDelayMs = 2000;
|
|
838
|
+
let taskSent = false;
|
|
839
|
+
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
|
840
|
+
try {
|
|
841
|
+
// Wait for full orchestrator readiness (CLI + socket + internal flags)
|
|
842
|
+
if ('waitUntilReadyForMessages' in pty) {
|
|
843
|
+
const orchestrator = pty;
|
|
844
|
+
const ready = await orchestrator.waitUntilReadyForMessages(20000, 100);
|
|
845
|
+
if (!ready) {
|
|
846
|
+
// Log retry attempts at DEBUG level to avoid terminal noise
|
|
847
|
+
log.debug(`Attempt ${attempt}/${maxRetries}: ${name} not ready for messages within timeout`);
|
|
848
|
+
if (attempt < maxRetries) {
|
|
849
|
+
await sleep(retryDelayMs);
|
|
850
|
+
continue;
|
|
851
|
+
}
|
|
852
|
+
log.error(`${name} failed to become ready after ${maxRetries} attempts - task may be lost`);
|
|
853
|
+
break;
|
|
854
|
+
}
|
|
855
|
+
}
|
|
856
|
+
else if ('waitUntilCliReady' in pty) {
|
|
857
|
+
// Fallback for older wrapper types
|
|
858
|
+
await pty.waitUntilCliReady(15000, 100);
|
|
859
|
+
}
|
|
860
|
+
// Inject task via socket (with verification and retries)
|
|
861
|
+
const success = await pty.injectTask(task, spawnerName || 'spawner');
|
|
862
|
+
if (success) {
|
|
863
|
+
taskSent = true;
|
|
864
|
+
if (debug)
|
|
865
|
+
log.debug(`Task injected to ${name} (attempt ${attempt})`);
|
|
866
|
+
break;
|
|
867
|
+
}
|
|
868
|
+
else {
|
|
869
|
+
throw new Error('Task injection returned false');
|
|
870
|
+
}
|
|
871
|
+
}
|
|
872
|
+
catch (err) {
|
|
873
|
+
// Log retry attempts at DEBUG level to avoid terminal noise
|
|
874
|
+
// Only the final summary (if all attempts fail) is logged at ERROR level
|
|
875
|
+
log.debug(`Attempt ${attempt}/${maxRetries}: Error injecting task for ${name}: ${err.message}`);
|
|
876
|
+
if (attempt < maxRetries) {
|
|
877
|
+
await sleep(retryDelayMs);
|
|
878
|
+
}
|
|
879
|
+
}
|
|
880
|
+
}
|
|
881
|
+
if (!taskSent) {
|
|
882
|
+
const tracedError = createTraceableError('Task injection failed', {
|
|
883
|
+
agentName: name,
|
|
884
|
+
cli,
|
|
885
|
+
attempts: maxRetries,
|
|
886
|
+
taskLength: task.length,
|
|
887
|
+
});
|
|
888
|
+
log.error(`CRITICAL: ${tracedError.logMessage}`);
|
|
889
|
+
// Note: We don't return an error here because the agent is running,
|
|
890
|
+
// but we track the errorId so support can investigate if user reports it
|
|
891
|
+
}
|
|
892
|
+
}
|
|
893
|
+
// Track the worker
|
|
894
|
+
const workerInfo = {
|
|
895
|
+
name,
|
|
896
|
+
cli,
|
|
897
|
+
task,
|
|
898
|
+
team,
|
|
899
|
+
userId,
|
|
900
|
+
spawnedAt: Date.now(),
|
|
901
|
+
pid: pty.pid,
|
|
902
|
+
pty,
|
|
903
|
+
logFile: pty.logPath,
|
|
904
|
+
listeners, // Store for cleanup
|
|
905
|
+
};
|
|
906
|
+
this.activeWorkers.set(name, workerInfo);
|
|
907
|
+
this.saveWorkersMetadata();
|
|
908
|
+
const teamInfo = team ? ` [team: ${team}]` : '';
|
|
909
|
+
const shadowInfo = request.shadowOf ? ` [shadow of: ${request.shadowOf}]` : '';
|
|
910
|
+
log.info(`Spawned ${name} (${cli})${teamInfo}${shadowInfo} [pid: ${pty.pid}]`);
|
|
911
|
+
return {
|
|
912
|
+
success: true,
|
|
913
|
+
name,
|
|
914
|
+
pid: pty.pid,
|
|
915
|
+
};
|
|
916
|
+
}
|
|
917
|
+
catch (err) {
|
|
918
|
+
const tracedError = createTraceableError('Agent spawn failed', {
|
|
919
|
+
agentName: name,
|
|
920
|
+
cli,
|
|
921
|
+
task: task?.substring(0, 100),
|
|
922
|
+
}, err instanceof Error ? err : undefined);
|
|
923
|
+
log.error(tracedError.logMessage);
|
|
924
|
+
if (debug)
|
|
925
|
+
log.debug('Full error', { error: err?.stack || String(err) });
|
|
926
|
+
// Clear spawning flag since spawn failed
|
|
927
|
+
if (this.onClearSpawning) {
|
|
928
|
+
this.onClearSpawning(name);
|
|
929
|
+
}
|
|
930
|
+
return {
|
|
931
|
+
success: false,
|
|
932
|
+
name,
|
|
933
|
+
error: tracedError.userMessage,
|
|
934
|
+
errorId: tracedError.errorId,
|
|
935
|
+
};
|
|
936
|
+
}
|
|
937
|
+
}
|
|
938
|
+
/** Role presets for shadow agents */
|
|
939
|
+
static ROLE_PRESETS = {
|
|
940
|
+
reviewer: ['CODE_WRITTEN', 'REVIEW_REQUEST', 'EXPLICIT_ASK'],
|
|
941
|
+
auditor: ['SESSION_END', 'EXPLICIT_ASK'],
|
|
942
|
+
active: ['ALL_MESSAGES'],
|
|
943
|
+
};
|
|
944
|
+
/**
|
|
945
|
+
* Spawn a primary agent with its shadow agent
|
|
946
|
+
*
|
|
947
|
+
* Example usage:
|
|
948
|
+
* ```ts
|
|
949
|
+
* const result = await spawner.spawnWithShadow({
|
|
950
|
+
* primary: { name: 'Lead', command: 'claude', task: 'Implement feature X' },
|
|
951
|
+
* shadow: { name: 'Auditor', role: 'reviewer', speakOn: ['CODE_WRITTEN'] }
|
|
952
|
+
* });
|
|
953
|
+
* ```
|
|
954
|
+
*/
|
|
955
|
+
async spawnWithShadow(request) {
|
|
956
|
+
const { primary, shadow } = request;
|
|
957
|
+
const debug = process.env.DEBUG_SPAWN === '1';
|
|
958
|
+
// Resolve shadow speakOn triggers
|
|
959
|
+
let speakOn = ['EXPLICIT_ASK']; // Default
|
|
960
|
+
// Check for role preset
|
|
961
|
+
if (shadow.role && AgentSpawner.ROLE_PRESETS[shadow.role.toLowerCase()]) {
|
|
962
|
+
speakOn = AgentSpawner.ROLE_PRESETS[shadow.role.toLowerCase()];
|
|
963
|
+
}
|
|
964
|
+
// Override with explicit speakOn if provided
|
|
965
|
+
if (shadow.speakOn && shadow.speakOn.length > 0) {
|
|
966
|
+
speakOn = shadow.speakOn;
|
|
967
|
+
}
|
|
968
|
+
// Build shadow task prompt
|
|
969
|
+
const defaultPrompt = `You are a shadow agent monitoring "${primary.name}". You receive copies of their messages. Your role: ${shadow.role || 'observer'}. Stay passive unless your triggers activate: ${speakOn.join(', ')}.`;
|
|
970
|
+
const shadowTask = shadow.prompt || defaultPrompt;
|
|
971
|
+
// Decide how to run the shadow (subagent for Claude/OpenCode primaries, process fallback otherwise)
|
|
972
|
+
let shadowSelection = null;
|
|
973
|
+
try {
|
|
974
|
+
shadowSelection = await selectShadowCli(primary.command || 'claude', {
|
|
975
|
+
preferredShadowCli: shadow.command,
|
|
976
|
+
});
|
|
977
|
+
}
|
|
978
|
+
catch (err) {
|
|
979
|
+
log.warn(`Shadow CLI selection failed for ${shadow.name}: ${err.message}`);
|
|
980
|
+
}
|
|
981
|
+
if (debug) {
|
|
982
|
+
const mode = shadowSelection?.mode ?? 'unknown';
|
|
983
|
+
const cli = shadowSelection?.command ?? shadow.command ?? primary.command ?? 'claude';
|
|
984
|
+
log.debug(`spawnWithShadow: primary=${primary.name}, shadow=${shadow.name}, mode=${mode}, cli=${cli}, speakOn=${speakOn.join(',')}`);
|
|
985
|
+
}
|
|
986
|
+
// Step 1: Spawn primary agent
|
|
987
|
+
const primaryResult = await this.spawn({
|
|
988
|
+
name: primary.name,
|
|
989
|
+
cli: primary.command || 'claude',
|
|
990
|
+
task: primary.task || '',
|
|
991
|
+
team: primary.team,
|
|
992
|
+
});
|
|
993
|
+
if (!primaryResult.success) {
|
|
994
|
+
return {
|
|
995
|
+
success: false,
|
|
996
|
+
primary: primaryResult,
|
|
997
|
+
error: `Failed to spawn primary agent: ${primaryResult.error}`,
|
|
998
|
+
};
|
|
999
|
+
}
|
|
1000
|
+
// Step 2: Wait for primary to register before spawning shadow
|
|
1001
|
+
// The spawn() method already waits, but we add a small delay for stability
|
|
1002
|
+
await sleep(1000);
|
|
1003
|
+
// Subagent mode: no separate process needed
|
|
1004
|
+
if (shadowSelection?.mode === 'subagent') {
|
|
1005
|
+
log.info(`Shadow ${shadow.name} will run as ${shadowSelection.cli} subagent inside ${primary.name} (no separate process)`);
|
|
1006
|
+
return {
|
|
1007
|
+
success: true,
|
|
1008
|
+
primary: primaryResult,
|
|
1009
|
+
shadow: {
|
|
1010
|
+
success: true,
|
|
1011
|
+
name: shadow.name,
|
|
1012
|
+
},
|
|
1013
|
+
};
|
|
1014
|
+
}
|
|
1015
|
+
// No available shadow CLI - proceed without spawning a shadow process
|
|
1016
|
+
if (!shadowSelection) {
|
|
1017
|
+
log.warn(`No authenticated shadow CLI available; ${primary.name} will run without a shadow`);
|
|
1018
|
+
return {
|
|
1019
|
+
success: true,
|
|
1020
|
+
primary: primaryResult,
|
|
1021
|
+
error: 'Shadow spawn skipped: no authenticated shadow CLI available',
|
|
1022
|
+
};
|
|
1023
|
+
}
|
|
1024
|
+
// Step 3: Spawn shadow agent with shadowOf and shadowSpeakOn
|
|
1025
|
+
const shadowResult = await this.spawn({
|
|
1026
|
+
name: shadow.name,
|
|
1027
|
+
// Use the selected/validated CLI for process-mode shadows
|
|
1028
|
+
cli: shadowSelection.command || shadow.command || primary.command || 'claude',
|
|
1029
|
+
task: shadowTask,
|
|
1030
|
+
shadowOf: primary.name,
|
|
1031
|
+
shadowSpeakOn: speakOn,
|
|
1032
|
+
});
|
|
1033
|
+
if (!shadowResult.success) {
|
|
1034
|
+
log.warn(`Shadow agent ${shadow.name} failed to spawn, primary ${primary.name} continues without shadow`);
|
|
1035
|
+
return {
|
|
1036
|
+
success: true, // Primary succeeded, overall operation is partial success
|
|
1037
|
+
primary: primaryResult,
|
|
1038
|
+
shadow: shadowResult,
|
|
1039
|
+
error: `Shadow spawn failed: ${shadowResult.error}`,
|
|
1040
|
+
};
|
|
1041
|
+
}
|
|
1042
|
+
log.info(`Spawned pair: ${primary.name} with shadow ${shadow.name} (speakOn: ${speakOn.join(',')})`);
|
|
1043
|
+
return {
|
|
1044
|
+
success: true,
|
|
1045
|
+
primary: primaryResult,
|
|
1046
|
+
shadow: shadowResult,
|
|
1047
|
+
};
|
|
1048
|
+
}
|
|
1049
|
+
/**
|
|
1050
|
+
* Release (terminate) a worker
|
|
1051
|
+
*/
|
|
1052
|
+
async release(name) {
|
|
1053
|
+
const worker = this.activeWorkers.get(name);
|
|
1054
|
+
if (!worker) {
|
|
1055
|
+
log.debug(`Worker ${name} not found`);
|
|
1056
|
+
return false;
|
|
1057
|
+
}
|
|
1058
|
+
try {
|
|
1059
|
+
// Unbind all listeners first to prevent memory leaks
|
|
1060
|
+
this.unbindListeners(worker.pty, worker.listeners);
|
|
1061
|
+
// Stop the pty process gracefully (handles auto-save internally)
|
|
1062
|
+
await worker.pty.stop();
|
|
1063
|
+
// Force kill if still running
|
|
1064
|
+
if (worker.pty.isRunning) {
|
|
1065
|
+
await worker.pty.kill();
|
|
1066
|
+
}
|
|
1067
|
+
this.activeWorkers.delete(name);
|
|
1068
|
+
this.saveWorkersMetadata();
|
|
1069
|
+
log.info(`Released ${name}`);
|
|
1070
|
+
return true;
|
|
1071
|
+
}
|
|
1072
|
+
catch (err) {
|
|
1073
|
+
log.error(`Failed to release ${name}: ${err.message}`);
|
|
1074
|
+
// Still unbind and remove from tracking
|
|
1075
|
+
this.unbindListeners(worker.pty, worker.listeners);
|
|
1076
|
+
this.activeWorkers.delete(name);
|
|
1077
|
+
this.saveWorkersMetadata();
|
|
1078
|
+
return false;
|
|
1079
|
+
}
|
|
1080
|
+
}
|
|
1081
|
+
/**
|
|
1082
|
+
* Release all workers
|
|
1083
|
+
*/
|
|
1084
|
+
async releaseAll() {
|
|
1085
|
+
const workers = Array.from(this.activeWorkers.keys());
|
|
1086
|
+
for (const name of workers) {
|
|
1087
|
+
await this.release(name);
|
|
1088
|
+
}
|
|
1089
|
+
}
|
|
1090
|
+
/**
|
|
1091
|
+
* Get all active workers (returns WorkerInfo without pty reference)
|
|
1092
|
+
*/
|
|
1093
|
+
getActiveWorkers() {
|
|
1094
|
+
return Array.from(this.activeWorkers.values()).map((w) => ({
|
|
1095
|
+
name: w.name,
|
|
1096
|
+
cli: w.cli,
|
|
1097
|
+
task: w.task,
|
|
1098
|
+
team: w.team,
|
|
1099
|
+
spawnedAt: w.spawnedAt,
|
|
1100
|
+
pid: w.pid,
|
|
1101
|
+
}));
|
|
1102
|
+
}
|
|
1103
|
+
/**
 * Check whether a worker with the given name is currently tracked as active.
 * @param name - Worker name to look up.
 * @returns true if the worker is in the active set, false otherwise.
 */
hasWorker(name) {
    return this.activeWorkers.has(name);
}
|
|
1109
|
+
/**
|
|
1110
|
+
* Get worker info
|
|
1111
|
+
*/
|
|
1112
|
+
getWorker(name) {
|
|
1113
|
+
const worker = this.activeWorkers.get(name);
|
|
1114
|
+
if (!worker)
|
|
1115
|
+
return undefined;
|
|
1116
|
+
return {
|
|
1117
|
+
name: worker.name,
|
|
1118
|
+
cli: worker.cli,
|
|
1119
|
+
task: worker.task,
|
|
1120
|
+
team: worker.team,
|
|
1121
|
+
spawnedAt: worker.spawnedAt,
|
|
1122
|
+
pid: worker.pid,
|
|
1123
|
+
};
|
|
1124
|
+
}
|
|
1125
|
+
/**
|
|
1126
|
+
* Get output logs from a worker
|
|
1127
|
+
*/
|
|
1128
|
+
getWorkerOutput(name, limit) {
|
|
1129
|
+
const worker = this.activeWorkers.get(name);
|
|
1130
|
+
if (!worker)
|
|
1131
|
+
return null;
|
|
1132
|
+
return worker.pty.getOutput(limit);
|
|
1133
|
+
}
|
|
1134
|
+
/**
|
|
1135
|
+
* Get raw output from a worker
|
|
1136
|
+
*/
|
|
1137
|
+
getWorkerRawOutput(name) {
|
|
1138
|
+
const worker = this.activeWorkers.get(name);
|
|
1139
|
+
if (!worker)
|
|
1140
|
+
return null;
|
|
1141
|
+
return worker.pty.getRawOutput();
|
|
1142
|
+
}
|
|
1143
|
+
/**
|
|
1144
|
+
* Send input to a worker's PTY (for interactive terminal support)
|
|
1145
|
+
* @param name - Worker name
|
|
1146
|
+
* @param data - Input data to send (keystrokes, text, etc.)
|
|
1147
|
+
* @returns true if input was sent, false if worker not found
|
|
1148
|
+
*/
|
|
1149
|
+
sendWorkerInput(name, data) {
|
|
1150
|
+
const worker = this.activeWorkers.get(name);
|
|
1151
|
+
if (!worker)
|
|
1152
|
+
return false;
|
|
1153
|
+
worker.pty.write(data);
|
|
1154
|
+
return true;
|
|
1155
|
+
}
|
|
1156
|
+
/**
|
|
1157
|
+
* Wait for an agent to appear in the connected list and registry (connected-agents.json + agents.json).
|
|
1158
|
+
*/
|
|
1159
|
+
async waitForAgentRegistration(name, timeoutMs = 30_000, pollIntervalMs = 500) {
|
|
1160
|
+
const deadline = Date.now() + timeoutMs;
|
|
1161
|
+
while (Date.now() < deadline) {
|
|
1162
|
+
if (this.isAgentRegistered(name)) {
|
|
1163
|
+
return true;
|
|
1164
|
+
}
|
|
1165
|
+
await sleep(pollIntervalMs);
|
|
1166
|
+
}
|
|
1167
|
+
return false;
|
|
1168
|
+
}
|
|
1169
|
+
isAgentRegistered(name) {
|
|
1170
|
+
return this.isAgentConnected(name) && this.isAgentRecentlySeen(name);
|
|
1171
|
+
}
|
|
1172
|
+
isAgentConnected(name) {
|
|
1173
|
+
if (!this.agentsPath)
|
|
1174
|
+
return false;
|
|
1175
|
+
if (!fs.existsSync(this.agentsPath))
|
|
1176
|
+
return false;
|
|
1177
|
+
try {
|
|
1178
|
+
const raw = JSON.parse(fs.readFileSync(this.agentsPath, 'utf-8'));
|
|
1179
|
+
// connected-agents.json format: { agents: string[], users: string[], updatedAt: number }
|
|
1180
|
+
// agents is a string array of connected agent names (not objects)
|
|
1181
|
+
const agents = Array.isArray(raw?.agents) ? raw.agents : [];
|
|
1182
|
+
const updatedAt = typeof raw?.updatedAt === 'number' ? raw.updatedAt : 0;
|
|
1183
|
+
const isFresh = Date.now() - updatedAt <= AgentSpawner.ONLINE_THRESHOLD_MS;
|
|
1184
|
+
if (!isFresh)
|
|
1185
|
+
return false;
|
|
1186
|
+
// Case-insensitive check to match router behavior
|
|
1187
|
+
const lowerName = name.toLowerCase();
|
|
1188
|
+
return agents.some((a) => typeof a === 'string' && a.toLowerCase() === lowerName);
|
|
1189
|
+
}
|
|
1190
|
+
catch (err) {
|
|
1191
|
+
log.error('Failed to read connected-agents.json', { error: err.message });
|
|
1192
|
+
return false;
|
|
1193
|
+
}
|
|
1194
|
+
}
|
|
1195
|
+
isAgentRecentlySeen(name) {
|
|
1196
|
+
if (!this.registryPath)
|
|
1197
|
+
return false;
|
|
1198
|
+
if (!fs.existsSync(this.registryPath))
|
|
1199
|
+
return false;
|
|
1200
|
+
try {
|
|
1201
|
+
const raw = JSON.parse(fs.readFileSync(this.registryPath, 'utf-8'));
|
|
1202
|
+
const agents = Array.isArray(raw?.agents)
|
|
1203
|
+
? raw.agents
|
|
1204
|
+
: typeof raw?.agents === 'object' && raw?.agents !== null
|
|
1205
|
+
? Object.values(raw.agents)
|
|
1206
|
+
: [];
|
|
1207
|
+
const lowerName = name.toLowerCase();
|
|
1208
|
+
const agent = agents.find((entry) => typeof entry?.name === 'string' && entry.name.toLowerCase() === lowerName);
|
|
1209
|
+
if (!agent?.lastSeen)
|
|
1210
|
+
return false;
|
|
1211
|
+
return Date.now() - new Date(agent.lastSeen).getTime() <= AgentSpawner.ONLINE_THRESHOLD_MS;
|
|
1212
|
+
}
|
|
1213
|
+
catch (err) {
|
|
1214
|
+
log.error('Failed to read agents.json', { error: err.message });
|
|
1215
|
+
return false;
|
|
1216
|
+
}
|
|
1217
|
+
}
|
|
1218
|
+
/**
|
|
1219
|
+
* Save workers metadata to disk for CLI access
|
|
1220
|
+
*/
|
|
1221
|
+
saveWorkersMetadata() {
|
|
1222
|
+
try {
|
|
1223
|
+
const workers = Array.from(this.activeWorkers.values()).map((w) => ({
|
|
1224
|
+
name: w.name,
|
|
1225
|
+
cli: w.cli,
|
|
1226
|
+
task: w.task,
|
|
1227
|
+
team: w.team,
|
|
1228
|
+
userId: w.userId,
|
|
1229
|
+
spawnedAt: w.spawnedAt,
|
|
1230
|
+
pid: w.pid,
|
|
1231
|
+
logFile: w.logFile,
|
|
1232
|
+
}));
|
|
1233
|
+
fs.writeFileSync(this.workersPath, JSON.stringify({ workers }, null, 2));
|
|
1234
|
+
}
|
|
1235
|
+
catch (err) {
|
|
1236
|
+
log.error('Failed to save workers metadata', { error: err.message });
|
|
1237
|
+
}
|
|
1238
|
+
}
|
|
1239
|
+
/**
 * Get the path to the logs directory held by this spawner.
 * @returns The spawner's configured logs directory path.
 */
getLogsDir() {
    return this.logsDir;
}
|
|
1245
|
+
/**
 * Get the path to the workers metadata file (workers.json) used by
 * saveWorkersMetadata / readWorkersMetadata.
 * @returns The spawner's configured workers metadata file path.
 */
getWorkersPath() {
    return this.workersPath;
}
|
|
1251
|
+
}
|
|
1252
|
+
/**
 * Read workers metadata from disk (for CLI use).
 * Best-effort: a missing file, unreadable file, or malformed JSON all yield [].
 * @param projectRoot - Project root used to resolve the team directory.
 * @returns Array of worker metadata records (possibly empty).
 */
export function readWorkersMetadata(projectRoot) {
    const { teamDir } = getProjectPaths(projectRoot);
    const workersPath = path.join(teamDir, 'workers.json');
    if (!fs.existsSync(workersPath)) {
        return [];
    }
    try {
        const parsed = JSON.parse(fs.readFileSync(workersPath, 'utf-8'));
        return Array.isArray(parsed?.workers) ? parsed.workers : [];
    }
    catch {
        // Corrupt or unreadable metadata is treated the same as no metadata.
        return [];
    }
}
|
|
1269
|
+
/**
 * Get the worker logs directory path for a project.
 * @param projectRoot - Project root used to resolve the team directory.
 * @returns Path to the 'worker-logs' directory under the project's team dir.
 */
export function getWorkerLogsDir(projectRoot) {
    const { teamDir } = getProjectPaths(projectRoot);
    return path.join(teamDir, 'worker-logs');
}
|
|
1276
|
+
//# sourceMappingURL=spawner.js.map
|