teleportation-cli 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/hooks/config-loader.mjs +93 -0
- package/.claude/hooks/heartbeat.mjs +331 -0
- package/.claude/hooks/notification.mjs +35 -0
- package/.claude/hooks/permission_request.mjs +307 -0
- package/.claude/hooks/post_tool_use.mjs +137 -0
- package/.claude/hooks/pre_tool_use.mjs +451 -0
- package/.claude/hooks/session-register.mjs +274 -0
- package/.claude/hooks/session_end.mjs +256 -0
- package/.claude/hooks/session_start.mjs +308 -0
- package/.claude/hooks/stop.mjs +277 -0
- package/.claude/hooks/user_prompt_submit.mjs +91 -0
- package/LICENSE +21 -0
- package/README.md +243 -0
- package/lib/auth/api-key.js +110 -0
- package/lib/auth/credentials.js +341 -0
- package/lib/backup/manager.js +461 -0
- package/lib/cli/daemon-commands.js +299 -0
- package/lib/cli/index.js +303 -0
- package/lib/cli/session-commands.js +294 -0
- package/lib/cli/snapshot-commands.js +223 -0
- package/lib/cli/worktree-commands.js +291 -0
- package/lib/config/manager.js +306 -0
- package/lib/daemon/lifecycle.js +336 -0
- package/lib/daemon/pid-manager.js +160 -0
- package/lib/daemon/teleportation-daemon.js +2009 -0
- package/lib/handoff/config.js +102 -0
- package/lib/handoff/example.js +152 -0
- package/lib/handoff/git-handoff.js +351 -0
- package/lib/handoff/handoff.js +277 -0
- package/lib/handoff/index.js +25 -0
- package/lib/handoff/session-state.js +238 -0
- package/lib/install/installer.js +555 -0
- package/lib/machine-coders/claude-code-adapter.js +329 -0
- package/lib/machine-coders/example.js +239 -0
- package/lib/machine-coders/gemini-cli-adapter.js +406 -0
- package/lib/machine-coders/index.js +103 -0
- package/lib/machine-coders/interface.js +168 -0
- package/lib/router/classifier.js +251 -0
- package/lib/router/example.js +92 -0
- package/lib/router/index.js +69 -0
- package/lib/router/mech-llms-client.js +277 -0
- package/lib/router/models.js +188 -0
- package/lib/router/router.js +382 -0
- package/lib/session/cleanup.js +100 -0
- package/lib/session/metadata.js +258 -0
- package/lib/session/mute-checker.js +114 -0
- package/lib/session-registry/manager.js +302 -0
- package/lib/snapshot/manager.js +390 -0
- package/lib/utils/errors.js +166 -0
- package/lib/utils/logger.js +148 -0
- package/lib/utils/retry.js +155 -0
- package/lib/worktree/manager.js +301 -0
- package/package.json +66 -0
- package/teleportation-cli.cjs +2987 -0
package/lib/daemon/teleportation-daemon.js
@@ -0,0 +1,2009 @@
#!/usr/bin/env node

/**
 * Teleportation Daemon
 *
 * Persistent background service that:
 * - Polls relay API for approved tool requests
 * - Spawns child Claude Code processes via `claude --resume <session_id> -p "<prompt>"`
 * - Executes approved tools asynchronously when user is away
 * - Maintains session registry and approval queue
 * - Provides HTTP server for hook communication
 *
 * SECURITY ARCHITECTURE:
 * ----------------------
 * This daemon executes shell commands via spawn('sh', ['-c', command]) which bypasses
 * Claude CLI's built-in security controls. This is an intentional architectural decision
 * to enable remote approval/execution, but requires defense-in-depth measures:
 *
 * 1. COMMAND WHITELIST: Only pre-approved command prefixes are allowed (see ALLOWED_COMMAND_PREFIXES)
 * 2. SHELL INJECTION BLOCKING: Commands containing metacharacters (;|&`$() etc.) are rejected
 * 3. APPROVAL FLOW: All commands must be explicitly approved via the relay API
 * 4. DEVELOPMENT BYPASS: ALLOW_ALL_COMMANDS requires TELEPORTATION_DANGER_ZONE confirmation
 *
 * For production deployments requiring Claude CLI integration, consider:
 * - Using the CLAUDE_CLI_PATH environment variable to specify a custom Claude CLI wrapper
 * - Implementing additional command validation in a proxy layer
 * - Enabling audit logging by setting DEBUG=1
 */

import http from 'http';
import { fileURLToPath } from 'url';
import { spawn, exec } from 'child_process';
import { promisify } from 'util';
import { acquirePidLock, releasePidLock } from './pid-manager.js';
import { setupSignalHandlers } from './lifecycle.js';

// Machine coder adapters for multi-provider support
import { getAvailableCoders, getBestCoder } from '../machine-coders/index.js';

// Cost-aware model router for LLM calls
import { createRouter, classifyTask } from '../router/index.js';

const execAsync = promisify(exec);
console.log('[daemon] Starting up...');

const PORT = parseInt(process.env.TELEPORTATION_DAEMON_PORT || '3050', 10);
const RELAY_API_URL = process.env.RELAY_API_URL || 'https://api.teleportation.dev';
const RELAY_API_KEY = process.env.RELAY_API_KEY || '';
const POLL_INTERVAL_MS = parseInt(process.env.DAEMON_POLL_INTERVAL_MS || '5000', 10);
const CHILD_TIMEOUT_MS = parseInt(process.env.DAEMON_CHILD_TIMEOUT_MS || '600000', 10); // 10 min
const IDLE_CHECK_INTERVAL_MS = parseInt(process.env.DAEMON_IDLE_CHECK_INTERVAL_MS || '300000', 10); // 5 min
const IDLE_TIMEOUT_MS = parseInt(process.env.DAEMON_IDLE_TIMEOUT_MS || '1800000', 10); // 30 min
const CLAUDE_CLI = process.env.CLAUDE_CLI_PATH || 'claude'; // Configurable Claude CLI path
const ALLOW_ALL_COMMANDS = process.env.TELEPORTATION_DAEMON_ALLOW_ALL_COMMANDS === 'true';
const HEARTBEAT_INTERVAL_MS = parseInt(process.env.DAEMON_HEARTBEAT_INTERVAL_MS || '30000', 10); // 30 sec default

// Machine coder configuration
// PREFERRED_CODER: 'claude-code' | 'gemini-cli' | 'auto' (default: auto)
// 'auto' will use Claude Code if available, otherwise Gemini CLI
const PREFERRED_CODER = process.env.TELEPORTATION_PREFERRED_CODER || 'auto';
// USE_GEMINI_FOR_LARGE_CONTEXT: If true, prefer Gemini for tasks that might benefit from large context
const USE_GEMINI_FOR_LARGE_CONTEXT = process.env.TELEPORTATION_USE_GEMINI_FOR_LARGE_CONTEXT === 'true';

// Router configuration for cost-aware LLM calls
// MECH_API_KEY: Required for router API calls (from env)
// ROUTER_ENABLED: Enable cost-aware routing (default: true if MECH_API_KEY is set)
const ROUTER_ENABLED = process.env.TELEPORTATION_ROUTER_ENABLED !== 'false' && !!process.env.MECH_API_KEY;
const ROUTER_VERBOSE = process.env.TELEPORTATION_ROUTER_VERBOSE === 'true';
const ROUTER_MAX_ESCALATIONS = parseInt(process.env.TELEPORTATION_ROUTER_MAX_ESCALATIONS || '2', 10);

// Lazy-initialized router instance
let _router = null;
function getRouter() {
  if (!_router && ROUTER_ENABLED) {
    _router = createRouter({
      apiKey: process.env.MECH_API_KEY,
      verbose: ROUTER_VERBOSE,
      maxEscalations: ROUTER_MAX_ESCALATIONS,
    });
    console.log('[daemon] Cost-aware router initialized');
  }
  return _router;
}

/**
 * Make a cost-aware LLM completion using the router.
 * Automatically classifies the task and selects the cheapest capable model.
 * Falls back to direct API call if router is not enabled.
 *
 * @param {Object} options
 * @param {string} options.prompt - User prompt (for classification)
 * @param {Array} options.messages - Chat messages
 * @param {Object} [options.context] - Context for classification (fileCount, diffLines, etc.)
 * @param {number} [options.maxTokens] - Max tokens
 * @param {number} [options.temperature] - Temperature
 * @param {string} [options.forceTier] - Force a specific tier (cheap/mid/best)
 * @param {string} [options.forceModel] - Force a specific model
 * @returns {Promise<Object>} Result with content, model, cost, tier, usage
 */
async function routedCompletion(options) {
  const router = getRouter();

  if (!router) {
    // Router not enabled - return error or use fallback
    console.warn('[daemon] Router not enabled (MECH_API_KEY not set). Cannot make LLM completion.');
    return {
      success: false,
      error: 'Router not enabled. Set MECH_API_KEY to enable cost-aware routing.',
      content: null,
      model: null,
      cost: 0,
      tier: null,
    };
  }

  try {
    const result = await router.route(options);

    console.log(`[daemon] Routed completion: model=${result.model}, tier=${result.tier}, cost=$${result.cost?.toFixed(6) || 0}`);

    return {
      success: true,
      content: result.content,
      model: result.model,
      cost: result.cost || 0,
      cost_breakdown: result.costBreakdown,
      tier: result.tier,
      usage: result.usage,
      escalations: result.escalations || 0,
    };
  } catch (error) {
    console.error('[daemon] Router completion failed:', error.message);
    return {
      success: false,
      error: error.message,
      content: null,
      model: null,
      cost: 0,
      tier: null,
    };
  }
}

/**
 * Classify a task to determine its complexity tier.
 * Uses the router's classifier to determine cheap/mid/best tier.
 *
 * @param {string} prompt - The task prompt
 * @param {Object} [context] - Context for classification
 * @returns {{ tier: string, reason: string, confidence: number }}
 */
function classifyTaskTier(prompt, context = {}) {
  return classifyTask(prompt, context);
}

// Command and output preview truncation constants
const COMMAND_PREVIEW_SHORT = 50; // Inline previews, summaries (e.g., timeline summaries)
const COMMAND_PREVIEW_MEDIUM = 100; // Card displays, detail views (e.g., timeline cards)
const COMMAND_PREVIEW_LONG = 200; // Timeline storage, full details (e.g., timeline events)
const OUTPUT_PREVIEW_SHORT = 500; // Output previews in UI
const OUTPUT_PREVIEW_LONG = 1000; // Full output displays

// Heartbeat tracking: session_id -> { count, lastSent }
const heartbeatState = new Map();
let lastHeartbeatTime = 0;

// Session registry: session_id -> { session_id, cwd, meta, registered_at }
const sessions = new Map();

// Approval queue: FIFO queue of pending approvals
// { approval_id, session_id, tool_name, tool_input, queued_at }
const approvalQueue = [];

// Maximum queue size to prevent memory exhaustion (DoS prevention)
const MAX_QUEUE_SIZE = 1000;

// Execution tracking: approval_id -> { status, started_at, completed_at, exit_code, stdout, stderr, error, child_process }
// child_process is stored so we can kill it if needed
const executions = new Map();

// Maximum number of executions to keep in memory (LRU cache)
const MAX_EXECUTIONS = 1000; // Maximum executions to keep in memory (LRU cache)

// Maximum output size to prevent memory issues
const MAX_OUTPUT_SIZE = 100_000; // 100KB

// Command whitelist for inbox execution (security: prevents arbitrary command execution)
// Only commands starting with these prefixes are allowed
const ALLOWED_COMMAND_PREFIXES = [
  'git ', // Git operations
  'npm ', // NPM package management
  'npx ', // NPX execution
  'node ', // Node.js execution
  'ls', // List files (ls, ls -la, etc.)
  'cat ', // View file contents
  'head ', // View file head
  'tail ', // View file tail
  'grep ', // Search in files
  'find ', // Find files
  'pwd', // Print working directory
  'echo ', // Echo output
  'mkdir ', // Create directories
  'touch ', // Create files
  'cp ', // Copy files
  'mv ', // Move files
  // 'rm ' removed - too dangerous for remote execution (could allow rm -rf /)
  // 'chmod ' removed - could enable privilege escalation attacks in remote execution
  'wc ', // Word count
  'sort ', // Sort output
  'uniq ', // Unique lines
  'cut ', // Cut columns
  'diff ', // Compare files
  'which ', // Find executables
  'env', // Show environment
  'date', // Show date
  'whoami', // Show current user
  'hostname', // Show hostname
];

/**
 * SECURITY: Shell injection detection
 * Block characters that can chain or inject additional commands
 * Note: Parentheses/brackets allowed within quoted strings (e.g., node -e "code()")
 */
const COMMAND_INJECTION_PATTERNS = [
  /;/, // Command chaining: cmd1; cmd2
  /\|/, // Piping: cmd1 | cmd2
  /&/, // Background/AND: cmd1 & cmd2, cmd1 && cmd2
  /`/, // Backtick substitution: `cmd`
  /\$\(/, // Command substitution: $(cmd)
  /\$\{/, // Variable expansion: ${var}
  /\n|\r/, // Newlines (command separation)
  />\s*>/, // Append redirect: >>
  /<\s*</, // Here-string: <<
];

/**
 * Sanitize command by checking for shell injection patterns
 * @param {string} command - The command to sanitize
 * @returns {{ safe: boolean, reason?: string }}
 */
function sanitizeCommand(command) {
  for (const pattern of COMMAND_INJECTION_PATTERNS) {
    if (pattern.test(command)) {
      const match = command.match(pattern);
      return {
        safe: false,
        reason: `Command contains shell injection pattern: '${match[0]}'`
      };
    }
  }
  return { safe: true };
}

/**
 * Check if a command is allowed based on the whitelist
 * @param {string} command - The command to validate
 * @returns {{ allowed: boolean, reason?: string }}
 */
function isCommandAllowed(command) {
  if (!command || typeof command !== 'string') {
    return { allowed: false, reason: 'Command must be a non-empty string' };
  }

  const trimmed = command.trim();
  if (trimmed.length === 0) {
    return { allowed: false, reason: 'Command cannot be empty' };
  }

  // SECURITY: Check for shell metacharacters first
  const sanitizeResult = sanitizeCommand(trimmed);
  if (!sanitizeResult.safe) {
    console.warn(`[daemon] SECURITY: Blocked command with shell metacharacters: ${trimmed.substring(0, 50)}...`);
    return { allowed: false, reason: sanitizeResult.reason };
  }

  // Optional escape hatch for development: allow any command when explicitly enabled
  // SECURITY: Requires both ALLOW_ALL_COMMANDS=true AND explicit confirmation to prevent accidental enabling
  if (ALLOW_ALL_COMMANDS) {
    // Block in production environment
    if (process.env.NODE_ENV === 'production') {
      console.error('[daemon] SECURITY: ALLOW_ALL_COMMANDS is not permitted in production');
      return { allowed: false, reason: 'Command whitelist bypass disabled in production' };
    }
    // Require explicit confirmation variable (NODE_ENV defaults to undefined in most deployments)
    // This ensures ALLOW_ALL_COMMANDS cannot be accidentally enabled
    const dangerConfirm = process.env.TELEPORTATION_DANGER_ZONE;
    if (dangerConfirm !== 'i_understand_the_risks') {
      console.error('[daemon] SECURITY: ALLOW_ALL_COMMANDS requires TELEPORTATION_DANGER_ZONE=i_understand_the_risks');
      return { allowed: false, reason: 'Command whitelist bypass requires explicit danger zone confirmation' };
    }
    // Log with timestamp for audit trail
    console.warn(`[daemon] ⚠️ SECURITY WARNING: Command whitelist bypass enabled at ${new Date().toISOString()} - ALLOW_ALL_COMMANDS=true`);
    console.warn(`[daemon] ⚠️ Bypassing whitelist for command: ${trimmed.substring(0, 100)}`);
    return { allowed: true };
  }

  // Check against whitelist
  for (const prefix of ALLOWED_COMMAND_PREFIXES) {
    if (trimmed === prefix.trim() || trimmed.startsWith(prefix)) {
      return { allowed: true };
    }
  }

  // Command not in whitelist
  const cmdName = trimmed.split(/\s+/)[0];
  return {
    allowed: false,
    reason: `Command '${cmdName}' is not in the allowed whitelist. Allowed: ${ALLOWED_COMMAND_PREFIXES.map(p => p.trim().split(' ')[0]).filter((v, i, a) => a.indexOf(v) === i).join(', ')}`
  };
}

// Cleanup interval: remove old executions every hour
const CLEANUP_INTERVAL_MS = 60 * 60 * 1000; // 1 hour

let server = null;
let pollingTimer = null;
let cleanupTimer = null;
let idleTimer = null;
let isShuttingDown = false;

// Track last time we had any registered sessions (or last time we checked while sessions were present)
let lastSessionActivityAt = Date.now();

// Track in-flight fetch requests to prevent duplicate API calls for the same session
// Note: Entries are cleaned up in the finally block of getOrFetchSession after each fetch completes.
// Since fetch has a timeout, entries won't accumulate indefinitely even if relay is slow.
const pendingFetches = new Map();

/**
 * Fetch session from relay API and cache locally
 * Used when a session is not found in the local registry (e.g., after daemon restart)
 * @param {string} session_id - Session ID to fetch
 * @returns {Promise<object|null>} - Session object or null if not found
 */
async function fetchAndCacheSession(session_id) {
  if (!RELAY_API_URL || !RELAY_API_KEY) {
    console.log(`[daemon] Cannot fetch session ${session_id}: relay not configured`);
    return null;
  }

  try {
    console.log(`[daemon] Fetching session ${session_id} from relay API`);
    const response = await fetch(`${RELAY_API_URL}/api/sessions/${session_id}`, {
      headers: { 'Authorization': `Bearer ${RELAY_API_KEY}` },
      signal: AbortSignal.timeout(5000)
    });

    if (!response.ok) {
      console.log(`[daemon] Session ${session_id} not found in relay (status: ${response.status})`);
      return null;
    }

    const data = await response.json();

    // Validate response data structure
    if (!data || typeof data !== 'object') {
      console.log(`[daemon] Invalid session response for ${session_id}: expected object`);
      return null;
    }

    // Validate session_id matches (or use the one from response)
    const responseSessionId = data.session_id || data.claude_session_id || session_id;
    if (data.session_id && data.session_id !== session_id) {
      console.log(`[daemon] Session ID mismatch: requested ${session_id}, got ${data.session_id}`);
    }

    console.log(`[daemon] Retrieved session ${session_id} from relay:`, JSON.stringify(data.meta || {}).slice(0, 200));

    // Cache the session locally with validated/normalized data
    const session = {
      session_id: responseSessionId,
      claude_session_id: data.claude_session_id || responseSessionId,
      cwd: data.meta?.cwd || process.cwd(),
      meta: data.meta && typeof data.meta === 'object' ? data.meta : {},
      registered_at: typeof data.registered_at === 'number' ? data.registered_at : Date.now()
    };

    sessions.set(session_id, session);
    console.log(`[daemon] Cached session ${session_id} locally (cwd: ${session.cwd})`);

    return session;
  } catch (error) {
    console.error(`[daemon] Failed to fetch session ${session_id} from relay:`, error.message);
    return null;
  }
}

/**
 * Get session from local cache or fetch from relay
 * Uses a pending fetches map to prevent duplicate API calls for the same session
 * when multiple concurrent requests come in (race condition prevention)
 * @param {string} session_id - Session ID
 * @returns {Promise<object|null>} - Session object or null if not found
 */
async function getOrFetchSession(session_id) {
  // First check local cache
  let session = sessions.get(session_id);
  if (session) {
    return session;
  }

  // Check if a fetch is already in progress for this session
  if (pendingFetches.has(session_id)) {
    // Wait for the in-flight request to complete
    return await pendingFetches.get(session_id);
  }

  // Start a new fetch and cache the promise to prevent duplicate requests
  const fetchPromise = fetchAndCacheSession(session_id);
  pendingFetches.set(session_id, fetchPromise);

  try {
    session = await fetchPromise;
    return session;
  } finally {
    // Always clean up the pending fetch entry
    pendingFetches.delete(session_id);
  }
}

/**
 * HTTP Server for Hook Communication
 * Uses Node.js built-in http module (no external dependencies)
 */

// Helper to truncate output with indicator
function truncateOutput(output, label) {
  if (output.length <= MAX_OUTPUT_SIZE) {
    return output;
  }
  const truncated = output.slice(0, MAX_OUTPUT_SIZE);
  const remaining = output.length - MAX_OUTPUT_SIZE;
  return `${truncated}\n\n[${label} TRUNCATED - ${remaining} bytes omitted. Total: ${output.length} bytes]`;
}

// Helper to parse JSON body with size limit (DoS prevention)
async function parseJSONBody(req, maxSize = 1024 * 1024) {
  return new Promise((resolve, reject) => {
    let body = '';
    let size = 0;

    req.on('data', chunk => {
      size += chunk.length;
      if (size > maxSize) {
        req.destroy();
        reject(new Error('Request body too large'));
        return;
      }
      body += chunk.toString();
    });

    req.on('end', () => {
      try {
        resolve(body ? JSON.parse(body) : {});
      } catch (e) {
        reject(new Error('Invalid JSON: ' + e.message));
      }
    });

    req.on('error', reject);
  });
}

// Validation helpers
function validateSessionId(session_id) {
  if (!session_id || typeof session_id !== 'string') {
    throw new Error('session_id must be a non-empty string');
  }
  if (session_id.length > 256) {
    throw new Error('session_id too long (max 256 characters)');
  }
  // Allow @ and . for user@host format
  if (!/^[a-zA-Z0-9_@.-]+$/.test(session_id)) {
    throw new Error('session_id contains invalid characters (only alphanumeric, dash, underscore, @, . allowed)');
  }
  return session_id;
}

function validateApprovalId(approval_id) {
  if (!approval_id || typeof approval_id !== 'string') {
    throw new Error('approval_id must be a non-empty string');
  }
  if (approval_id.length > 256) {
    throw new Error('approval_id too long (max 256 characters)');
  }
  if (!/^[a-zA-Z0-9_-]+$/.test(approval_id)) {
    throw new Error('approval_id contains invalid characters');
  }
  return approval_id;
}

function validateToolName(tool_name) {
  if (!tool_name || typeof tool_name !== 'string') {
    throw new Error('tool_name must be a non-empty string');
  }
  if (tool_name.length > 100) {
    throw new Error('tool_name too long (max 100 characters)');
  }
  // Tool names should be alphanumeric with underscores
  if (!/^[a-zA-Z0-9_]+$/.test(tool_name)) {
    throw new Error('tool_name contains invalid characters');
  }
  return tool_name;
}

// Helper to send JSON response
function sendJSON(res, statusCode, data) {
  res.writeHead(statusCode, { 'Content-Type': 'application/json' });
  res.end(JSON.stringify(data));
}

// HTTP request handler
async function handleRequest(req, res) {
  const url = new URL(req.url, `http://${req.headers.host}`);
  const method = req.method;
  const pathname = url.pathname;

  // CORS headers for localhost
  const headers = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
    'Access-Control-Allow-Headers': 'Content-Type'
  };

  // Handle OPTIONS (CORS preflight)
  if (method === 'OPTIONS') {
    res.writeHead(200, headers);
    res.end();
    return;
  }

  try {
    // Health check
    if (method === 'GET' && pathname === '/health') {
      sendJSON(res, 200, {
        status: 'healthy',
        uptime: process.uptime(),
        sessions: sessions.size,
        queue: approvalQueue.length,
        executions: executions.size
      });
      return;
    }

    // Register session
    if (method === 'POST' && pathname === '/sessions/register') {
      const body = await parseJSONBody(req);
      const { session_id, claude_session_id, cwd, meta } = body;

      // Validate session_id
      try {
        validateSessionId(session_id);
      } catch (validationError) {
        sendJSON(res, 400, { error: validationError.message });
        return;
      }

      // Validate cwd if provided
      if (cwd && typeof cwd !== 'string') {
        sendJSON(res, 400, { error: 'cwd must be a string' });
        return;
      }

      sessions.set(session_id, {
        session_id,
        claude_session_id: claude_session_id || session_id, // Fallback to session_id if not provided
        cwd: cwd || process.cwd(),
        meta: {
          ...(meta || {}),
          daemon_pid: process.pid // Add daemon PID to metadata
        },
        registered_at: Date.now()
      });

      console.log(`[daemon] Session registered: ${session_id} (claude_id: ${claude_session_id || session_id}) (daemon_pid: ${process.pid}) (cwd: ${cwd || process.cwd()})`);
      sendJSON(res, 200, { ok: true });
      return;
    }


    // Queue approval for daemon handling
    if (method === 'POST' && pathname === '/approvals/handoff') {
      const body = await parseJSONBody(req);
      const { approval_id, session_id, tool_name, tool_input } = body;

      // Validate all required fields
      try {
        validateApprovalId(approval_id);
        validateSessionId(session_id);
        validateToolName(tool_name);
      } catch (validationError) {
        sendJSON(res, 400, { error: validationError.message });
        return;
      }

      // Validate tool_input if provided (should be an object)
      if (tool_input !== undefined && tool_input !== null && typeof tool_input !== 'object') {
        sendJSON(res, 400, { error: 'tool_input must be an object' });
        return;
      }

      // Check queue size limit to prevent memory exhaustion (DoS prevention)
      if (approvalQueue.length >= MAX_QUEUE_SIZE) {
        console.warn(`[daemon] Approval queue full (${approvalQueue.length}/${MAX_QUEUE_SIZE})`);
        sendJSON(res, 503, {
          error: 'Approval queue full',
          queue_size: approvalQueue.length,
          max_size: MAX_QUEUE_SIZE,
          message: 'Too many pending approvals. Please wait for some to complete.'
        });
        return;
      }

      // Add to queue if not already present
      if (!approvalQueue.find(a => a.approval_id === approval_id)) {
        approvalQueue.push({
          approval_id,
          session_id,
          tool_name,
          tool_input,
          queued_at: Date.now()
        });
        console.log(`[daemon] Approval queued: ${approval_id} (${tool_name}) [${approvalQueue.length}/${MAX_QUEUE_SIZE}]`);
      }

      sendJSON(res, 200, { ok: true, queued: true });
      return;
    }

    // Get execution status
    if (method === 'GET' && pathname.startsWith('/executions/')) {
      const approval_id = pathname.split('/executions/')[1];
      const execution = executions.get(approval_id);

      if (!execution) {
        sendJSON(res, 404, { error: 'not_found' });
        return;
      }

      sendJSON(res, 200, execution);
      return;
    }

    // 404 for unknown routes
    sendJSON(res, 404, { error: 'not_found' });
  } catch (error) {
    console.error(`[daemon] Request error:`, error.message);
    sendJSON(res, 500, { error: error.message });
  }
}

function hasIdleTimedOut(now, lastActivityAt, timeoutMs, sessionCount) {
  if (sessionCount > 0) return false;
  return now - lastActivityAt >= timeoutMs;
}

async function checkIdleTimeout() {
  if (isShuttingDown) return;

  const now = Date.now();

  if (sessions.size > 0) {
    lastSessionActivityAt = now;
    return;
  }

  if (!hasIdleTimedOut(now, lastSessionActivityAt, IDLE_TIMEOUT_MS, sessions.size)) {
    return;
  }

  const minutes = Math.round(IDLE_TIMEOUT_MS / 60000);
  console.log(`[daemon] No active sessions for ${minutes} minute(s). Shutting down due to idle timeout.`);

  // Double-check: prevent race condition where session registers during shutdown
  if (sessions.size > 0) {
    console.log('[daemon] New session registered during shutdown check, canceling idle timeout');
    lastSessionActivityAt = Date.now();
    return;
  }

  await cleanup();
  // Exit after idle timeout - tests mock process.exit to verify this behavior
  process.exit(0);
}

/**
 * Execute a shell command in the session's working directory
 * Returns { success, stdout, stderr, exit_code, error }
 *
 * Security: Commands must be in the ALLOWED_COMMAND_PREFIXES whitelist
 */
async function executeCommand(session_id, command) {
  // Try local cache first, then fetch from relay if not found
  const session = await getOrFetchSession(session_id);
  if (!session) {
    return {
      success: false,
      stdout: '',
      stderr: '',
      exit_code: -1,
      error: `Session not registered and not found in relay: ${session_id}`
    };
  }

  // Security: Validate command against whitelist
  const validation = isCommandAllowed(command);
  if (!validation.allowed) {
    console.log(`[daemon] Command rejected (not in whitelist): ${command.slice(0, 100)}`);
    return {
      success: false,
      stdout: '',
      stderr: '',
      exit_code: -1,
      error: validation.reason
    };
  }

  const cwd = session.cwd || process.cwd();
  const timeout = 30000; // 30 second timeout for shell commands

  try {
    const { stdout, stderr } = await execAsync(command, {
      cwd,
      timeout,
      maxBuffer: 1024 * 1024 // 1MB max output
    });

    return {
      success: true,
      stdout: truncateOutput(stdout, 'STDOUT'),
      stderr: truncateOutput(stderr, 'STDERR'),
      exit_code: 0,
      error: null
    };
  } catch (error) {
    // exec throws on non-zero exit codes
    return {
      success: false,
      stdout: truncateOutput(error.stdout || '', 'STDOUT'),
      stderr: truncateOutput(error.stderr || '', 'STDERR'),
      exit_code: error.code || -1,
      error: error.message
    };
  }
}

async function handleInboxMessage(session_id, message) {
  try {
    const preview = (message.text || '').slice(0, 200).replace(/\s+/g, ' ');
    console.log(`[daemon] Inbox message for session ${session_id}: ${message.id} - ${preview}`);

    const meta = message.meta || {};

    // For command messages, execute the command and post result back to the main agent inbox
    if (meta.type === 'command') {
      const replyAgentId = meta.reply_agent_id || 'main';
      const commandText = message.text || '';

      // Invalidate pending approvals BEFORE executing new command
      // This prevents race conditions where stale approvals could be acted upon
      try {
        const invalidateResponse = await fetch(`${RELAY_API_URL}/api/approvals/invalidate`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${RELAY_API_KEY}`
          },
          body: JSON.stringify({
            session_id,
            reason: 'New command execution started'
          })
        });

        if (invalidateResponse.ok) {
          const { invalidated } = await invalidateResponse.json();
          if (invalidated > 0) {
            console.log(`[daemon] Invalidated ${invalidated} pending approvals before command execution`);
          }
        }
      } catch (error) {
        console.warn(`[daemon] Failed to invalidate approvals:`, error.message);
        // Continue with execution - this is not critical
      }

      // Hybrid Execution Logic:
      // 1. Check if it's a valid whitelisted shell command
      const validation = isCommandAllowed(commandText);
      let executionResult;
      let executionType = 'shell';

      if (validation.allowed) {
        // Fast path: Execute shell command directly
        console.log(`[daemon] Executing direct shell command: ${commandText}`);
        executionResult = await executeCommand(session_id, commandText);
      } else {
        // Fallback: Natural language prompt via machine coder (Claude/Gemini/etc.)
        console.log(`[daemon] Command not in whitelist, handing off to machine coder: ${commandText}`);
        executionType = 'agent';

        // Stream output callback for inbox message execution
        const onOutput = createStreamingCallback(session_id, message.id, {
          message_id: message.id
        });

        // Use the unified machine coder interface
        // This supports Claude Code, Gemini CLI, and future backends
        try {
          executionResult = await executeWithMachineCoder(session_id, commandText, {
            onOutput,
            approvalContext: { type: 'inbox_message', id: message.id },
          });

          // Track which coder was used
          if (executionResult.coder_used) {
            console.log(`[daemon] Executed via ${executionResult.coder_used}`);
          }
        } catch (error) {
          executionResult = {
            success: false,
            exit_code: -1,
            stdout: '',
            stderr: '',
            error: error.message
          };
        }
      }

      // Store execution result for context delivery to local client
      // This ensures the local Claude client sees the work done remotely
      await storeExecutionResult(
        session_id,
        message.id, // Use message ID as pseudo-approval ID
        'Remote Command',
        commandText,
        executionResult,
        { triggered_by_message_id: message.id } // Link execution to originating command
      );

      // Build result message with execution details
      let resultText = '';
      if (executionResult.success) {
        const header = executionType === 'agent' ? 'Claude executed your request:\n\n' : 'Command executed successfully:\n\n';
        resultText = `${header}${executionResult.stdout}`;
      } else {
        const header = executionType === 'agent' ? 'Claude failed to execute request:\n\n' : `Command failed with exit code ${executionResult.exit_code}:\n\n`;
        resultText = `${header}Error: ${executionResult.error}\n\nStderr:\n${executionResult.stderr}`;
      }

      try {
        const resultResponse = await fetch(`${RELAY_API_URL}/api/messages`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${RELAY_API_KEY}`
          },
          body: JSON.stringify({
            session_id,
            text: resultText,
            meta: {
              type: 'result',
              from_agent_id: 'daemon',
              target_agent_id: replyAgentId,
              in_reply_to_message_id: message.id,
              command_exit_code: executionResult.exit_code,
              command_success: executionResult.success,
              execution_type: executionType
            }
          })
        });

        if (!resultResponse.ok) {
          const errorText = await resultResponse.text();
          console.error(`[daemon] Failed to post result message: HTTP ${resultResponse.status} - ${errorText}`);
        }
      } catch (sendError) {
        console.error('[daemon] Failed to send result message:', sendError.message);
      }
    }

    // Acknowledge the message so it is not re-delivered
    await fetch(`${RELAY_API_URL}/api/messages/${encodeURIComponent(message.id)}/ack`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${RELAY_API_KEY}`
      }
    });
  } catch (error) {
    console.error('[daemon] Failed to handle inbox message:', error.message);
  }
}

/**
 * Send heartbeat for a session to keep it alive in the relay
 * @param {string} session_id - The session ID to send heartbeat for
 */
async function sendHeartbeat(session_id) {
  try {
    // Get or initialize heartbeat state for this session
    let state = heartbeatState.get(session_id);
    if (!state) {
      state = { count: 0, lastSent: 0 };
      heartbeatState.set(session_id, state);
    }

    state.count++;
    state.lastSent = Date.now();

    const response = await fetch(
      `${RELAY_API_URL}/api/sessions/${encodeURIComponent(session_id)}/heartbeat`,
      {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${RELAY_API_KEY}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({
          timestamp: Date.now(),
          pid: process.pid,
          count: state.count
        }),
        signal: AbortSignal.timeout(5000) // 5 second timeout
      }
    );

    if (!response.ok) {
      // 404 means session not registered in mech-storage yet (registered by Claude hooks, not daemon)
      // This is expected for sessions that are only locally registered - silently skip
      if (response.status !== 404) {
        console.error(`[daemon] Heartbeat failed for session ${session_id}: ${response.status}`);
      }
    }
  } catch (error) {
    // Don't spam logs for heartbeat failures - just note it
    if (error.name !== 'AbortError') {
      console.error(`[daemon] Heartbeat error for ${session_id}: ${error.message}`);
    }
  }
}

/**
 * Relay API Polling Loop
 * Polls relay API every 5 seconds for approved requests
 */
async function pollRelayAPI() {
  if (isShuttingDown) return;

  try {
    // Fetch pending approvals and inbox messages for all registered sessions
    const TEST_SESSION_FILTER = process.env.TELEPORTATION_TEST_SESSION_FILTER;
    for (const [session_id] of sessions) {
      // Optional: Filter sessions for testing (if TEST_SESSION_FILTER env var set)
      if (TEST_SESSION_FILTER && !session_id.startsWith(TEST_SESSION_FILTER)) {
        continue;
      }
      console.log(`Polling for session ${session_id}`);

      // 1) Approvals polling (existing behavior)
      try {
        const response = await fetch(
          `${RELAY_API_URL}/api/approvals?status=allowed&session_id=${session_id}`,
          {
            headers: {
              'Authorization': `Bearer ${RELAY_API_KEY}`
            }
          }
        );

        if (!response.ok) {
          console.error(`[daemon] Failed to fetch approvals for session ${session_id}: ${response.status}`);
        } else {
          const approvals = await response.json();

          // Queue newly approved requests
          for (const approval of approvals) {
            // Skip if already queued or executed
            if (approvalQueue.find(a => a.approval_id === approval.id)) continue;
            if (executions.has(approval.id)) continue;

            // Skip if already acknowledged (already handled by hook's fast path)
            if (approval.acknowledgedAt) continue;

            approvalQueue.push({
              approval_id: approval.id,
              session_id: approval.session_id,
              tool_name: approval.tool_name,
              tool_input: approval.tool_input,
              queued_at: Date.now()
            });

            console.log(`[daemon] Approval discovered: ${approval.id} (${approval.tool_name})`);
          }
        }
      } catch (approvalError) {
        console.error(`[daemon] Approval polling error for session ${session_id}:`, approvalError.message);
      }

      // 2) Inbox polling (new behavior)
      try {
        const messageResponse = await fetch(
          `${RELAY_API_URL}/api/messages/pending?session_id=${encodeURIComponent(session_id)}&agent_id=daemon`,
          {
            headers: {
              'Authorization': `Bearer ${RELAY_API_KEY}`
            }
          }
        );

        if (!messageResponse.ok) {
          // 404 or empty is not an error; only log unexpected statuses
          if (messageResponse.status !== 404) {
            console.error(`[daemon] Failed to fetch inbox message for session ${session_id}: ${messageResponse.status}`);
          }
        } else {
          const inboxMessage = await messageResponse.json();
          const hasFields = inboxMessage && typeof inboxMessage === 'object' && inboxMessage.id && inboxMessage.text;
          if (hasFields) {
            await handleInboxMessage(session_id, inboxMessage);
          }
        }
      } catch (inboxError) {
        console.error(`[daemon] Inbox polling error for session ${session_id}:`, inboxError.message);
      }

      // 3) Heartbeat - send periodically to keep session alive
      // Only send heartbeat if enough time has passed since last one (throttled per session)
      const now = Date.now();
      const sessionHeartbeat = heartbeatState.get(session_id);
      const lastSent = sessionHeartbeat?.lastSent || 0;
      if (now - lastSent >= HEARTBEAT_INTERVAL_MS) {
        await sendHeartbeat(session_id);
      }
    }

    // Process approval queue
    await processQueue();
  } catch (error) {
    console.error(`[daemon] Polling error:`, error.message);
  }

  // Schedule next poll
  if (!isShuttingDown) {
    pollingTimer = setTimeout(pollRelayAPI, POLL_INTERVAL_MS);
  }
}

/**
 * Cleanup old executions (TTL-based)
 * Removes executions older than 1 hour to prevent memory growth
 */
function cleanupOldExecutions() {
  if (isShuttingDown) return;

  const oneHourAgo = Date.now() - CLEANUP_INTERVAL_MS;
  let removed = 0;

  for (const [id, exec] of executions) {
    // Remove completed executions older than 1 hour
    if (exec.completed_at && exec.completed_at < oneHourAgo) {
      executions.delete(id);
      removed++;
    }
  }

  if (removed > 0) {
    console.log(`[daemon] Cleaned up ${removed} old execution(s) from cache`);
  }

  // Clean up heartbeatState for sessions that no longer exist
  let heartbeatRemoved = 0;
  for (const sessionId of heartbeatState.keys()) {
    if (!sessions.has(sessionId)) {
      heartbeatState.delete(sessionId);
      heartbeatRemoved++;
    }
  }

  if (heartbeatRemoved > 0) {
    console.log(`[daemon] Cleaned up ${heartbeatRemoved} stale heartbeat state(s)`);
  }

  // Also enforce LRU limit if still over limit
  while (executions.size > MAX_EXECUTIONS) {
    let oldestKey = null;
    let oldestTime = Infinity;
    for (const [id, exec] of executions) {
      if (exec.completed_at && exec.completed_at < oldestTime) {
        oldestTime = exec.completed_at;
        oldestKey = id;
      }
    }
    if (oldestKey) {
      executions.delete(oldestKey);
    } else {
      // If no completed executions, remove oldest by started_at
      for (const [id, exec] of executions) {
        if (exec.started_at < oldestTime) {
          oldestTime = exec.started_at;
          oldestKey = id;
        }
      }
      if (oldestKey) {
        executions.delete(oldestKey);
      } else {
        break; // Shouldn't happen, but safety check
      }
    }
  }
}

/**
 * Process Approval Queue
 * Execute queued approvals one at a time (FIFO)
 */
|
|
1115
|
+
async function processQueue() {
|
|
1116
|
+
// Process one at a time to avoid overwhelming the system
|
|
1117
|
+
if (approvalQueue.length === 0) return;
|
|
1118
|
+
|
|
1119
|
+
const approval = approvalQueue.shift();
|
|
1120
|
+
const { approval_id, session_id, tool_name, tool_input } = approval;
|
|
1121
|
+
const command = tool_input?.command || '';
|
|
1122
|
+
|
|
1123
|
+
console.log(`[daemon] Processing approval: ${approval_id} (${tool_name})`);
|
|
1124
|
+
|
|
1125
|
+
// Check if already executing (race condition prevention)
|
|
1126
|
+
if (executions.has(approval_id)) {
|
|
1127
|
+
const existing = executions.get(approval_id);
|
|
1128
|
+
if (existing.status === 'executing') {
|
|
1129
|
+
console.log(`[daemon] Approval ${approval_id} already executing, skipping duplicate`);
|
|
1130
|
+
return;
|
|
1131
|
+
}
|
|
1132
|
+
}
|
|
1133
|
+
|
|
1134
|
+
// Enforce LRU cache limit before adding new execution
|
|
1135
|
+
if (executions.size >= MAX_EXECUTIONS) {
|
|
1136
|
+
// Remove oldest completed execution
|
|
1137
|
+
let oldestKey = null;
|
|
1138
|
+
let oldestTime = Infinity;
|
|
1139
|
+
for (const [id, exec] of executions) {
|
|
1140
|
+
if (exec.completed_at && exec.completed_at < oldestTime) {
|
|
1141
|
+
oldestTime = exec.completed_at;
|
|
1142
|
+
oldestKey = id;
|
|
1143
|
+
}
|
|
1144
|
+
}
|
|
1145
|
+
if (oldestKey) {
|
|
1146
|
+
executions.delete(oldestKey);
|
|
1147
|
+
console.log(`[daemon] Removed oldest execution from cache: ${oldestKey}`);
|
|
1148
|
+
} else {
|
|
1149
|
+
// If no completed executions, remove oldest by started_at
|
|
1150
|
+
for (const [id, exec] of executions) {
|
|
1151
|
+
if (exec.started_at < oldestTime) {
|
|
1152
|
+
oldestTime = exec.started_at;
|
|
1153
|
+
oldestKey = id;
|
|
1154
|
+
}
|
|
1155
|
+
}
|
|
1156
|
+
if (oldestKey) {
|
|
1157
|
+
executions.delete(oldestKey);
|
|
1158
|
+
console.log(`[daemon] Removed oldest execution from cache: ${oldestKey}`);
|
|
1159
|
+
}
|
|
1160
|
+
}
|
|
1161
|
+
}
|
|
1162
|
+
|
|
1163
|
+
// Mark as executing (child_process will be set when spawnClaudeProcess is called)
|
|
1164
|
+
executions.set(approval_id, {
|
|
1165
|
+
approval_id,
|
|
1166
|
+
status: 'executing',
|
|
1167
|
+
started_at: Date.now(),
|
|
1168
|
+
completed_at: null,
|
|
1169
|
+
exit_code: null,
|
|
1170
|
+
stdout: '',
|
|
1171
|
+
stderr: '',
|
|
1172
|
+
error: null,
|
|
1173
|
+
child_process: null // Will be set when process spawns
|
|
1174
|
+
});
|
|
1175
|
+
|
|
1176
|
+
try {
|
|
1177
|
+
// Acknowledge approval BEFORE executing to prevent duplicate execution
|
|
1178
|
+
    // if user also approves locally while daemon is executing
    try {
      await fetch(`${RELAY_API_URL}/api/approvals/${approval_id}/ack`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${RELAY_API_KEY}`
        },
        body: JSON.stringify({ processed: true })
      });
      console.log(`[daemon] Acknowledged approval: ${approval_id}`);
    } catch (ackError) {
      console.error(`[daemon] Failed to acknowledge approval:`, ackError.message);
      // Continue anyway - acknowledgment is optional
    }

    // Build prompt for tool execution
    const prompt = buildToolPrompt(tool_name, tool_input);

    // Stream output callback to send conversational text to frontend
    const onOutput = createStreamingCallback(session_id, approval_id);

    // Spawn child Claude process with streaming callback
    // Track child process so it can be stopped if needed
    const onSpawn = (child) => {
      const exec = executions.get(approval_id);
      if (exec) {
        exec.child_process = child;
        executions.set(approval_id, exec);
        console.log(`[daemon] Child process spawned for approval ${approval_id} (PID: ${child.pid})`);
      }
    };

    const result = await spawnClaudeProcess(session_id, prompt, { onOutput, onSpawn });

    // Send any remaining accumulated output before final result
    if (accumulatedOutput.trim().length > 0) {
      try {
        await sendStreamingMessage(session_id, accumulatedOutput, {
          type: 'streaming_update',
          approval_id,
          partial: true,
          final_chunk: true
        });
      } catch (error) {
        console.debug(`[daemon] Failed to send final chunk: ${error.message}`);
      }
    }

    // Update execution status
    executions.set(approval_id, {
      ...executions.get(approval_id),
      status: result.success ? 'completed' : 'failed',
      completed_at: Date.now(),
      exit_code: result.exit_code,
      stdout: result.stdout,
      stderr: result.stderr,
      error: result.error
    });

    // Report execution status to relay API
    await reportExecutionStatus(approval_id, result);

    // Store execution result for later delivery to the user
    await storeExecutionResult(session_id, approval_id, tool_name, command, result);

    // Immediately send final result message to frontend
    // This ensures results are returned automatically without waiting for next tool use
    const resultText = result.success
      ? `✅ Task completed successfully:\n\n${result.stdout || '(No output)'}`
      : `❌ Task failed:\n\nError: ${result.error || 'Unknown error'}\n\nStderr:\n${result.stderr || '(No stderr)'}`;

    try {
      await sendStreamingMessage(session_id, resultText, {
        type: 'result',
        approval_id,
        command_success: result.success,
        command_exit_code: result.exit_code,
        execution_type: 'daemon'
      });
    } catch (error) {
      console.error(`[daemon] Failed to send final result message: ${error.message}`);
      // Continue - result is still stored and will be delivered via hook
    }

    console.log(`[daemon] Approval ${approval_id} ${result.success ? 'completed' : 'failed'}`);
  } catch (error) {
    console.error(`[daemon] Execution error for approval ${approval_id}:`, error.message);

    executions.set(approval_id, {
      ...executions.get(approval_id),
      status: 'failed',
      completed_at: Date.now(),
      error: error.message
    });

    const fallbackResult = {
      success: false,
      exit_code: null,
      stdout: '',
      stderr: '',
      error: error.message,
      duration_ms: null,
      started_at: null,
      executed_at: Date.now()
    };

    // Report failure to relay API
    await reportExecutionStatus(approval_id, {
      success: false,
      error: error.message
    });

    // Store failed execution result for later delivery
    await storeExecutionResult(session_id, approval_id, tool_name, command, fallbackResult);
  }
}

/**
 * Build tool execution prompt
 * Uses structured JSON format to prevent command injection
 * Claude Code will parse this as structured input, not natural language
 */
function buildToolPrompt(tool_name, tool_input) {
  // Use structured JSON format instead of string interpolation
  // This prevents command injection by ensuring inputs are properly escaped
  const input = tool_input || {};

  // Return structured JSON that Claude Code can parse safely
  // This format prevents any malicious input from being interpreted as commands
  return JSON.stringify({
    tool: tool_name,
    parameters: input,
    mode: 'headless_execution',
    timestamp: Date.now()
  });
}

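For reference (not part of the package source): given a hypothetical Bash-style tool request with a `command` parameter, buildToolPrompt would serialize it into a flat JSON envelope roughly like the sketch below; only the timestamp varies at runtime.

// Illustrative only; 'Bash' and 'npm test' are made-up inputs.
const envelope = buildToolPrompt('Bash', { command: 'npm test' });
// envelope ≈ '{"tool":"Bash","parameters":{"command":"npm test"},"mode":"headless_execution","timestamp":1700000000000}'
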
/**
 * Create a streaming output callback with throttling
 * Accumulates chunks and sends them periodically to avoid overwhelming the API
 * @param {string} session_id - Session ID
 * @param {string} approval_id - Approval or message ID for tracking
 * @param {Object} meta - Additional metadata to include in streaming messages
 * @returns {Function} onOutput callback function
 */
function createStreamingCallback(session_id, approval_id, meta = {}) {
  let accumulatedOutput = '';
  let lastStreamTime = 0;
  const STREAM_THROTTLE_MS = 2000; // Send updates at most every 2 seconds
  const MIN_CHUNK_SIZE = 50; // Minimum characters before sending

  return async (chunk, streamType) => {
    accumulatedOutput += chunk;
    const now = Date.now();
    const timeSinceLastStream = now - lastStreamTime;
    const hasEnoughContent = accumulatedOutput.length >= MIN_CHUNK_SIZE;
    const hasNewline = chunk.includes('\n');
    const shouldSend = hasEnoughContent && (hasNewline || timeSinceLastStream >= STREAM_THROTTLE_MS);

    if (shouldSend && accumulatedOutput.trim().length > 0) {
      try {
        await sendStreamingMessage(session_id, accumulatedOutput, {
          type: 'streaming_update',
          approval_id,
          stream_type: streamType,
          partial: true,
          ...meta
        });
        accumulatedOutput = ''; // Reset accumulator after sending
        lastStreamTime = now;
      } catch (error) {
        // Don't fail execution if streaming fails
        console.debug(`[daemon] Failed to stream output: ${error.message}`);
      }
    }
  };
}

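A rough feel for the throttling behaviour, as an editorial sketch rather than package code (the session and approval IDs are placeholders, and a flush only reaches the relay when RELAY_API_URL and RELAY_API_KEY are configured): short chunks are buffered until the accumulator crosses MIN_CHUNK_SIZE and the chunk either contains a newline or 2 seconds have passed since the last flush.

// Illustrative only.
const onOutput = createStreamingCallback('sess_abc', 'appr_123');
await onOutput('Compiling workspace packages', 'stdout');              // 28 chars, no newline: buffered
await onOutput('done: 142 packages built in 3.2s, 0 warnings\n', 'stdout'); // accumulator >= 50 chars + newline: flushed
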
/**
 * Send a message to the frontend via relay API
 * Helper function for streaming updates during execution
 */
async function sendStreamingMessage(session_id, text, meta = {}) {
  if (!session_id || !RELAY_API_URL || !RELAY_API_KEY) return;

  try {
    await fetch(`${RELAY_API_URL}/api/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${RELAY_API_KEY}`
      },
      body: JSON.stringify({
        session_id,
        text,
        meta: {
          type: 'streaming_update',
          from_agent_id: 'daemon',
          target_agent_id: 'human',
          ...meta
        }
      })
    });
  } catch (error) {
    // Don't spam logs for streaming message failures
    console.debug(`[daemon] Failed to send streaming message: ${error.message}`);
  }
}

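Putting the two helpers together, a flushed chunk reaches the relay as a POST to `${RELAY_API_URL}/api/messages` with a body roughly like the following sketch (IDs and text are invented; field names are taken from the code above):

// Illustrative request body only.
// {
//   "session_id": "sess_abc",
//   "text": "done: 142 packages built in 3.2s, 0 warnings\n",
//   "meta": {
//     "type": "streaming_update",
//     "from_agent_id": "daemon",
//     "target_agent_id": "human",
//     "approval_id": "appr_123",
//     "stream_type": "stdout",
//     "partial": true
//   }
// }
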
/**
 * Execute a prompt using the machine coder interface
 * Supports Claude Code, Gemini CLI, and future coders via unified interface.
 *
 * SECURITY: This function requires an approvalContext to ensure all executions
 * have an audit trail. It should only be called for pre-approved requests.
 *
 * @param {string} session_id - Teleportation session ID
 * @param {string} prompt - Natural language prompt to execute
 * @param {Object} options - Execution options
 * @param {Object} options.approvalContext - REQUIRED: Approval context for audit trail
 * @param {string} options.approvalContext.type - Type of approval (e.g., 'inbox_message', 'approval_queue')
 * @param {string} options.approvalContext.id - ID of the approval source
 * @param {Function} [options.onOutput] - Streaming output callback
 * @param {Function} [options.onSpawn] - Called when process spawns (for tracking)
 * @param {string} [options.preferredCoder] - Override preferred coder
 * @returns {Promise<Object>} Execution result
 * @throws {Error} If approvalContext is missing or invalid
 */
async function executeWithMachineCoder(session_id, prompt, options = {}) {
  const { onOutput, onSpawn, preferredCoder, approvalContext } = options;

  // SECURITY: This function should only be called for pre-approved requests.
  // The approvalContext tracks where the approval came from (inbox message, approval queue, etc.)
  // approvalContext is REQUIRED to ensure all executions have an audit trail
  if (!approvalContext || !approvalContext.type || !approvalContext.id) {
    const error = new Error(`SECURITY: executeWithMachineCoder requires approvalContext with type and id`);
    console.error(`[daemon] ${error.message} - session: ${session_id}`);
    throw error;
  }

  console.log(`[daemon] Executing with approval context: ${approvalContext.type}:${approvalContext.id}`);

  // Get session info
  const session = await getOrFetchSession(session_id);
  if (!session) {
    throw new Error(`Session not registered and not found in relay: ${session_id}`);
  }

  const cwd = session.cwd || process.cwd();

  // Determine which coder to use
  let coderPreference = preferredCoder || PREFERRED_CODER;
  let coder;

  if (coderPreference === 'auto') {
    // Auto-select: prefer Claude Code, fallback to Gemini
    coder = await getBestCoder({
      largeContext: USE_GEMINI_FOR_LARGE_CONTEXT,
    });
  } else {
    // Specific coder requested
    const available = await getAvailableCoders();
    coder = available.find(c => c.name === coderPreference);

    // Fallback to any available if preferred not found
    if (!coder && available.length > 0) {
      console.warn(`[daemon] Preferred coder '${coderPreference}' not available, using ${available[0].displayName}`);
      coder = available[0];
    }
  }

  if (!coder) {
    throw new Error('No machine coders available (install claude or gemini CLI)');
  }

  console.log(`[daemon] Using machine coder: ${coder.displayName}`);

  // SECURITY: Verify session is still active via relay API
  try {
    const sessionCheck = await fetch(`${RELAY_API_URL}/api/sessions/${session_id}/daemon-state`, {
      headers: { 'Authorization': `Bearer ${RELAY_API_KEY}` },
      signal: AbortSignal.timeout(5000)
    });

    if (!sessionCheck.ok) {
      throw new Error(`Session invalid or expired: ${session_id} (status: ${sessionCheck.status})`);
    }
  } catch (error) {
    console.error(`[daemon] SECURITY: Session validation failed for ${session_id}:`, error.message);
    throw new Error(`Session validation failed: ${error.message}`);
  }

  // Build execution options
  const execOptions = {
    projectPath: cwd,
    sessionId: session_id,
    prompt,
    autoApprove: true, // Daemon executes pre-approved requests
    timeoutMs: CHILD_TIMEOUT_MS,
    model: session.meta?.current_model,

    // Progress callback for streaming
    onProgress: (event) => {
      if (onOutput) {
        switch (event.type) {
          case 'message':
            if (event.data?.content) {
              onOutput(event.data.content, 'stdout');
            }
            break;
          case 'tool_use':
            onOutput(`[Tool: ${event.data?.tool}]\n`, 'stdout');
            break;
          case 'tool_result':
            if (event.data?.output) {
              onOutput(event.data.output + '\n', 'stdout');
            }
            break;
          case 'error':
            if (event.data?.message) {
              onOutput(event.data.message + '\n', 'stderr');
            }
            break;
          case 'output':
          case 'stderr':
            if (event.data?.text) {
              onOutput(event.data.text, event.type === 'stderr' ? 'stderr' : 'stdout');
            }
            break;
        }
      }
    },

    // Tool approval callback
    // SECURITY: Auto-approve is safe here because:
    // 1. This function is only called for pre-approved requests (inbox messages, approval queue)
    // 2. The approvalContext parameter tracks the source of approval
    // 3. Session validation already occurred above
    // For Gemini CLI (no native hooks), we intercept tool_use events and auto-approve
    // For Claude Code (has hooks), the hooks handle approval via Relay
    onToolCall: async (tool) => {
      const context = approvalContext ? `via ${approvalContext.type}:${approvalContext.id}` : 'no context';
      console.log(`[daemon] Tool call: ${tool.tool} (pre-approved ${context})`);
      return 'allow';
    },
  };

  // Execute via the coder
  const startedAt = Date.now();

  try {
    // For Claude Code with resume support, use resume if we have a claude_session_id
    let result;
    if (coder.name === 'claude-code' && session.claude_session_id) {
      result = await coder.resume(session.claude_session_id, prompt, execOptions);
    } else {
      result = await coder.execute(execOptions);
    }

    // Notify caller that execution started (for tracking)
    // Note: Machine coders handle their own process management
    if (onSpawn) {
      onSpawn({ pid: result.executionId || 'machine-coder' });
    }

    return {
      success: result.success,
      exit_code: result.success ? 0 : 1,
      stdout: result.output || '',
      stderr: result.error || '',
      error: result.error || null,
      duration_ms: result.stats?.durationMs || (Date.now() - startedAt),
      started_at: startedAt,
      executed_at: Date.now(),
      coder_used: coder.name,
      model_used: result.stats?.model,
      tokens_used: result.stats?.tokensUsed,
    };
  } catch (error) {
    return {
      success: false,
      exit_code: -1,
      stdout: '',
      stderr: '',
      error: error.message,
      duration_ms: Date.now() - startedAt,
      started_at: startedAt,
      executed_at: Date.now(),
      coder_used: coder.name,
    };
  }
}

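A call site for reference, as a minimal sketch rather than package code (the session and message IDs are placeholders; 'inbox_message' is one of the approvalContext types named in the JSDoc above, and the call assumes a registered session plus relay credentials):

// Illustrative only.
const result = await executeWithMachineCoder('sess_abc', 'Run the test suite and summarize any failures', {
  approvalContext: { type: 'inbox_message', id: 'msg_42' },
  onOutput: createStreamingCallback('sess_abc', 'msg_42'),
});
console.log(result.success, result.coder_used, result.duration_ms);
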
/**
 * Spawn child Claude Code process
 * Executes: claude --resume <session_id> -p "<prompt>"
 *
 * Security: Validates session exists and is authorized before execution
 *
 * NOTE: This function is kept for backward compatibility and direct shell execution.
 * For agent execution, consider using executeWithMachineCoder() which supports
 * multiple backends (Claude Code, Gemini CLI, etc.)
 *
 * @param {string} session_id - Session ID
 * @param {string} prompt - Prompt to execute
 * @param {Object} options - Options including onOutput callback for streaming
 */
async function spawnClaudeProcess(session_id, prompt, options = {}) {
  // Try local cache first, then fetch from relay if not found
  const session = await getOrFetchSession(session_id);
  if (!session) {
    throw new Error(`Session not registered and not found in relay: ${session_id}`);
  }

  console.log(`[daemon] Processing approval for session ${session_id}`);

  // SECURITY: Verify session is still active via relay API
  try {
    const sessionCheck = await fetch(`${RELAY_API_URL}/api/sessions/${session_id}/daemon-state`, {
      headers: { 'Authorization': `Bearer ${RELAY_API_KEY}` },
      signal: AbortSignal.timeout(5000)
    });

    if (!sessionCheck.ok) {
      throw new Error(`Session invalid or expired: ${session_id} (status: ${sessionCheck.status})`);
    }
  } catch (error) {
    // Log security event
    console.error(`[daemon] SECURITY: Session validation failed for ${session_id}:`, error.message);
    throw new Error(`Session validation failed: ${error.message}`);
  }

  return new Promise((resolve, reject) => {
    const cwd = session.cwd || process.cwd();
    const startedAt = Date.now();
    const { onOutput, onSpawn } = options;

    // SECURITY: Defense-in-depth - validate session_id format at spawn point
    // Even though we validate at HTTP endpoints, add assertion here for safety
    if (!/^[a-zA-Z0-9_@.-]+$/.test(session_id)) {
      throw new Error('Invalid session_id format at spawn point');
    }

    // Determine execution mode
    let isToolExecution = false;
    let commandToRun = '';
    let agentPrompt = '';

    try {
      const promptObj = JSON.parse(prompt);
      if (promptObj.parameters?.command) {
        isToolExecution = true;
        commandToRun = promptObj.parameters.command;
      }
    } catch (e) {
      // Not JSON, treat as natural language prompt
    }

    if (!isToolExecution) {
      agentPrompt = prompt;
    }

    console.log(`[daemon] Spawning process (${isToolExecution ? 'Tool Shell' : 'Claude Agent'})...`);
    if (isToolExecution) {
      console.log(`[daemon] Executing tool command: ${commandToRun}`);
    } else {
      console.log(`[daemon] Executing agent prompt: ${agentPrompt}`);
    }

    let child;

    if (isToolExecution) {
      /**
       * MODE 1: Tool Execution (Direct Shell)
       * SECURITY: Command execution with defense-in-depth
       *
       * Security layers:
       * 1. Commands validated against whitelist (isCommandAllowed)
       * 2. Shell metacharacters blocked (sanitizeCommand) - prevents injection
       * 3. Commands are pre-validated before reaching this point
       */
      child = spawn('sh', ['-c', commandToRun], {
        cwd,
        stdio: 'pipe',
        env: {
          ...process.env,
          TELEPORTATION_DAEMON_CHILD: 'true'
        }
      });
    } else {
      /**
       * MODE 2: Agent Execution (Claude CLI)
       * Invokes Claude Code with the natural language prompt
       * Uses --resume to attach to the correct session context
       */
      // Use CLAUDE_CLI_PATH from env or default to 'claude'
      const cliBin = process.env.CLAUDE_CLI_PATH || 'claude';

      // Use the actual Claude session ID for resuming, not the teleportation session ID
      const resumeSessionId = session.claude_session_id || session_id;

      const args = [
        '--resume', resumeSessionId,
        '-p', agentPrompt,
        '--dangerously-skip-permissions' // Skip permissions for headless execution
      ];

      // Use the same model that the session was started with (if available)
      // This ensures daemon-spawned Claude uses the same model as the user's session
      // Model sources (in priority order):
      // 1. session.meta.current_model (from ANTHROPIC_MODEL/CLAUDE_MODEL env vars)
      // 2. session.meta.current_model (from ~/.claude/settings.json)
      // 3. No explicit model (--resume uses session's original model)
      const sessionModel = session.meta?.current_model;
      if (sessionModel) {
        args.unshift('--model', sessionModel);
        console.log(`[daemon] Using session model: ${sessionModel}`);
      } else {
        console.log(`[daemon] No explicit model set, --resume will use session's original model`);
      }

      console.log(`[daemon] Invoking: ${cliBin} ${args.join(' ')}`);

      child = spawn(cliBin, args, {
        cwd,
        stdio: 'pipe',
        env: {
          ...process.env,
          TELEPORTATION_DAEMON_CHILD: 'true',
          // Ensure CI/non-interactive mode
          CI: 'true'
        }
      });
    }

    // Notify caller that child process has spawned (so it can be tracked for cancellation)
    if (onSpawn && child) {
      onSpawn(child);
    }

    // Close stdin immediately to prevent hanging
    if (child.stdin) {
      child.stdin.end();
    }

    let stdout = '';
    let stderr = '';
    let timedOut = false;

    // Capture stdout and stream to frontend
    child.stdout.on('data', (data) => {
      const chunk = data.toString();

      // Stream chunk directly to frontend (no accumulation for streaming)
      // This prevents memory issues with large outputs
      if (onOutput && chunk.trim()) {
        onOutput(chunk, 'stdout');
      }

      // Accumulate for final result (needed for return value)
      stdout += chunk;
    });

    // Capture stderr and stream to frontend
    child.stderr.on('data', (data) => {
      const chunk = data.toString();

      // Stream chunk directly to frontend (no accumulation for streaming)
      if (onOutput && chunk.trim()) {
        onOutput(chunk, 'stderr');
      }

      // Accumulate for final result (needed for return value)
      stderr += chunk;
    });

    // Timeout handler
    const timeout = setTimeout(() => {
      timedOut = true;
      child.kill('SIGTERM');
    }, CHILD_TIMEOUT_MS);

    // Handle process exit
    child.on('close', (code) => {
      clearTimeout(timeout);

      const executedAt = Date.now();

      resolve({
        success: code === 0 && !timedOut,
        exit_code: code,
        stdout: truncateOutput(stdout, 'STDOUT'),
        stderr: truncateOutput(stderr, 'STDERR'),
        error: timedOut ? 'Execution timed out' : null,
        duration_ms: executedAt - startedAt,
        started_at: startedAt,
        executed_at: executedAt,
        child_process: null // Clear reference after completion
      });
    });

    // Handle spawn errors
    child.on('error', (err) => {
      clearTimeout(timeout);

      const executedAt = Date.now();

      resolve({
        success: false,
        exit_code: -1,
        stdout: '',
        stderr: '',
        error: err.message,
        duration_ms: 0,
        started_at: startedAt,
        executed_at: executedAt,
        child_process: null
      });
    });
  });
}

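In practice the two modes are selected purely by whether the prompt parses as the JSON envelope produced by buildToolPrompt. An editorial sketch of both paths (placeholder session ID; both calls assume a registered session and relay credentials):

// Illustrative only.
// Tool-shell mode: the JSON envelope carries parameters.command, which runs via `sh -c`.
await spawnClaudeProcess('sess_abc', buildToolPrompt('Bash', { command: 'npm test' }));

// Agent mode: plain text fails JSON.parse and is handed to `claude --resume ... -p`.
await spawnClaudeProcess('sess_abc', 'Summarize the failing tests and suggest a fix');
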
/**
 * Store execution result in relay pending_results for later delivery
 */
async function storeExecutionResult(session_id, approval_id, tool_name, command, executionResult, options = {}) {
  if (!session_id || !RELAY_API_URL || !RELAY_API_KEY) {
    return;
  }

  const payload = {
    approval_id,
    command: command || '',
    tool_name,
    exit_code: executionResult.exit_code ?? null,
    stdout: (executionResult.stdout || '').slice(0, 10_000),
    stderr: (executionResult.stderr || '').slice(0, 10_000),
    executed_at: executionResult.executed_at || Date.now(),
    // Include triggered_by_message_id to link executions back to originating commands
    ...(options.triggered_by_message_id && { triggered_by_message_id: options.triggered_by_message_id }),
    // Cost metadata from router (if available)
    ...(executionResult.cost !== undefined && {
      cost: executionResult.cost,
      cost_breakdown: executionResult.cost_breakdown,
      model_used: executionResult.model_used,
      tier_used: executionResult.tier_used,
    }),
    // Machine coder metadata (if available)
    ...(executionResult.coder_used && { coder_used: executionResult.coder_used }),
  };

  const url = `${RELAY_API_URL}/api/sessions/${encodeURIComponent(session_id)}/results`;

  const attempt = async () => {
    const res = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${RELAY_API_KEY}`
      },
      body: JSON.stringify(payload)
    });

    if (!res.ok) {
      console.error(`[daemon] Failed to store execution result for approval ${approval_id}: HTTP ${res.status}`);
    } else {
      console.log(`[daemon] Stored execution result for approval ${approval_id} (session ${session_id})`);
    }
  };

  try {
    await attempt();
  } catch (error) {
    // Retry once on network failure
    console.error(`[daemon] Error storing execution result for approval ${approval_id}, retrying once:`, error.message);
    try {
      await attempt();
    } catch (error2) {
      console.error(`[daemon] Second attempt to store execution result failed for approval ${approval_id}:`, error2.message);
    }
  }
}

/**
 * Report execution status to relay API
 */
async function reportExecutionStatus(approval_id, result) {
  try {
    await fetch(`${RELAY_API_URL}/api/approvals/${approval_id}/executed`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${RELAY_API_KEY}`
      },
      body: JSON.stringify({
        success: result.success,
        exit_code: result.exit_code,
        stdout: result.stdout?.slice(0, 10_000), // Send first 10KB only
        stderr: result.stderr?.slice(0, 10_000),
        error: result.error,
        duration_ms: result.duration_ms
      })
    });
  } catch (error) {
    console.error(`[daemon] Failed to report execution status:`, error.message);
  }
}

/**
 * Cleanup function
 */
async function cleanup() {
  console.log('[daemon] Cleanup function called.');
  console.log('[daemon] Cleaning up...');
  isShuttingDown = true;

  // Stop polling
  if (pollingTimer) {
    clearTimeout(pollingTimer);
    pollingTimer = null;
  }

  // Clear cleanup timer
  if (cleanupTimer) {
    clearInterval(cleanupTimer);
    cleanupTimer = null;
  }

  if (idleTimer) {
    clearInterval(idleTimer);
    idleTimer = null;
  }

  // Close HTTP server
  if (server) {
    await new Promise((resolve) => {
      server.close(resolve);
    });
  }

  // Release PID lock
  await releasePidLock(process.pid);

  console.log('[daemon] Cleanup complete');
}

/**
 * Start daemon
 */
async function main() {
  console.log('[daemon] Main function started.');
  try {
    // Acquire PID lock
    await acquirePidLock(process.pid);

    // Setup signal handlers
    setupSignalHandlers(cleanup);

    // Start HTTP server (using built-in http module)
    server = http.createServer(handleRequest);
    server.listen(PORT, '127.0.0.1', () => {
      console.log(`[daemon] HTTP server listening on http://127.0.0.1:${PORT}`);
      console.log(`[daemon] Relay API: ${RELAY_API_URL}`);
      console.log(`[daemon] Poll interval: ${POLL_INTERVAL_MS}ms`);
      console.log(`[daemon] PID: ${process.pid}`);
    });

    // Start polling loop
    console.log('[daemon] Starting relay API polling...');
    pollRelayAPI();

    // Start cleanup interval for old executions
    cleanupTimer = setInterval(cleanupOldExecutions, CLEANUP_INTERVAL_MS);
    console.log(`[daemon] Cleanup interval: ${CLEANUP_INTERVAL_MS / 1000}s`);

    idleTimer = setInterval(() => {
      checkIdleTimeout().catch((err) => {
        console.error('[daemon] Idle timeout check failed:', err.message);
      });
    }, IDLE_CHECK_INTERVAL_MS);
    console.log(`[daemon] Idle timeout: ${IDLE_TIMEOUT_MS / 60000}m, check interval: ${IDLE_CHECK_INTERVAL_MS / 1000}s`);
  } catch (error) {
    console.error('[daemon] Failed to start:', error.message);
    process.exit(1);
  }
}

// Start daemon if run directly
if (process.argv[1] === fileURLToPath(import.meta.url)) {
  main();
}

const __test = {
  hasIdleTimedOut,
  checkIdleTimeout,
  _getLastSessionActivityAt: () => lastSessionActivityAt,
  _setLastSessionActivityAt: (value) => {
    lastSessionActivityAt = value;
  },
  _getSessionsMap: () => sessions
};

export {
  main,
  cleanup,
  buildToolPrompt,
  spawnClaudeProcess,
  executeWithMachineCoder, // New: unified machine coder interface
  pollRelayAPI,
  processQueue,
  storeExecutionResult,
  hasIdleTimedOut,
  validateSessionId,
  validateApprovalId,
  validateToolName,
  parseJSONBody,
  cleanupOldExecutions,
  executeCommand,
  handleInboxMessage,
  getOrFetchSession,
  fetchAndCacheSession,
  sendStreamingMessage,
  MAX_EXECUTIONS,
  PREFERRED_CODER, // New: machine coder preference
  // Router integration for cost-aware LLM calls
  routedCompletion,
  classifyTaskTier,
  getRouter,
  ROUTER_ENABLED,
  __test
};
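For anyone auditing this file: because main() only runs when the module is the process entry point, the export block makes individual helpers importable without starting the daemon. A minimal sketch, assuming the installed package layout and that module-level configuration loads without error (the import path is an assumption):

// audit-sketch.mjs - illustrative only, not part of the package.
import { buildToolPrompt, __test } from 'teleportation-cli/lib/daemon/teleportation-daemon.js';

console.log(buildToolPrompt('Bash', { command: 'echo hello' }));

// The __test surface exposes idle-tracking state without running main().
__test._setLastSessionActivityAt(Date.now() - 60 * 60 * 1000);
console.log(__test._getLastSessionActivityAt());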