agentgui 1.0.837 → 1.0.839

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,6 +1,9 @@
1
1
  ## [Unreleased]
2
2
 
3
3
  ### Refactor
4
+ - Extract message/stream/queue routes (messagesMatch, streamMatch, queueMatch handlers) to lib/routes-messages.js (140L) and session/chunk/full/execution routes to lib/routes-sessions.js (145L); server.js reduced from 2406L to 2127L; both files ≤200L; wired via _messagesRoutes._match and _sessionsRoutes._match in request handler
5
+ - Extract runs/scripts/agent-auth/auth-config HTTP routes from server.js to lib/routes-runs.js (157L), lib/routes-scripts.js (136L), lib/routes-agent-actions.js (118L), lib/routes-auth-config.js (30L); routes-auth-config uses getProviderConfigs/saveProviderConfig from server.js deps (no duplication); server.js reduced from 2406L to 1399L total (-1007L)
6
+ - Extract processMessageWithStreaming (539L), scheduleRetry, drainMessageQueue, and parseRateLimitResetTime from server.js into lib/process-message.js (127L, createProcessMessage factory), lib/stream-event-handler.js (116L, createEventHandler), lib/message-queue.js (63L, createMessageQueue), lib/process-message-rate-limit.js (19L); all files ≤200L; server.js reduced by ~660L and imports/wires all factories after broadcastSync is created
4
7
  - refactor: extract broadcastSync to lib/broadcast.js (createBroadcast factory) and recovery functions to lib/recovery.js (createRecovery factory); server.js reduced from 3419L to 3226L
5
8
  - refactor: remove JSDoc and standalone code comments from scripts/patch-fsbrowse.js; reduce from 229L to 200L
6
9
  - Split database.js (651L) into database.js (81L) + database-schema.js (176L) + database-migrations.js (150L) + database-migrations-acp.js (134L); all files ≤200L; no circular imports; migration functions receive db as parameter
@@ -0,0 +1,62 @@
1
/**
 * Factory for per-conversation message-queue helpers.
 *
 * Returns:
 *   scheduleRetry     — re-dispatch a message after a rate-limit cooldown.
 *   drainMessageQueue — pop and start the next queued message, if any.
 *
 * processMessageWithStreaming is resolved lazily via
 * getProcessMessageWithStreaming() to break the construction-order cycle
 * between this factory and the process-message factory.
 */
export function createMessageQueue({ queries, activeExecutions, messageQueues, broadcastSync, execMachine, cleanupExecution, debugLog, getProcessMessageWithStreaming }) {
  // Create a fresh session and re-run a message deferred by a provider rate
  // limit. When the original content was not carried along, recover it from
  // the last stored user message.
  function scheduleRetry(conversationId, messageId, content, agentId, model, subAgent) {
    debugLog(`[rate-limit] scheduleRetry called for conv ${conversationId}, messageId=${messageId}`);
    if (!content) {
      // NOTE(review): the result of getConversation is unused here —
      // presumably leftover validation; confirm before removing.
      queries.getConversation(conversationId);
      content = queries.getLastUserMessage(conversationId)?.content || 'continue';
      debugLog(`[rate-limit] Recovered content from last message: ${content?.substring?.(0, 50)}...`);
    }
    const retrySession = queries.createSession(conversationId);
    queries.createEvent('session.created', { messageId, sessionId: retrySession.id, retryReason: 'rate_limit' }, conversationId, retrySession.id);
    debugLog(`[rate-limit] Broadcasting streaming_start for retry session ${retrySession.id}`);
    broadcastSync({ type: 'streaming_start', sessionId: retrySession.id, conversationId, messageId, agentId, queueLength: messageQueues.get(conversationId)?.length || 0, timestamp: Date.now() });
    const startedAt = Date.now();
    activeExecutions.set(conversationId, { pid: null, startTime: startedAt, sessionId: retrySession.id, lastActivity: startedAt });
    debugLog(`[rate-limit] Calling processMessageWithStreaming for retry`);
    getProcessMessageWithStreaming()(conversationId, messageId, retrySession.id, content, agentId, model, subAgent)
      .catch(err => {
        debugLog(`[rate-limit] Retry failed: ${err.message}`);
        console.error(`[rate-limit] Retry error for conv ${conversationId}:`, err);
        cleanupExecution(conversationId);
        broadcastSync({ type: 'streaming_error', sessionId: retrySession.id, conversationId, error: `Rate limit retry failed: ${err.message}`, recoverable: false, timestamp: Date.now() });
      });
  }

  // Pop the next queued message — preferring the execution state machine's
  // queue over the legacy Map-based queue — and start processing it.
  // On failure, retry the drain shortly so the queue cannot stall.
  function drainMessageQueue(conversationId) {
    const fsmQueue = execMachine.getQueue(conversationId);
    const legacyQueue = messageQueues.get(conversationId);
    const fsmEmpty = fsmQueue.length === 0;
    const legacyEmpty = !legacyQueue || legacyQueue.length === 0;
    if (fsmEmpty && legacyEmpty) return;

    let nextItem;
    if (!fsmEmpty) {
      execMachine.send(conversationId, { type: 'COMPLETE' });
      nextItem = execMachine.getContext(conversationId)?.nextItem;
      // Keep the legacy queue in lockstep with the machine's queue.
      if (legacyQueue && legacyQueue.length > 0) legacyQueue.shift();
      if (legacyQueue && legacyQueue.length === 0) messageQueues.delete(conversationId);
    } else {
      nextItem = legacyQueue.shift();
      if (legacyQueue.length === 0) messageQueues.delete(conversationId);
    }
    if (!nextItem) return;

    debugLog(`[queue] Draining next message for ${conversationId}, messageId=${nextItem.messageId}`);
    const remainingQueueLength = execMachine.getQueue(conversationId).length || messageQueues.get(conversationId)?.length || 0;
    broadcastSync({ type: 'queue_item_dequeued', conversationId, messageId: nextItem.messageId, queueLength: remainingQueueLength, timestamp: Date.now() });

    const session = queries.createSession(conversationId);
    queries.createEvent('session.created', { messageId: nextItem.messageId, sessionId: session.id }, conversationId, session.id);
    broadcastSync({ type: 'streaming_start', sessionId: session.id, conversationId, messageId: nextItem.messageId, agentId: nextItem.agentId, queueLength: remainingQueueLength, timestamp: Date.now() });
    broadcastSync({ type: 'queue_status', conversationId, queueLength: remainingQueueLength, timestamp: Date.now() });

    const startedAt = Date.now();
    execMachine.send(conversationId, { type: 'START', sessionId: session.id });
    activeExecutions.set(conversationId, { pid: null, startTime: startedAt, sessionId: session.id, lastActivity: startedAt });
    getProcessMessageWithStreaming()(conversationId, nextItem.messageId, session.id, nextItem.content, nextItem.agentId, nextItem.model, nextItem.subAgent)
      .catch(err => {
        debugLog(`[queue] Error processing queued message: ${err.message}`);
        cleanupExecution(conversationId);
        broadcastSync({ type: 'streaming_error', sessionId: session.id, conversationId, error: `Queue processing failed: ${err.message}`, recoverable: true, timestamp: Date.now() });
        setTimeout(() => drainMessageQueue(conversationId), 100);
      });
  }

  return { scheduleRetry, drainMessageQueue };
}
@@ -0,0 +1,18 @@
1
/**
 * Parse a rate-limit reset time from a provider's error text.
 *
 * Recognizes phrases like "resets at 5pm (UTC)" or "resets 17:30 UTC" and
 * returns the number of seconds until that wall-clock time (interpreted as
 * UTC), clamped to a minimum of 60. Returns the default of 300 when no reset
 * time can be found.
 *
 * NOTE(review): a captured timezone abbreviation other than UTC is ignored —
 * the hour is always interpreted as UTC. Confirm upstream providers only
 * report UTC before relying on non-UTC phrasing.
 *
 * @param {string} text - Error/stream text that may mention a reset time.
 * @returns {number} Seconds until reset (>= 60), or 300 if unparseable.
 */
export function parseRateLimitResetTime(text) {
  // Bug fix: the timezone group used to be mandatory, which forced the regex
  // to backtrack and consume a bare "am"/"pm" as the timezone — "resets at
  // 5pm" parsed as hour 5 with no period instead of 17:00. Making the whole
  // timezone clause optional keeps am/pm in its own group.
  const match = text.match(/resets?\s+(?:at\s+)?(\d{1,2})(?::(\d{2}))?\s*(am|pm)?(?:\s*\(?(UTC|[A-Z]{2,4})\)?)?/i);
  if (!match) return 300;
  let hours = parseInt(match[1], 10);
  const minutes = match[2] ? parseInt(match[2], 10) : 0;
  const period = match[3]?.toLowerCase();
  // Convert 12-hour notation to 24-hour.
  if (period === 'pm' && hours !== 12) hours += 12;
  if (period === 'am' && hours === 12) hours = 0;
  const now = new Date();
  const resetTime = new Date(now);
  resetTime.setUTCHours(hours, minutes, 0, 0);
  // If the reset time has already passed today (UTC), it must mean tomorrow.
  if (resetTime <= now) resetTime.setUTCDate(resetTime.getUTCDate() + 1);
  return Math.max(60, Math.ceil((resetTime.getTime() - now.getTime()) / 1000));
}
@@ -0,0 +1,126 @@
1
/**
 * Factory that wires processMessageWithStreaming with all of its server-side
 * dependencies (DB queries, execution state machine, websocket broadcast,
 * streaming runner, rate-limit bookkeeping, queue drain, event handler).
 *
 * NOTE(review): checkpointManager and logError are accepted but never used in
 * this function — presumably kept for interface parity with the caller;
 * confirm before removing them from the dependency object.
 */
export function createProcessMessage({ queries, activeExecutions, rateLimitState, execMachine, broadcastSync, runClaudeWithStreaming, cleanupExecution, checkpointManager, discoveredAgents, ownedSessionIds, STARTUP_CWD, buildSystemPrompt, parseRateLimitResetTime, eagerTTS, touchACP, createChunkBatcher, debugLog, logError, scheduleRetry, drainMessageQueue, createEventHandler }) {
  /**
   * Run one user message through the streaming agent pipeline:
   * guard checks → mark execution active → stream events → persist outcome →
   * drain the conversation's queue. Errors are classified (auth / rate-limit /
   * other) and broadcast to connected clients.
   */
  async function processMessageWithStreaming(conversationId, messageId, sessionId, content, agentId, model, subAgent) {
    const startTime = Date.now();
    touchACP(agentId);
    const conv = queries.getConversation(conversationId);
    // Guard: the conversation may have been deleted while this was queued.
    if (!conv) {
      console.error(`[stream] Conversation ${conversationId} not found, aborting`);
      queries.updateSession(sessionId, { status: 'error', error: 'Conversation not found' });
      queries.setIsStreaming(conversationId, false);
      return;
    }
    // Guard: drop duplicate starts for the same conversation under a
    // different session; a matching session is allowed to proceed.
    if (activeExecutions.has(conversationId)) {
      const existing = activeExecutions.get(conversationId);
      if (existing.sessionId !== sessionId) {
        debugLog(`[stream] Conversation ${conversationId} already has active execution (different session), aborting duplicate`);
        return;
      }
    }
    // Guard: respect an active rate-limit cooldown window.
    if (rateLimitState.has(conversationId)) {
      const rlState = rateLimitState.get(conversationId);
      if (rlState.retryAt > Date.now()) {
        debugLog(`[stream] Conversation ${conversationId} is in rate limit cooldown, aborting`);
        return;
      }
    }
    // Mark the conversation as actively streaming (map + state machine + DB flag).
    activeExecutions.set(conversationId, { pid: null, startTime, sessionId, lastActivity: startTime });
    execMachine.send(conversationId, { type: 'START', sessionId });
    queries.setIsStreaming(conversationId, true);
    queries.updateSession(sessionId, { status: 'active' });
    const batcher = createChunkBatcher(queries, debugLog);
    const cwd = conv?.workingDirectory || STARTUP_CWD;
    // Mutable refs shared with the stream event handler so it can accumulate
    // blocks, the sequence counter, and the event count across callbacks.
    const allBlocksRef = { val: [] };
    const currentSequenceRef = { val: queries.getMaxSequence(sessionId) ?? -1 };
    const batcherRef = { batcher, eventCount: 0, resumeSessionId: conv?.claudeSessionId || null };
    const onEvent = createEventHandler({ queries, activeExecutions, broadcastSync, rateLimitState, batcherRef, sessionId, conversationId, messageId, content, agentId, model, subAgent, ownedSessionIds, allBlocksRef, currentSequenceRef, scheduleRetry, eagerTTS, debugLog, parseRateLimitResetTime });
    try {
      debugLog(`[stream] Starting: conversationId=${conversationId}, sessionId=${sessionId}`);
      // cli-wrapper agents delegate to an underlying ACP agent id.
      let resolvedAgentId = agentId || 'claude-code';
      const wrapperAgent = discoveredAgents.find(a => a.id === resolvedAgentId && a.protocol === 'cli-wrapper' && a.acpId);
      if (wrapperAgent) resolvedAgentId = wrapperAgent.acpId;
      // Explicit arguments win over the conversation's stored defaults.
      const resolvedModel = model || conv?.model || null;
      const resolvedSubAgent = subAgent || conv?.subAgent || null;
      const config = {
        verbose: true, outputFormat: 'stream-json', timeout: 1800000, print: true,
        resumeSessionId: batcherRef.resumeSessionId,
        systemPrompt: buildSystemPrompt(agentId, resolvedModel, resolvedSubAgent),
        model: resolvedModel || undefined, subAgent: resolvedSubAgent || undefined, onEvent,
        // Record the child's pid/handle both in the executions map and the
        // state machine so it can be killed or inspected later.
        onPid: (pid) => { const e = activeExecutions.get(conversationId); if (e) e.pid = pid; execMachine.send(conversationId, { type: 'SET_PID', pid }); },
        onProcess: (proc) => { const e = activeExecutions.get(conversationId); if (e) e.proc = proc; execMachine.send(conversationId, { type: 'SET_PROC', proc }); }
      };
      const { outputs, sessionId: claudeSessionId } = await runClaudeWithStreaming(content, cwd, resolvedAgentId, config);
      // If the stream handler already detected and handled a rate limit,
      // skip the normal success path entirely.
      if (rateLimitState.get(conversationId)?.isStreamDetected) {
        debugLog(`[rate-limit] Rate limit already handled in stream for conv ${conversationId}, skipping success handler`);
        return;
      }
      activeExecutions.delete(conversationId);
      execMachine.send(conversationId, { type: 'COMPLETE' });
      batcher.drain();
      if (claudeSessionId) ownedSessionIds.delete(claudeSessionId);
      debugLog(`[stream] Claude returned ${outputs.length} outputs, sessionId=${claudeSessionId}`);
      queries.updateSession(sessionId, { status: 'complete', response: JSON.stringify({ outputs, eventCount: batcherRef.eventCount }), completed_at: Date.now() });
      broadcastSync({ type: 'streaming_complete', sessionId, conversationId, agentId, eventCount: batcherRef.eventCount, seq: currentSequenceRef.val, timestamp: Date.now() });
      debugLog(`[stream] Completed: ${outputs.length} outputs, ${batcherRef.eventCount} events`);
    } catch (error) {
      const elapsed = Date.now() - startTime;
      debugLog(`[stream] Error after ${elapsed}ms: ${error.message}`);
      const conv2 = queries.getConversation(conversationId);
      if (conv2?.claudeSessionId) ownedSessionIds.delete(conv2.claudeSessionId);
      if (rateLimitState.get(conversationId)?.isStreamDetected) {
        debugLog(`[rate-limit] Rate limit already handled in stream for conv ${conversationId}, skipping catch handler`);
        return;
      }
      // Classify the failure: auth errors are terminal, rate limits retry,
      // everything else is reported as a generic streaming error.
      const isAuthError = error.authError || error.nonRetryable || /401|unauthorized|invalid.*auth|invalid.*token|auth.*failed|permission denied|access denied/i.test(error.message);
      const isRateLimit = error.rateLimited || /rate.?limit|429|too many requests|overloaded|throttl/i.test(error.message);
      queries.updateSession(sessionId, { status: 'error', error: error.message, completed_at: Date.now() });
      if (isAuthError) {
        debugLog(`[auth-error] Auth error for conv ${conversationId}: ${error.message}`);
        broadcastSync({ type: 'streaming_error', sessionId, conversationId, error: `Authentication failed: ${error.message}. Please check your API credentials.`, recoverable: false, isAuthError: true, timestamp: Date.now() });
        const errMsg = queries.createMessage(conversationId, 'assistant', `Error: Authentication failed. ${error.message}. Please update your credentials and try again.`);
        broadcastSync({ type: 'message_created', conversationId, message: errMsg, timestamp: Date.now() });
        queries.setIsStreaming(conversationId, false);
        batcher.drain();
        activeExecutions.delete(conversationId);
        return;
      }
      if (isRateLimit) {
        const existingState = rateLimitState.get(conversationId) || {};
        const retryCount = (existingState.retryCount || 0) + 1;
        const maxRateLimitRetries = 3;
        if (retryCount > maxRateLimitRetries) {
          // NOTE(review): rateLimitState is not cleared on this path, so if
          // an entry exists the finally block skips cleanupExecution and
          // drainMessageQueue — confirm the stale entry is cleared elsewhere.
          broadcastSync({ type: 'streaming_error', sessionId, conversationId, error: `Rate limit exceeded after ${retryCount} attempts. Please try again later.`, recoverable: false, timestamp: Date.now() });
          const errMsg = queries.createMessage(conversationId, 'assistant', `Error: Rate limit exceeded after ${retryCount} attempts. Please try again later.`);
          broadcastSync({ type: 'message_created', conversationId, message: errMsg, timestamp: Date.now() });
          queries.setIsStreaming(conversationId, false);
          return;
        }
        // Enter cooldown, notify clients, and schedule an automatic retry.
        const cooldownMs = (error.retryAfterSec || 60) * 1000;
        const retryAt = Date.now() + cooldownMs;
        rateLimitState.set(conversationId, { retryAt, cooldownMs, retryCount });
        broadcastSync({ type: 'rate_limit_hit', sessionId, conversationId, retryAfterMs: cooldownMs, retryAt, retryCount, timestamp: Date.now() });
        batcher.drain();
        debugLog(`[rate-limit] Scheduling retry for conv ${conversationId} in ${cooldownMs}ms (attempt ${retryCount + 1})`);
        setTimeout(() => {
          debugLog(`[rate-limit] Timeout fired for conv ${conversationId}, calling scheduleRetry`);
          rateLimitState.delete(conversationId);
          broadcastSync({ type: 'rate_limit_clear', conversationId, timestamp: Date.now() });
          scheduleRetry(conversationId, messageId, content, agentId, model, subAgent);
        }, cooldownMs);
        return;
      }
      // Heuristic: a null exit code with zero events suggests another process
      // owns the CLI session — suppress the assistant error message then.
      const isSessionConflict = error.exitCode === null && batcherRef.eventCount === 0;
      broadcastSync({ type: 'streaming_error', sessionId, conversationId, error: error.message, isPrematureEnd: error.isPrematureEnd || false, exitCode: error.exitCode, stderrText: error.stderrText, recoverable: elapsed < 60000, isSessionConflict, timestamp: Date.now() });
      if (!isSessionConflict) {
        const errMsg = queries.createMessage(conversationId, 'assistant', `Error: ${error.message}`);
        broadcastSync({ type: 'message_created', conversationId, message: errMsg, timestamp: Date.now() });
      }
    } finally {
      batcher.drain();
      // Only tear down and drain the queue when not waiting out a rate limit;
      // the retry timer owns cleanup in that case.
      if (!rateLimitState.has(conversationId)) {
        cleanupExecution(conversationId);
        drainMessageQueue(conversationId);
      }
    }
  }
  return { processMessageWithStreaming };
}
@@ -0,0 +1,117 @@
1
import os from 'os';
import { spawn } from 'child_process';

/**
 * Registers agent action routes:
 *   POST /api/agents/:id/auth   — start an OAuth flow or a CLI login command
 *   POST /api/agents/:id/update — run the agent's self-update command
 * Progress is streamed to clients via broadcastSync as pseudo-script events.
 */
export function register(deps) {
  const { sendJSON, queries, broadcastSync, discoveredAgents, activeScripts, startGeminiOAuth, startCodexOAuth, getGeminiOAuthState, getCodexOAuthState, modelCache, PORT, BASE_URL, rootDir } = deps;

  const routes = {};

  // Route matcher: returns a handler for known method+path pairs, else null.
  routes['_match'] = (method, pathOnly) => {
    let m;
    if (method === 'POST' && (m = pathOnly.match(/^\/api\/agents\/([^/]+)\/auth$/))) return (req, res) => handleAgentAuth(req, res, m[1]);
    if (method === 'POST' && (m = pathOnly.match(/^\/api\/agents\/([^/]+)\/update$/))) return (req, res) => handleAgentUpdate(req, res, m[1]);
    return null;
  };

  // Shared browser-OAuth flow (codex + gemini): start the provider's OAuth,
  // broadcast the auth URL, then poll provider state until success/error.
  // Polling is abandoned after 5 minutes so the interval cannot leak.
  async function runOAuthFlow(req, res, agentId, { scriptName, providerAgentId, providerName, errorLabel, start, getState }) {
    try {
      const result = await start();
      const conversationId = '__agent_auth__';
      broadcastSync({ type: 'script_started', conversationId, script: scriptName, agentId: providerAgentId, timestamp: Date.now() });
      broadcastSync({ type: 'script_output', conversationId, data: `\x1b[36mOpening ${providerName} OAuth in your browser...\x1b[0m\r\n\r\nIf it doesn't open automatically, visit:\r\n${result.authUrl}\r\n`, stream: 'stdout', timestamp: Date.now() });
      const pollId = setInterval(() => {
        const state = getState();
        if (state.status === 'success') {
          clearInterval(pollId);
          const email = state.email || '';
          broadcastSync({ type: 'script_output', conversationId, data: `\r\n\x1b[32mAuthentication successful${email ? ' (' + email + ')' : ''}\x1b[0m\r\n`, stream: 'stdout', timestamp: Date.now() });
          broadcastSync({ type: 'script_stopped', conversationId, code: 0, timestamp: Date.now() });
        } else if (state.status === 'error') {
          clearInterval(pollId);
          broadcastSync({ type: 'script_output', conversationId, data: `\r\n\x1b[31mAuthentication failed: ${state.error}\x1b[0m\r\n`, stream: 'stderr', timestamp: Date.now() });
          broadcastSync({ type: 'script_stopped', conversationId, code: 1, error: state.error, timestamp: Date.now() });
        }
      }, 1000);
      setTimeout(() => clearInterval(pollId), 5 * 60 * 1000);
      sendJSON(req, res, 200, { ok: true, agentId, authUrl: result.authUrl, mode: result.mode });
    } catch (e) {
      console.error(errorLabel, e);
      sendJSON(req, res, 500, { error: e.message });
    }
  }

  // Spawn a local CLI command and stream its stdout/stderr to clients.
  // Only one script per conversationId may run at a time (409 when busy).
  function spawnAgentScript(req, res, agentId, { conversationId, scriptName, command, busyError, onClose }) {
    if (activeScripts.has(conversationId)) { sendJSON(req, res, 409, { error: busyError }); return; }
    // shell:true on Windows so .cmd shims resolve.
    const child = spawn(command.cmd, command.args, { stdio: ['pipe', 'pipe', 'pipe'], env: { ...process.env, FORCE_COLOR: '1' }, shell: os.platform() === 'win32' });
    activeScripts.set(conversationId, { process: child, script: scriptName, startTime: Date.now() });
    broadcastSync({ type: 'script_started', conversationId, script: scriptName, agentId, timestamp: Date.now() });
    const onData = (stream) => (chunk) => broadcastSync({ type: 'script_output', conversationId, data: chunk.toString(), stream, timestamp: Date.now() });
    child.stdout.on('data', onData('stdout'));
    child.stderr.on('data', onData('stderr'));
    child.stdout.on('error', () => {});
    child.stderr.on('error', () => {});
    child.on('error', (err) => { activeScripts.delete(conversationId); broadcastSync({ type: 'script_stopped', conversationId, code: 1, error: err.message, timestamp: Date.now() }); });
    child.on('close', (code) => { activeScripts.delete(conversationId); if (onClose) onClose(); broadcastSync({ type: 'script_stopped', conversationId, code: code || 0, timestamp: Date.now() }); });
    sendJSON(req, res, 200, { ok: true, agentId, pid: child.pid });
  }

  // POST /api/agents/:id/auth
  async function handleAgentAuth(req, res, agentId) {
    const agent = discoveredAgents.find(a => a.id === agentId);
    if (!agent) { sendJSON(req, res, 404, { error: 'Agent not found' }); return; }

    if (agentId === 'codex' || agentId === 'cli-codex') {
      await runOAuthFlow(req, res, agentId, {
        scriptName: 'auth-codex',
        providerAgentId: 'codex',
        providerName: 'OpenAI',
        errorLabel: '[codex-oauth] /api/agents/codex/auth failed:',
        start: () => startCodexOAuth(req, { PORT, BASE_URL }),
        getState: getCodexOAuthState,
      });
      return;
    }

    if (agentId === 'gemini') {
      await runOAuthFlow(req, res, agentId, {
        scriptName: 'auth-gemini',
        providerAgentId: 'gemini',
        providerName: 'Google',
        errorLabel: '[gemini-oauth] /api/agents/gemini/auth failed:',
        start: () => startGeminiOAuth(req, { PORT, BASE_URL, rootDir }),
        getState: getGeminiOAuthState,
      });
      return;
    }

    // Agents without browser OAuth authenticate through their own CLI.
    const authCommands = {
      'claude-code': { cmd: 'claude', args: ['setup-token'] },
      'opencode': { cmd: 'opencode', args: ['auth', 'login'] },
    };
    const authCmd = authCommands[agentId];
    if (!authCmd) { sendJSON(req, res, 400, { error: 'No auth command for this agent' }); return; }
    spawnAgentScript(req, res, agentId, { conversationId: '__agent_auth__', scriptName: 'auth-' + agentId, command: authCmd, busyError: 'Auth process already running' });
  }

  // POST /api/agents/:id/update — run the agent CLI's self-update and
  // invalidate its cached model list when the process closes.
  async function handleAgentUpdate(req, res, agentId) {
    const updateCommands = { 'claude-code': { cmd: 'claude', args: ['update', '--yes'] } };
    const updateCmd = updateCommands[agentId];
    if (!updateCmd) { sendJSON(req, res, 400, { error: 'No update command for this agent' }); return; }
    spawnAgentScript(req, res, agentId, { conversationId: '__agent_update__', scriptName: 'update-' + agentId, command: updateCmd, busyError: 'Update already running', onClose: () => modelCache.delete(agentId) });
  }

  return routes;
}
@@ -0,0 +1,30 @@
1
/**
 * Registers provider auth-config routes:
 *   GET  /api/auth/configs     — list saved provider configurations
 *   POST /api/auth/save-config — validate and persist a provider API key
 *                                (and optional default model)
 */
export function register(deps) {
  const { sendJSON, parseBody, getProviderConfigs, saveProviderConfig } = deps;

  const routes = {};

  // Return all saved provider configurations.
  routes['GET /api/auth/configs'] = (req, res) => {
    sendJSON(req, res, 200, getProviderConfigs());
  };

  // Validate the payload and persist the provider config; any parse or
  // persistence failure is reported as a 400 with the error message.
  routes['POST /api/auth/save-config'] = async (req, res) => {
    try {
      const { providerId, apiKey, defaultModel } = (await parseBody(req)) || {};
      const isNonEmptyString = (value, maxLen) => typeof value === 'string' && value.length > 0 && value.length <= maxLen;
      if (!isNonEmptyString(providerId, 100)) { sendJSON(req, res, 400, { error: 'Invalid providerId' }); return; }
      if (!isNonEmptyString(apiKey, 10000)) { sendJSON(req, res, 400, { error: 'Invalid apiKey' }); return; }
      if (defaultModel !== undefined && (typeof defaultModel !== 'string' || defaultModel.length > 200)) { sendJSON(req, res, 400, { error: 'Invalid defaultModel' }); return; }
      const configPath = saveProviderConfig(providerId, apiKey, defaultModel || '');
      sendJSON(req, res, 200, { success: true, path: configPath });
    } catch (err) {
      sendJSON(req, res, 400, { error: err.message });
    }
  };

  // Exact-match lookup: "<METHOD> <path>" → handler, else null.
  routes['_match'] = (method, pathOnly) => routes[`${method} ${pathOnly}`] || null;

  return routes;
}
@@ -0,0 +1,139 @@
1
/**
 * Registers conversation message/stream/queue routes:
 *   GET/POST     /api/conversations/:id/messages
 *   POST         /api/conversations/:id/stream
 *   GET          /api/conversations/:id/queue
 *   DELETE/PATCH /api/conversations/:id/queue/:messageId
 */
export function register(deps) {
  const { queries, sendJSON, parseBody, broadcastSync, processMessageWithStreaming, activeExecutions, messageQueues, debugLog, logError } = deps;

  const routes = {};

  // Route matcher: returns a handler for known method+path pairs, else null.
  routes['_match'] = (method, pathOnly) => {
    let m;

    if ((m = pathOnly.match(/^\/api\/conversations\/([^/]+)\/messages$/))) {
      if (method === 'GET') return (req, res) => handleGetMessages(req, res, m[1]);
      if (method === 'POST') return (req, res) => handlePostMessage(req, res, m[1]);
    }

    if (method === 'POST' && (m = pathOnly.match(/^\/api\/conversations\/([^/]+)\/stream$/)))
      return (req, res) => handleStream(req, res, m[1]);

    if ((m = pathOnly.match(/^\/api\/conversations\/([^/]+)\/queue$/))) {
      if (method === 'GET') return (req, res) => handleGetQueue(req, res, m[1]);
    }

    if ((m = pathOnly.match(/^\/api\/conversations\/([^/]+)\/queue\/([^/]+)$/))) {
      if (method === 'DELETE') return (req, res) => handleDeleteQueueItem(req, res, m[1], m[2]);
      if (method === 'PATCH') return (req, res) => handlePatchQueueItem(req, res, m[1], m[2]);
    }

    return null;
  };

  // GET .../messages — paginated history (limit capped at 500, offset >= 0).
  async function handleGetMessages(req, res, conversationId) {
    const url = new URL(req.url, 'http://localhost');
    // Fix: parse with an explicit radix and fall back to the defaults on
    // non-numeric input — previously parseInt('abc') propagated NaN through
    // Math.min/Math.max into the query layer.
    const parsedLimit = Number.parseInt(url.searchParams.get('limit') || '50', 10);
    const parsedOffset = Number.parseInt(url.searchParams.get('offset') || '0', 10);
    const limit = Math.min(Number.isNaN(parsedLimit) ? 50 : parsedLimit, 500);
    const offset = Math.max(Number.isNaN(parsedOffset) ? 0 : parsedOffset, 0);
    const result = queries.getPaginatedMessages(conversationId, limit, offset);
    sendJSON(req, res, 200, result);
  }

  // POST .../messages — store the user message, then either queue it (if an
  // execution is already active) or start streaming immediately. The HTTP
  // response is sent before streaming begins; errors surface via broadcast.
  async function handlePostMessage(req, res, conversationId) {
    const conv = queries.getConversation(conversationId);
    if (!conv) { sendJSON(req, res, 404, { error: 'Conversation not found' }); return; }
    const body = await parseBody(req);
    const agentId = body.agentId || conv.agentType || conv.agentId || 'claude-code';
    const model = body.model || conv.model || null;
    const subAgent = body.subAgent || conv.subAgent || null;
    const idempotencyKey = body.idempotencyKey || null;
    // NOTE(review): body.content is not validated here — confirm upstream
    // middleware guarantees it is a non-empty string.
    const message = queries.createMessage(conversationId, 'user', body.content, idempotencyKey);
    queries.createEvent('message.created', { role: 'user', messageId: message.id }, conversationId);
    broadcastSync({ type: 'message_created', conversationId, message, timestamp: Date.now() });

    if (activeExecutions.has(conversationId)) {
      if (!messageQueues.has(conversationId)) messageQueues.set(conversationId, []);
      messageQueues.get(conversationId).push({ content: body.content, agentId, model, messageId: message.id, subAgent });
      const queueLength = messageQueues.get(conversationId).length;
      broadcastSync({ type: 'queue_status', conversationId, queueLength, messageId: message.id, timestamp: Date.now() });
      sendJSON(req, res, 200, { message, queued: true, queuePosition: queueLength, idempotencyKey });
      return;
    }

    const session = queries.createSession(conversationId);
    queries.createEvent('session.created', { messageId: message.id, sessionId: session.id }, conversationId, session.id);
    activeExecutions.set(conversationId, { pid: null, startTime: Date.now(), sessionId: session.id, lastActivity: Date.now() });
    queries.setIsStreaming(conversationId, true);
    broadcastSync({ type: 'streaming_start', sessionId: session.id, conversationId, messageId: message.id, agentId, timestamp: Date.now() });
    sendJSON(req, res, 201, { message, session, idempotencyKey });

    // Fire-and-forget: the response is already sent; log uncaught failures.
    processMessageWithStreaming(conversationId, message.id, session.id, body.content, agentId, model, subAgent)
      .catch(err => {
        console.error(`[messages] Uncaught error for conv ${conversationId}:`, err.message);
        debugLog(`[messages] Uncaught error: ${err.message}`);
        logError('processMessageWithStreaming', err, { convId: conversationId });
      });
  }

  // POST .../stream — same flow as handlePostMessage but accepts content
  // under either `content` or `message` and responds with a streamId.
  async function handleStream(req, res, conversationId) {
    const body = await parseBody(req);
    const conv = queries.getConversation(conversationId);
    if (!conv) { sendJSON(req, res, 404, { error: 'Conversation not found' }); return; }

    const prompt = body.content || body.message || '';
    const agentId = body.agentId || conv.agentType || conv.agentId || 'claude-code';
    const model = body.model || conv.model || null;
    const subAgent = body.subAgent || conv.subAgent || null;

    const userMessage = queries.createMessage(conversationId, 'user', prompt);
    queries.createEvent('message.created', { role: 'user', messageId: userMessage.id }, conversationId);
    broadcastSync({ type: 'message_created', conversationId, message: userMessage, timestamp: Date.now() });

    if (activeExecutions.has(conversationId)) {
      debugLog(`[stream] Conversation ${conversationId} is busy, queuing message`);
      if (!messageQueues.has(conversationId)) messageQueues.set(conversationId, []);
      messageQueues.get(conversationId).push({ content: prompt, agentId, model, messageId: userMessage.id, subAgent });
      const queueLength = messageQueues.get(conversationId).length;
      broadcastSync({ type: 'queue_status', conversationId, queueLength, messageId: userMessage.id, timestamp: Date.now() });
      sendJSON(req, res, 200, { message: userMessage, queued: true, queuePosition: queueLength });
      return;
    }

    const session = queries.createSession(conversationId);
    queries.createEvent('session.created', { messageId: userMessage.id, sessionId: session.id }, conversationId, session.id);
    activeExecutions.set(conversationId, { pid: null, startTime: Date.now(), sessionId: session.id, lastActivity: Date.now() });
    queries.setIsStreaming(conversationId, true);
    broadcastSync({ type: 'streaming_start', sessionId: session.id, conversationId, messageId: userMessage.id, agentId, timestamp: Date.now() });
    sendJSON(req, res, 200, { message: userMessage, session, streamId: session.id });

    processMessageWithStreaming(conversationId, userMessage.id, session.id, prompt, agentId, model, subAgent)
      .catch(err => debugLog(`[stream] Uncaught error: ${err.stack || err.message}`));
  }

  // GET .../queue — return the conversation's pending queue (empty if none).
  async function handleGetQueue(req, res, conversationId) {
    const conv = queries.getConversation(conversationId);
    if (!conv) { sendJSON(req, res, 404, { error: 'Conversation not found' }); return; }
    const queue = messageQueues.get(conversationId) || [];
    sendJSON(req, res, 200, { queue });
  }

  // DELETE .../queue/:messageId — remove one queued message.
  async function handleDeleteQueueItem(req, res, conversationId, messageId) {
    const queue = messageQueues.get(conversationId);
    if (!queue) { sendJSON(req, res, 404, { error: 'Queue not found' }); return; }
    const index = queue.findIndex(q => q.messageId === messageId);
    if (index === -1) { sendJSON(req, res, 404, { error: 'Queued message not found' }); return; }
    queue.splice(index, 1);
    if (queue.length === 0) messageQueues.delete(conversationId);
    broadcastSync({ type: 'queue_status', conversationId, queueLength: queue?.length || 0, timestamp: Date.now() });
    sendJSON(req, res, 200, { deleted: true });
  }

  // PATCH .../queue/:messageId — edit a queued message's content/agent.
  async function handlePatchQueueItem(req, res, conversationId, messageId) {
    const body = await parseBody(req);
    const queue = messageQueues.get(conversationId);
    if (!queue) { sendJSON(req, res, 404, { error: 'Queue not found' }); return; }
    const item = queue.find(q => q.messageId === messageId);
    if (!item) { sendJSON(req, res, 404, { error: 'Queued message not found' }); return; }
    if (body.content !== undefined) item.content = body.content;
    if (body.agentId !== undefined) item.agentId = body.agentId;
    broadcastSync({ type: 'queue_updated', conversationId, messageId, content: item.content, agentId: item.agentId, timestamp: Date.now() });
    sendJSON(req, res, 200, { updated: true, item });
  }

  return routes;
}