@vellumai/assistant 0.3.7 → 0.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vellumai/assistant",
3
- "version": "0.3.7",
3
+ "version": "0.3.8",
4
4
  "type": "module",
5
5
  "bin": {
6
6
  "vellum": "./src/index.ts"
@@ -0,0 +1,284 @@
1
+ /**
2
+ * Tests for POST /v1/messages queue-if-busy behavior and hub publishing.
3
+ *
4
+ * Validates that:
5
+ * - Messages are accepted (202) when the session is idle, with hub events published.
6
+ * - Messages are queued (202, queued: true) when the session is busy, not 409.
7
+ * - SSE subscribers receive events from messages sent via this endpoint.
8
+ */
9
+ import { describe, test, expect, beforeEach, afterAll, mock } from 'bun:test';
10
+ import { mkdtempSync, rmSync, realpathSync } from 'node:fs';
11
+ import { tmpdir } from 'node:os';
12
+ import { join } from 'node:path';
13
+ import type { ServerMessage } from '../daemon/ipc-protocol.js';
14
+ import type { Session } from '../daemon/session.js';
15
+
16
+ const testDir = realpathSync(mkdtempSync(join(tmpdir(), 'send-endpoint-busy-test-')));
17
+
18
+ mock.module('../util/platform.js', () => ({
19
+ getRootDir: () => testDir,
20
+ getDataDir: () => testDir,
21
+ isMacOS: () => process.platform === 'darwin',
22
+ isLinux: () => process.platform === 'linux',
23
+ isWindows: () => process.platform === 'win32',
24
+ getSocketPath: () => join(testDir, 'test.sock'),
25
+ getPidPath: () => join(testDir, 'test.pid'),
26
+ getDbPath: () => join(testDir, 'test.db'),
27
+ getLogPath: () => join(testDir, 'test.log'),
28
+ ensureDataDir: () => {},
29
+ }));
30
+
31
+ mock.module('../util/logger.js', () => ({
32
+ getLogger: () => new Proxy({} as Record<string, unknown>, {
33
+ get: () => () => {},
34
+ }),
35
+ }));
36
+
37
+ mock.module('../config/loader.js', () => ({
38
+ getConfig: () => ({
39
+ model: 'test',
40
+ provider: 'test',
41
+ apiKeys: {},
42
+ memory: { enabled: false },
43
+ rateLimit: { maxRequestsPerMinute: 0, maxTokensPerSession: 0 },
44
+ secretDetection: { enabled: false },
45
+ }),
46
+ }));
47
+
48
+ import { initializeDb, getDb, resetDb } from '../memory/db.js';
49
+ import { RuntimeHttpServer } from '../runtime/http-server.js';
50
+ import { AssistantEventHub } from '../runtime/assistant-event-hub.js';
51
+ import type { AssistantEvent } from '../runtime/assistant-event.js';
52
+
53
+ initializeDb();
54
+
55
+ // ---------------------------------------------------------------------------
56
+ // Session helpers
57
+ // ---------------------------------------------------------------------------
58
+
59
+ /** Session that completes its agent loop quickly and emits a text delta + message_complete. */
60
+ function makeCompletingSession(): Session {
61
+ let processing = false;
62
+ return {
63
+ isProcessing: () => processing,
64
+ persistUserMessage: (_content: string, _attachments: unknown[], requestId?: string) => {
65
+ processing = true;
66
+ return requestId ?? 'msg-1';
67
+ },
68
+ memoryPolicy: { scopeId: 'default', includeDefaultFallback: false, strictSideEffects: false },
69
+ setChannelCapabilities: () => {},
70
+ setAssistantId: () => {},
71
+ setGuardianContext: () => {},
72
+ setCommandIntent: () => {},
73
+ updateClient: () => {},
74
+ enqueueMessage: () => ({ queued: false, requestId: 'noop' }),
75
+ runAgentLoop: async (_content: string, _messageId: string, onEvent: (msg: ServerMessage) => void) => {
76
+ onEvent({ type: 'assistant_text_delta', text: 'Hello!' });
77
+ onEvent({ type: 'message_complete', sessionId: 'test-session' });
78
+ processing = false;
79
+ },
80
+ handleConfirmationResponse: () => {},
81
+ handleSecretResponse: () => {},
82
+ } as unknown as Session;
83
+ }
84
+
85
+ /** Session that hangs forever in the agent loop (simulates a busy session). */
86
+ function makeHangingSession(): Session {
87
+ let processing = false;
88
+ const enqueuedMessages: Array<{ content: string; onEvent: (msg: ServerMessage) => void; requestId: string }> = [];
89
+ return {
90
+ isProcessing: () => processing,
91
+ persistUserMessage: (_content: string, _attachments: unknown[], requestId?: string) => {
92
+ processing = true;
93
+ return requestId ?? 'msg-1';
94
+ },
95
+ memoryPolicy: { scopeId: 'default', includeDefaultFallback: false, strictSideEffects: false },
96
+ setChannelCapabilities: () => {},
97
+ setAssistantId: () => {},
98
+ setGuardianContext: () => {},
99
+ setCommandIntent: () => {},
100
+ updateClient: () => {},
101
+ enqueueMessage: (content: string, _attachments: unknown[], onEvent: (msg: ServerMessage) => void, requestId: string) => {
102
+ enqueuedMessages.push({ content, onEvent, requestId });
103
+ return { queued: true, requestId };
104
+ },
105
+ runAgentLoop: async () => {
106
+ // Hang forever
107
+ await new Promise<void>(() => {});
108
+ },
109
+ handleConfirmationResponse: () => {},
110
+ handleSecretResponse: () => {},
111
+ _enqueuedMessages: enqueuedMessages,
112
+ } as unknown as Session;
113
+ }
114
+
115
+ // ---------------------------------------------------------------------------
116
+ // Tests
117
+ // ---------------------------------------------------------------------------
118
+
119
+ const TEST_TOKEN = 'test-bearer-token-send';
120
+ const AUTH_HEADERS = { Authorization: `Bearer ${TEST_TOKEN}` };
121
+
122
+ describe('POST /v1/messages — queue-if-busy and hub publishing', () => {
123
+ let server: RuntimeHttpServer;
124
+ let port: number;
125
+ let eventHub: AssistantEventHub;
126
+
127
+ beforeEach(() => {
128
+ const db = getDb();
129
+ db.run('DELETE FROM messages');
130
+ db.run('DELETE FROM conversations');
131
+ db.run('DELETE FROM conversation_keys');
132
+ eventHub = new AssistantEventHub();
133
+ });
134
+
135
+ afterAll(() => {
136
+ resetDb();
137
+ try { rmSync(testDir, { recursive: true, force: true }); } catch { /* best effort */ }
138
+ });
139
+
140
+ async function startServer(sessionFactory: () => Session): Promise<void> {
141
+ port = 19000 + Math.floor(Math.random() * 1000);
142
+ server = new RuntimeHttpServer({
143
+ port,
144
+ bearerToken: TEST_TOKEN,
145
+ sendMessageDeps: {
146
+ getOrCreateSession: async () => sessionFactory(),
147
+ assistantEventHub: eventHub,
148
+ resolveAttachments: () => [],
149
+ },
150
+ });
151
+ await server.start();
152
+ }
153
+
154
+ async function stopServer(): Promise<void> {
155
+ await server?.stop();
156
+ }
157
+
158
+ function messagesUrl(): string {
159
+ return `http://127.0.0.1:${port}/v1/messages`;
160
+ }
161
+
162
+ // ── Idle session: immediate processing ──────────────────────────────
163
+
164
+ test('returns 202 with accepted: true and messageId when session is idle', async () => {
165
+ await startServer(() => makeCompletingSession());
166
+
167
+ const res = await fetch(messagesUrl(), {
168
+ method: 'POST',
169
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
170
+ body: JSON.stringify({ conversationKey: 'conv-idle', content: 'Hello', sourceChannel: 'macos' }),
171
+ });
172
+ const body = await res.json() as { accepted: boolean; messageId: string };
173
+
174
+ expect(res.status).toBe(202);
175
+ expect(body.accepted).toBe(true);
176
+ expect(body.messageId).toBeDefined();
177
+
178
+ await stopServer();
179
+ });
180
+
181
+ test('publishes events to assistantEventHub when session is idle', async () => {
182
+ const publishedEvents: AssistantEvent[] = [];
183
+
184
+ await startServer(() => makeCompletingSession());
185
+
186
+ eventHub.subscribe(
187
+ { assistantId: 'self' },
188
+ (event) => { publishedEvents.push(event); },
189
+ );
190
+
191
+ const res = await fetch(messagesUrl(), {
192
+ method: 'POST',
193
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
194
+ body: JSON.stringify({ conversationKey: 'conv-hub', content: 'Hello hub', sourceChannel: 'macos' }),
195
+ });
196
+ expect(res.status).toBe(202);
197
+
198
+ // Wait for the async agent loop to complete and events to be published
199
+ await new Promise((r) => setTimeout(r, 100));
200
+
201
+ // Should have received assistant_text_delta and message_complete
202
+ const types = publishedEvents.map((e) => e.message.type);
203
+ expect(types).toContain('assistant_text_delta');
204
+ expect(types).toContain('message_complete');
205
+
206
+ await stopServer();
207
+ });
208
+
209
+ // ── Busy session: queue-if-busy ─────────────────────────────────────
210
+
211
+ test('returns 202 with queued: true when session is busy (not 409)', async () => {
212
+ const session = makeHangingSession();
213
+ await startServer(() => session);
214
+
215
+ // First message starts the agent loop and makes the session busy
216
+ const res1 = await fetch(messagesUrl(), {
217
+ method: 'POST',
218
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
219
+ body: JSON.stringify({ conversationKey: 'conv-busy', content: 'First', sourceChannel: 'macos' }),
220
+ });
221
+ expect(res1.status).toBe(202);
222
+ const body1 = await res1.json() as { accepted: boolean; messageId: string };
223
+ expect(body1.accepted).toBe(true);
224
+ expect(body1.messageId).toBeDefined();
225
+
226
+ // Wait for the agent loop to start
227
+ await new Promise((r) => setTimeout(r, 30));
228
+
229
+ // Second message should be queued, not rejected
230
+ const res2 = await fetch(messagesUrl(), {
231
+ method: 'POST',
232
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
233
+ body: JSON.stringify({ conversationKey: 'conv-busy', content: 'Second', sourceChannel: 'macos' }),
234
+ });
235
+ const body2 = await res2.json() as { accepted: boolean; queued: boolean };
236
+
237
+ expect(res2.status).toBe(202);
238
+ expect(body2.accepted).toBe(true);
239
+ expect(body2.queued).toBe(true);
240
+
241
+ await stopServer();
242
+ });
243
+
244
+ // ── Validation ──────────────────────────────────────────────────────
245
+
246
+ test('returns 400 when sourceChannel is missing', async () => {
247
+ await startServer(() => makeCompletingSession());
248
+
249
+ const res = await fetch(messagesUrl(), {
250
+ method: 'POST',
251
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
252
+ body: JSON.stringify({ conversationKey: 'conv-val', content: 'Hello' }),
253
+ });
254
+ expect(res.status).toBe(400);
255
+
256
+ await stopServer();
257
+ });
258
+
259
+ test('returns 400 when content is empty', async () => {
260
+ await startServer(() => makeCompletingSession());
261
+
262
+ const res = await fetch(messagesUrl(), {
263
+ method: 'POST',
264
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
265
+ body: JSON.stringify({ conversationKey: 'conv-empty', content: '', sourceChannel: 'macos' }),
266
+ });
267
+ expect(res.status).toBe(400);
268
+
269
+ await stopServer();
270
+ });
271
+
272
+ test('returns 400 when conversationKey is missing', async () => {
273
+ await startServer(() => makeCompletingSession());
274
+
275
+ const res = await fetch(messagesUrl(), {
276
+ method: 'POST',
277
+ headers: { 'Content-Type': 'application/json', ...AUTH_HEADERS },
278
+ body: JSON.stringify({ content: 'Hello', sourceChannel: 'macos' }),
279
+ });
280
+ expect(res.status).toBe(400);
281
+
282
+ await stopServer();
283
+ });
284
+ });
@@ -397,8 +397,8 @@ describe('SubagentManager sendMessage validation', () => {
397
397
  const subagentId = 'sub-1';
398
398
  injectFakeSubagent(manager, subagentId, makeState(subagentId));
399
399
 
400
- expect(manager.sendMessage(subagentId, '')).toBe(false);
401
- expect(manager.sendMessage(subagentId, ' ')).toBe(false);
402
- expect(manager.sendMessage(subagentId, '\n\t')).toBe(false);
400
+ expect(manager.sendMessage(subagentId, '')).toBe('empty');
401
+ expect(manager.sendMessage(subagentId, ' ')).toBe('empty');
402
+ expect(manager.sendMessage(subagentId, '\n\t')).toBe('empty');
403
403
  });
404
404
  });
@@ -34,11 +34,11 @@ Preprocess a video asset: detect dead time via mpdecimate, segment the video int
34
34
 
35
35
  Parameters:
36
36
  - `asset_id` (required) — ID of the media asset.
37
- - `interval_seconds` — Interval between keyframes (default: 3s).
38
- - `segment_duration` — Duration of each segment window (default: 20s).
37
+ - `interval_seconds` — Interval between keyframes (default: 1s). Use 0.5s for sports/action content where frame density matters.
38
+ - `segment_duration` — Duration of each segment window (default: 15s).
39
39
  - `dead_time_threshold` — Sensitivity for dead-time detection (default: 0.02).
40
40
  - `section_config` — Path to a JSON file with manual section boundaries.
41
- - `skip_dead_time` — Whether to detect and skip dead time (default: true).
41
+ - `skip_dead_time` — Whether to detect and skip dead time (default: false). Dead-time detection can be too aggressive for continuous action video like sports — it may incorrectly skip live play. Enable only for content with clear idle periods (e.g., lectures, surveillance footage).
42
42
  - `short_edge` — Short edge resolution for downscaled frames in pixels (default: 480).
43
43
 
44
44
  ### analyze_keyframes
@@ -74,7 +74,7 @@ Get a diagnostic report for a media asset. Returns:
74
74
  - **Processing stats**: total keyframes extracted.
75
75
  - **Per-stage status and timing**: which stages (preprocess, map, reduce) have run, how long each took, current progress.
76
76
  - **Failure reasons**: last error from any failed stage.
77
- - **Cost estimation**: based on segment count and Gemini 2.5 Flash pricing, plus a note about Claude reduce costs.
77
+ - **Cost estimation**: based on segment count and current Gemini pricing.
78
78
 
79
79
  ## Services
80
80
 
@@ -110,6 +110,82 @@ Limits concurrent API calls during the Map phase to avoid rate limiting.
110
110
 
111
111
  Tracks estimated API costs during pipeline execution.
112
112
 
113
+ ## Best Practices
114
+
115
+ ### Map Prompt Strategy: Go Broad, Not Targeted
116
+
117
+ The single most important insight: **always use a broad, descriptive map prompt** instead of a targeted one.
118
+
119
+ A targeted prompt like "find turnovers" locks you into one topic. If the user later wants to ask about defense, formations, or specific players, you'd need to reprocess the entire video. Instead, run a general-purpose descriptive prompt that captures everything visible, creating a rich, reusable dataset. Then all follow-up questions can be handled via `query_media` with no reprocessing.
120
+
121
+ **One map run, many queries.**
122
+
123
+ The map output will be larger (more tokens per segment), but Gemini Flash is cheap enough that this is a good tradeoff. Only use a targeted prompt if the user explicitly asks for something narrow.
124
+
125
+ #### Sample General-Purpose Map Prompt
126
+
127
+ Use this as a starting point for the `system_prompt` parameter in `analyze_keyframes`:
128
+
129
+ ```
130
+ You are analyzing keyframes from a video. For each segment, describe everything you can observe:
131
+
132
+ - People visible: count, positions, identifying features (jersey numbers, clothing, names if visible)
133
+ - Actions and movements: what people are doing, direction of movement, interactions
134
+ - Objects of interest: ball location, equipment, vehicles, on-screen graphics
135
+ - Environment: setting, lighting, weather if outdoors
136
+ - Text on screen: scores, captions, titles, signs, timestamps
137
+ - Scene composition: camera angle, zoom level, any transitions between shots
138
+ - Any stoppages, pauses, or changes in activity
139
+
140
+ Be specific and factual. Describe what you see, not what you infer happened between frames.
141
+ ```
142
+
143
+ #### Sample Output Schema
144
+
145
+ ```json
146
+ {
147
+ "type": "object",
148
+ "properties": {
149
+ "scene_description": { "type": "string" },
150
+ "people": {
151
+ "type": "array",
152
+ "items": {
153
+ "type": "object",
154
+ "properties": {
155
+ "description": { "type": "string" },
156
+ "position": { "type": "string" },
157
+ "action": { "type": "string" }
158
+ }
159
+ }
160
+ },
161
+ "objects_of_interest": { "type": "array", "items": { "type": "string" } },
162
+ "on_screen_text": { "type": "array", "items": { "type": "string" } },
163
+ "camera": { "type": "string" },
164
+ "notable_events": { "type": "array", "items": { "type": "string" } }
165
+ }
166
+ }
167
+ ```
168
+
169
+ ### Clip Delivery
170
+
171
+ The `generate_clip` tool outputs clips as temporary files. These may not deliver reliably via sandbox attachments. For reliable delivery, use `host_bash` + ffmpeg to save clips to a user-specified location as a fallback.
172
+
173
+ ## Known Limitations — Vision Analysis
174
+
175
+ Gemini performs well at **spatial/descriptive analysis** from static keyframes:
176
+ - Player positions, formations, and spacing
177
+ - Jersey numbers and identifying features
178
+ - Ball location and which team has possession
179
+ - Score and on-screen text
180
+ - Camera angles and scene composition
181
+
182
+ Gemini **hallucinates when asked to detect fast temporal events** from static frames, regardless of frame density:
183
+ - Turnovers, steals, fouls, and specific plays
184
+ - Fast transitions and split-second actions
185
+ - Causality between frames (what "happened" vs. what's visible)
186
+
187
+ The model is good at describing **what is there** but bad at detecting **what happened**. Structure your map prompts and queries accordingly — ask the model to describe scenes, then use `query_media` (Claude) to reason about patterns and events across the descriptive data.
188
+
113
189
  ## Operator Runbook
114
190
 
115
191
  ### Monitoring Progress
@@ -137,16 +213,7 @@ After fixing the root cause, re-run the failed stage. The pipeline is resumable
137
213
 
138
214
  ### Cost Expectations
139
215
 
140
- The Map phase (Gemini 2.5 Flash) is the primary cost driver. Cost scales with video duration, keyframe interval, and segment size:
141
-
142
- | Video Duration | Interval | Keyframes | Segments (~10 frames each) | Estimated Map Cost |
143
- |----------------|----------|-----------|----------------------------|--------------------|
144
- | 30 min | 3s | ~600 | ~60 | ~$0.06 |
145
- | 60 min | 3s | ~1,200 | ~120 | ~$0.12 |
146
- | 90 min | 3s | ~1,800 | ~180 | ~$0.18 |
147
- | 90 min | 5s | ~1,080 | ~108 | ~$0.11 |
148
-
149
- The Reduce phase (Claude) adds a small additional cost per query. The `media_diagnostics` tool provides per-asset cost estimates.
216
+ Use `media_diagnostics` to get per-asset cost estimates. The Map phase (Gemini) is the primary cost driver; it scales with video duration and keyframe interval. The Q&A phase (Claude) is negligible per query.
150
217
 
151
218
  ### Known Limitations
152
219
 
@@ -67,11 +67,11 @@
67
67
  },
68
68
  "interval_seconds": {
69
69
  "type": "number",
70
- "description": "Interval between keyframes in seconds. Default: 3"
70
+ "description": "Interval between keyframes in seconds. Default: 1. Use 0.5 for sports/action content."
71
71
  },
72
72
  "segment_duration": {
73
73
  "type": "number",
74
- "description": "Duration of each segment window in seconds. Default: 20"
74
+ "description": "Duration of each segment window in seconds. Default: 15"
75
75
  },
76
76
  "dead_time_threshold": {
77
77
  "type": "number",
@@ -83,7 +83,7 @@
83
83
  },
84
84
  "skip_dead_time": {
85
85
  "type": "boolean",
86
- "description": "Whether to detect and skip dead time. Default: true"
86
+ "description": "Whether to detect and skip dead time. Default: false. Can be too aggressive for continuous action video like sports."
87
87
  },
88
88
  "short_edge": {
89
89
  "type": "number",
@@ -355,13 +355,13 @@ export async function preprocessForAsset(
355
355
  onProgress?: (msg: string) => void,
356
356
  ): Promise<PreprocessManifest> {
357
357
  const config: PreprocessConfig = {
358
- intervalSeconds: options.intervalSeconds ?? 3,
359
- segmentDuration: options.segmentDuration ?? 20,
358
+ intervalSeconds: options.intervalSeconds ?? 1,
359
+ segmentDuration: options.segmentDuration ?? 15,
360
360
  deadTimeThreshold: options.deadTimeThreshold ?? 0.02,
361
361
  shortEdge: options.shortEdge ?? 480,
362
362
  };
363
363
 
364
- const skipDeadTime = options.skipDeadTime ?? true;
364
+ const skipDeadTime = options.skipDeadTime ?? false;
365
365
 
366
366
  const asset = getMediaAssetById(assetId);
367
367
  if (!asset) {
@@ -19,6 +19,10 @@ const DAEMON_TIMEOUT_DEFAULTS = {
19
19
  sigkillGracePeriodMs: 2000,
20
20
  };
21
21
 
22
+ function isPositiveInteger(v: unknown): v is number {
23
+ return typeof v === 'number' && Number.isInteger(v) && v > 0;
24
+ }
25
+
22
26
  /**
23
27
  * Read daemon timeout values directly from the config JSON file, bypassing
24
28
  * loadConfig() and its ensureMigratedDataDir()/ensureDataDir() side effects.
@@ -30,18 +34,15 @@ function readDaemonTimeouts(): typeof DAEMON_TIMEOUT_DEFAULTS {
30
34
  const raw = JSON.parse(readFileSync(getWorkspaceConfigPath(), 'utf-8'));
31
35
  if (raw.daemon && typeof raw.daemon === 'object') {
32
36
  return {
33
- startupSocketWaitMs:
34
- typeof raw.daemon.startupSocketWaitMs === 'number'
35
- ? raw.daemon.startupSocketWaitMs
36
- : DAEMON_TIMEOUT_DEFAULTS.startupSocketWaitMs,
37
- stopTimeoutMs:
38
- typeof raw.daemon.stopTimeoutMs === 'number'
39
- ? raw.daemon.stopTimeoutMs
40
- : DAEMON_TIMEOUT_DEFAULTS.stopTimeoutMs,
41
- sigkillGracePeriodMs:
42
- typeof raw.daemon.sigkillGracePeriodMs === 'number'
43
- ? raw.daemon.sigkillGracePeriodMs
44
- : DAEMON_TIMEOUT_DEFAULTS.sigkillGracePeriodMs,
37
+ startupSocketWaitMs: isPositiveInteger(raw.daemon.startupSocketWaitMs)
38
+ ? raw.daemon.startupSocketWaitMs
39
+ : DAEMON_TIMEOUT_DEFAULTS.startupSocketWaitMs,
40
+ stopTimeoutMs: isPositiveInteger(raw.daemon.stopTimeoutMs)
41
+ ? raw.daemon.stopTimeoutMs
42
+ : DAEMON_TIMEOUT_DEFAULTS.stopTimeoutMs,
43
+ sigkillGracePeriodMs: isPositiveInteger(raw.daemon.sigkillGracePeriodMs)
44
+ ? raw.daemon.sigkillGracePeriodMs
45
+ : DAEMON_TIMEOUT_DEFAULTS.sigkillGracePeriodMs,
45
46
  };
46
47
  }
47
48
  } catch {
@@ -109,10 +109,17 @@ export function handleSubagentMessage(
109
109
  return;
110
110
  }
111
111
 
112
- const sent = manager.sendMessage(msg.subagentId, msg.content);
112
+ const result = manager.sendMessage(msg.subagentId, msg.content);
113
113
 
114
- if (!sent) {
115
- log.warn({ subagentId: msg.subagentId }, 'Client sent message to terminal subagent');
114
+ if (result === 'queue_full') {
115
+ log.warn({ subagentId: msg.subagentId }, 'Subagent message rejected queue full');
116
+ ctx.send(socket, {
117
+ type: 'error',
118
+ message: `Subagent "${msg.subagentId}" message queue is full. Please wait for current messages to be processed.`,
119
+ category: 'queue_full',
120
+ });
121
+ } else if (result !== 'sent') {
122
+ log.warn({ subagentId: msg.subagentId, reason: result }, 'Client sent message to terminal subagent');
116
123
  ctx.send(socket, {
117
124
  type: 'error',
118
125
  message: `Subagent "${msg.subagentId}" not found or in terminal state.`,
@@ -35,6 +35,8 @@ import { QdrantManager } from '../memory/qdrant-manager.js';
35
35
  import { initQdrantClient } from '../memory/qdrant-client.js';
36
36
  import { startScheduler } from '../schedule/scheduler.js';
37
37
  import { RuntimeHttpServer } from '../runtime/http-server.js';
38
+ import { assistantEventHub } from '../runtime/assistant-event-hub.js';
39
+ import * as attachmentsStore from '../memory/attachments-store.js';
38
40
  import { getHookManager } from '../hooks/manager.js';
39
41
  import { installTemplates } from '../hooks/templates.js';
40
42
  import { installCliLaunchers } from './install-cli-launchers.js';
@@ -46,6 +48,7 @@ import { createApprovalCopyGenerator, createApprovalConversationGenerator } from
46
48
  import { initializeProvidersAndTools, registerWatcherProviders, registerMessagingProviders } from './providers-setup.js';
47
49
  import { installShutdownHandlers } from './shutdown-handlers.js';
48
50
  import { writePid, cleanupPidFile } from './daemon-control.js';
51
+ import { initPairingHandlers } from './handlers/pairing.js';
49
52
 
50
53
  // Re-export public API so existing consumers don't need to change imports
51
54
  export {
@@ -259,11 +262,24 @@ export async function runDaemon(): Promise<void> {
259
262
  interfacesDir: getInterfacesDir(),
260
263
  approvalCopyGenerator: createApprovalCopyGenerator(),
261
264
  approvalConversationGenerator: createApprovalConversationGenerator(),
265
+ sendMessageDeps: {
266
+ getOrCreateSession: (conversationId) =>
267
+ server.getSessionForMessages(conversationId),
268
+ assistantEventHub,
269
+ resolveAttachments: (attachmentIds) =>
270
+ attachmentsStore.getAttachmentsByIds(attachmentIds).map((a) => ({
271
+ id: a.id,
272
+ filename: a.originalFilename,
273
+ mimeType: a.mimeType,
274
+ data: a.dataBase64,
275
+ })),
276
+ },
262
277
  });
263
278
  try {
264
279
  await runtimeHttp.start();
265
280
  setRelayBroadcast((msg) => server.broadcast(msg));
266
281
  runtimeHttp.setPairingBroadcast((msg) => server.broadcast(msg));
282
+ initPairingHandlers(runtimeHttp.getPairingStore(), bearerToken);
267
283
  server.setHttpPort(httpPort);
268
284
  log.info({ port: httpPort, hostname }, 'Daemon startup: runtime HTTP server listening');
269
285
  } catch (err) {
@@ -819,6 +819,14 @@ export class DaemonServer {
819
819
  return { messageId };
820
820
  }
821
821
 
822
+ /**
823
+ * Expose session lookup for the POST /v1/messages handler.
824
+ * The handler manages busy-state checking and queueing itself.
825
+ */
826
+ async getSessionForMessages(conversationId: string): Promise<Session> {
827
+ return this.getOrCreateSession(conversationId, undefined, true);
828
+ }
829
+
822
830
  createRunOrchestrator(): RunOrchestrator {
823
831
  return new RunOrchestrator({
824
832
  getOrCreateSession: (conversationId, transport) =>
@@ -8,4 +8,5 @@ import type { DrizzleDb } from '../db-connection.js';
8
8
  */
9
9
  export function migrateMemorySegmentsIndexes(database: DrizzleDb): void {
10
10
  database.run(/*sql*/ `CREATE INDEX IF NOT EXISTS idx_memory_segments_scope_id ON memory_segments(scope_id)`);
11
+ database.run(/*sql*/ `DROP INDEX IF EXISTS idx_memory_segments_conversation_id`);
11
12
  }
@@ -64,7 +64,6 @@ export const memorySegments = sqliteTable('memory_segments', {
64
64
  updatedAt: integer('updated_at').notNull(),
65
65
  }, (table) => [
66
66
  index('idx_memory_segments_scope_id').on(table.scopeId),
67
- index('idx_memory_segments_conversation_id').on(table.conversationId),
68
67
  ]);
69
68
 
70
69
  export const memoryItems = sqliteTable('memory_items', {
@@ -121,6 +121,7 @@ export type {
121
121
  RuntimeAttachmentMetadata,
122
122
  ApprovalCopyGenerator,
123
123
  ApprovalConversationGenerator,
124
+ SendMessageDeps,
124
125
  } from './http-types.js';
125
126
 
126
127
  import type {
@@ -129,6 +130,7 @@ import type {
129
130
  RuntimeHttpServerOptions,
130
131
  ApprovalCopyGenerator,
131
132
  ApprovalConversationGenerator,
133
+ SendMessageDeps,
132
134
  } from './http-types.js';
133
135
 
134
136
  const log = getLogger('runtime-http');
@@ -156,6 +158,7 @@ export class RuntimeHttpServer {
156
158
  private sweepInProgress = false;
157
159
  private pairingStore = new PairingStore();
158
160
  private pairingBroadcast?: (msg: ServerMessage) => void;
161
+ private sendMessageDeps?: SendMessageDeps;
159
162
 
160
163
  constructor(options: RuntimeHttpServerOptions = {}) {
161
164
  this.port = options.port ?? DEFAULT_PORT;
@@ -167,6 +170,7 @@ export class RuntimeHttpServer {
167
170
  this.approvalCopyGenerator = options.approvalCopyGenerator;
168
171
  this.approvalConversationGenerator = options.approvalConversationGenerator;
169
172
  this.interfacesDir = options.interfacesDir ?? null;
173
+ this.sendMessageDeps = options.sendMessageDeps;
170
174
  }
171
175
 
172
176
  /** The port the server is actually listening on (resolved after start). */
@@ -558,6 +562,7 @@ export class RuntimeHttpServer {
558
562
  return await handleSendMessage(req, {
559
563
  processMessage: this.processMessage,
560
564
  persistAndProcessMessage: this.persistAndProcessMessage,
565
+ sendMessageDeps: this.sendMessageDeps,
561
566
  });
562
567
  }
563
568
 
@@ -5,6 +5,8 @@ import type { ChannelId } from '../channels/types.js';
5
5
  import type { RunOrchestrator } from './run-orchestrator.js';
6
6
  import type { GuardianRuntimeContext } from '../daemon/session-runtime-assembly.js';
7
7
  import type { ApprovalMessageContext, ComposeApprovalMessageGenerativeOptions } from './approval-message-composer.js';
8
+ import type { Session } from '../daemon/session.js';
9
+ import type { AssistantEventHub } from './assistant-event-hub.js';
8
10
 
9
11
  /**
10
12
  * Daemon-injected function that generates approval copy using a provider.
@@ -84,6 +86,24 @@ export type NonBlockingMessageProcessor = (
84
86
  sourceChannel?: ChannelId,
85
87
  ) => Promise<{ messageId: string }>;
86
88
 
89
+ /**
90
+ * Dependencies for the POST /v1/messages handler.
91
+ *
92
+ * The handler needs direct access to the session so it can check busy state,
93
+ * persist user messages, fire the agent loop, or queue messages when busy.
94
+ * Hub publishing wires outbound events to the SSE stream.
95
+ */
96
+ export interface SendMessageDeps {
97
+ getOrCreateSession: (conversationId: string) => Promise<Session>;
98
+ assistantEventHub: AssistantEventHub;
99
+ resolveAttachments: (attachmentIds: string[]) => Array<{
100
+ id: string;
101
+ filename: string;
102
+ mimeType: string;
103
+ data: string;
104
+ }>;
105
+ }
106
+
87
107
  export interface RuntimeHttpServerOptions {
88
108
  port?: number;
89
109
  /** Hostname / IP to bind to. Defaults to '127.0.0.1' (loopback-only). */
@@ -101,6 +121,8 @@ export interface RuntimeHttpServerOptions {
101
121
  approvalCopyGenerator?: ApprovalCopyGenerator;
102
122
  /** Daemon-injected generator for conversational approval flow (provider-backed). */
103
123
  approvalConversationGenerator?: ApprovalConversationGenerator;
124
+ /** Dependencies for the POST /v1/messages queue-if-busy handler. */
125
+ sendMessageDeps?: SendMessageDeps;
104
126
  }
105
127
 
106
128
  export interface RuntimeAttachmentMetadata {
@@ -18,7 +18,13 @@ import type {
18
18
  NonBlockingMessageProcessor,
19
19
  RuntimeAttachmentMetadata,
20
20
  RuntimeMessagePayload,
21
+ SendMessageDeps,
21
22
  } from '../http-types.js';
23
+ import type { ServerMessage } from '../../daemon/ipc-protocol.js';
24
+ import { buildAssistantEvent } from '../assistant-event.js';
25
+ import { getLogger } from '../../util/logger.js';
26
+
27
+ const log = getLogger('conversation-routes');
22
28
 
23
29
  const SUGGESTION_CACHE_MAX = 100;
24
30
 
@@ -134,11 +140,40 @@ export function handleListMessages(
134
140
  return Response.json({ messages });
135
141
  }
136
142
 
143
+ /**
144
+ * Build an `onEvent` callback that publishes every outbound event to the
145
+ * assistant event hub, maintaining ordered delivery through a serial chain.
146
+ */
147
+ function makeHubPublisher(
148
+ deps: SendMessageDeps,
149
+ conversationId: string,
150
+ ): (msg: ServerMessage) => void {
151
+ let hubChain: Promise<void> = Promise.resolve();
152
+ return (msg: ServerMessage) => {
153
+ const msgRecord = msg as unknown as Record<string, unknown>;
154
+ const msgSessionId =
155
+ 'sessionId' in msg && typeof msgRecord.sessionId === 'string'
156
+ ? (msgRecord.sessionId as string)
157
+ : undefined;
158
+ const resolvedSessionId = msgSessionId ?? conversationId;
159
+ const event = buildAssistantEvent('self', msg, resolvedSessionId);
160
+ hubChain = (async () => {
161
+ await hubChain;
162
+ try {
163
+ await deps.assistantEventHub.publish(event);
164
+ } catch (err) {
165
+ log.warn({ err }, 'assistant-events hub subscriber threw during POST /messages');
166
+ }
167
+ })();
168
+ };
169
+ }
170
+
137
171
  export async function handleSendMessage(
138
172
  req: Request,
139
173
  deps: {
140
174
  processMessage?: MessageProcessor;
141
175
  persistAndProcessMessage?: NonBlockingMessageProcessor;
176
+ sendMessageDeps?: SendMessageDeps;
142
177
  },
143
178
  ): Promise<Response> {
144
179
  const body = await req.json() as {
@@ -204,6 +239,47 @@ export async function handleSendMessage(
204
239
 
205
240
  const mapping = getOrCreateConversation(conversationKey);
206
241
 
242
+ // ── Queue-if-busy path (preferred when sendMessageDeps is wired) ────
243
+ if (deps.sendMessageDeps) {
244
+ const smDeps = deps.sendMessageDeps;
245
+ const session = await smDeps.getOrCreateSession(mapping.conversationId);
246
+ const onEvent = makeHubPublisher(smDeps, mapping.conversationId);
247
+
248
+ const attachments = hasAttachments
249
+ ? smDeps.resolveAttachments(attachmentIds)
250
+ : [];
251
+
252
+ if (session.isProcessing()) {
253
+ // Queue the message so it's processed when the current turn completes
254
+ const requestId = crypto.randomUUID();
255
+ const result = session.enqueueMessage(
256
+ content ?? '',
257
+ attachments,
258
+ onEvent,
259
+ requestId,
260
+ );
261
+ if (result.rejected) {
262
+ return Response.json(
263
+ { error: 'Message queue is full. Please retry later.' },
264
+ { status: 429 },
265
+ );
266
+ }
267
+ return Response.json({ accepted: true, queued: true }, { status: 202 });
268
+ }
269
+
270
+ // Session is idle — persist and fire agent loop immediately
271
+ const requestId = crypto.randomUUID();
272
+ const messageId = session.persistUserMessage(content ?? '', attachments, requestId);
273
+
274
+ // Fire-and-forget the agent loop; events flow to the hub via onEvent
275
+ session.runAgentLoop(content ?? '', messageId, onEvent).catch((err) => {
276
+ log.error({ err, conversationId: mapping.conversationId }, 'Agent loop failed (POST /messages)');
277
+ });
278
+
279
+ return Response.json({ accepted: true, messageId }, { status: 202 });
280
+ }
281
+
282
+ // ── Legacy path (fallback when sendMessageDeps not wired) ───────────
207
283
  const processor = deps.persistAndProcessMessage ?? deps.processMessage;
208
284
  if (!processor) {
209
285
  return Response.json({ error: 'Message processing not configured' }, { status: 503 });
@@ -217,7 +293,7 @@ export async function handleSendMessage(
217
293
  undefined,
218
294
  sourceChannel,
219
295
  );
220
- return Response.json({ accepted: true, messageId: result.messageId });
296
+ return Response.json({ accepted: true, messageId: result.messageId }, { status: 202 });
221
297
  } catch (err) {
222
298
  if (err instanceof Error && err.message === 'Session is already processing a message') {
223
299
  return Response.json(
@@ -343,20 +343,20 @@ export class SubagentManager {
343
343
 
344
344
  // ── Send message to subagent ──────────────────────────────────────────
345
345
 
346
- sendMessage(subagentId: string, content: string): boolean {
346
+ sendMessage(subagentId: string, content: string): 'sent' | 'empty' | 'not_found' | 'terminal' | 'queue_full' {
347
347
  const trimmed = content?.trim();
348
- if (!trimmed) return false;
348
+ if (!trimmed) return 'empty';
349
349
 
350
350
  const managed = this.subagents.get(subagentId);
351
- if (!managed) return false;
352
- if (TERMINAL_STATUSES.has(managed.state.status)) return false;
351
+ if (!managed) return 'not_found';
352
+ if (TERMINAL_STATUSES.has(managed.state.status)) return 'terminal';
353
353
 
354
354
  const onEvent = managed.session.sendToClient;
355
355
  const requestId = uuid();
356
356
 
357
357
  // If the session is busy, queue the message; otherwise process immediately.
358
358
  const result = managed.session.enqueueMessage(trimmed, [], onEvent, requestId);
359
- if (result.rejected) return false;
359
+ if (result.rejected) return 'queue_full';
360
360
  if (!result.queued) {
361
361
  // Session is idle — send directly. Fire-and-forget so we don't block.
362
362
  const messageId = managed.session.persistUserMessage(trimmed, []);
@@ -364,7 +364,7 @@ export class SubagentManager {
364
364
  log.error({ subagentId, err }, 'Subagent message processing failed');
365
365
  });
366
366
  }
367
- return true;
367
+ return 'sent';
368
368
  }
369
369
 
370
370
  // ── Queries ───────────────────────────────────────────────────────────
@@ -23,9 +23,16 @@ export async function executeSubagentMessage(
23
23
  };
24
24
  }
25
25
 
26
- const sent = manager.sendMessage(subagentId, content);
26
+ const result = manager.sendMessage(subagentId, content);
27
27
 
28
- if (!sent) {
28
+ if (result === 'queue_full') {
29
+ return {
30
+ content: `Subagent "${subagentId}" message queue is full. Please wait for current messages to be processed.`,
31
+ isError: true,
32
+ };
33
+ }
34
+
35
+ if (result !== 'sent') {
29
36
  return {
30
37
  content: `Could not send message to subagent "${subagentId}". It may not exist or be in a terminal state.`,
31
38
  isError: true,