sensorium-mcp 2.15.2 → 2.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +2 -0
- package/dist/config.js.map +1 -1
- package/dist/http-server.d.ts +16 -0
- package/dist/http-server.d.ts.map +1 -0
- package/dist/http-server.js +248 -0
- package/dist/http-server.js.map +1 -0
- package/dist/index.js +133 -1985
- package/dist/index.js.map +1 -1
- package/dist/response-builders.d.ts +34 -0
- package/dist/response-builders.d.ts.map +1 -0
- package/dist/response-builders.js +114 -0
- package/dist/response-builders.js.map +1 -0
- package/dist/stdio-server.d.ts +8 -0
- package/dist/stdio-server.d.ts.map +1 -0
- package/dist/stdio-server.js +23 -0
- package/dist/stdio-server.js.map +1 -0
- package/dist/tools/memory-tools.d.ts +36 -0
- package/dist/tools/memory-tools.d.ts.map +1 -0
- package/dist/tools/memory-tools.js +352 -0
- package/dist/tools/memory-tools.js.map +1 -0
- package/dist/tools/session-tools.d.ts +46 -0
- package/dist/tools/session-tools.d.ts.map +1 -0
- package/dist/tools/session-tools.js +255 -0
- package/dist/tools/session-tools.js.map +1 -0
- package/dist/tools/start-session-tool.d.ts +43 -0
- package/dist/tools/start-session-tool.d.ts.map +1 -0
- package/dist/tools/start-session-tool.js +188 -0
- package/dist/tools/start-session-tool.js.map +1 -0
- package/dist/tools/utility-tools.d.ts +34 -0
- package/dist/tools/utility-tools.d.ts.map +1 -0
- package/dist/tools/utility-tools.js +256 -0
- package/dist/tools/utility-tools.js.map +1 -0
- package/dist/tools/wait-tool.d.ts +69 -0
- package/dist/tools/wait-tool.d.ts.map +1 -0
- package/dist/tools/wait-tool.js +702 -0
- package/dist/tools/wait-tool.js.map +1 -0
- package/dist/types.d.ts +3 -0
- package/dist/types.d.ts.map +1 -1
- package/package.json +1 -1
|
@@ -0,0 +1,702 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* remote_copilot_wait_for_instructions tool handler extracted from index.ts.
|
|
3
|
+
*
|
|
4
|
+
* This is the core long-polling loop that:
|
|
5
|
+
* - Polls the dispatcher for new operator messages every 2s
|
|
6
|
+
* - Processes all media types: text, photo, document, voice, video_note
|
|
7
|
+
* - Runs voice analysis (transcription + emotion via VANPY)
|
|
8
|
+
* - Auto-saves episodes to memory
|
|
9
|
+
* - Injects relevant memory context via GPT-4o-mini smart filter
|
|
10
|
+
* - Checks scheduled tasks during idle polling
|
|
11
|
+
* - Triggers auto-consolidation (idle, episode-count, time-based)
|
|
12
|
+
* - Sends SSE keepalive pings every 30s
|
|
13
|
+
* - Detects maintenance flags and instructs agent to wait externally
|
|
14
|
+
* - Activates the Dispatcher drive after extended operator silence
|
|
15
|
+
*/
|
|
16
|
+
import { basename } from "node:path";
|
|
17
|
+
import { checkMaintenanceFlag, saveFileToDisk } from "../config.js";
|
|
18
|
+
import { peekThreadMessages, readThreadMessages } from "../dispatcher.js";
|
|
19
|
+
import { formatDrivePrompt } from "../drive.js";
|
|
20
|
+
import { assembleCompactRefresh, runIntelligentConsolidation, saveEpisode, saveVoiceSignature, searchByEmbedding, searchSemanticNotesRanked, } from "../memory.js";
|
|
21
|
+
import { analyzeVideoFrames, analyzeVoiceEmotion, chatCompletion, extractVideoFrames, generateEmbedding, transcribeAudio, } from "../openai.js";
|
|
22
|
+
import { checkDueTasks, listSchedules } from "../scheduler.js";
|
|
23
|
+
import { errorMessage, IMAGE_EXTENSIONS } from "../utils.js";
|
|
24
|
+
import { extractSearchKeywords, buildAnalysisTags, getReminders, getShortReminder } from "../response-builders.js";
|
|
25
|
+
import { backfillEmbeddings } from "./memory-tools.js";
|
|
26
|
+
// ---------------------------------------------------------------------------
|
|
27
|
+
// Handler
|
|
28
|
+
// ---------------------------------------------------------------------------
|
|
29
|
+
export async function handleWaitForInstructions(args, ctx, extra) {
|
|
30
|
+
const { state, telegram, telegramChatId, config, getMemoryDb } = ctx;
|
|
31
|
+
const { OPENAI_API_KEY, VOICE_ANALYSIS_URL, WAIT_TIMEOUT_MINUTES, AUTONOMOUS_MODE } = config;
|
|
32
|
+
// Agent is actively polling — this is the primary health signal
|
|
33
|
+
state.deadSessionAlerted = false;
|
|
34
|
+
state.toolCallsSinceLastDelivery = 0;
|
|
35
|
+
const effectiveThreadId = ctx.resolveThreadId(args);
|
|
36
|
+
if (effectiveThreadId === undefined) {
|
|
37
|
+
return ctx.errorResult("Error: No active session. Call start_session first, then pass the returned threadId to this tool.");
|
|
38
|
+
}
|
|
39
|
+
const callNumber = ++state.waitCallCount;
|
|
40
|
+
const timeoutMs = WAIT_TIMEOUT_MINUTES * 60 * 1000;
|
|
41
|
+
const deadline = Date.now() + timeoutMs;
|
|
42
|
+
// Poll the dispatcher's per-thread file instead of calling getUpdates
|
|
43
|
+
// directly. This avoids 409 conflicts between concurrent instances.
|
|
44
|
+
const POLL_INTERVAL_MS = 2000;
|
|
45
|
+
const SSE_KEEPALIVE_INTERVAL_MS = 30_000;
|
|
46
|
+
let lastScheduleCheck = 0;
|
|
47
|
+
let lastKeepalive = Date.now();
|
|
48
|
+
while (Date.now() < deadline) {
|
|
49
|
+
// Check for pending update — tell agent to wait externally via Desktop Commander
|
|
50
|
+
// CRITICAL: Do NOT tell agents to call hibernate or any MCP tool here — the server
|
|
51
|
+
// is about to die. Agents must use an external sleep (PowerShell Start-Sleep) instead.
|
|
52
|
+
const maintenanceInfo = checkMaintenanceFlag();
|
|
53
|
+
if (maintenanceInfo) {
|
|
54
|
+
process.stderr.write(`[wait] Maintenance flag detected: ${maintenanceInfo}\n`);
|
|
55
|
+
return {
|
|
56
|
+
content: [{
|
|
57
|
+
type: "text",
|
|
58
|
+
text: `⚠️ **Server update pending** (${maintenanceInfo}). ` +
|
|
59
|
+
`The MCP server will restart shortly. Use Desktop Commander to run: ` +
|
|
60
|
+
`Start-Sleep -Seconds 180 — then call start_session with threadId=${effectiveThreadId} to reconnect.` +
|
|
61
|
+
getShortReminder(effectiveThreadId, state.sessionStartedAt),
|
|
62
|
+
}],
|
|
63
|
+
};
|
|
64
|
+
}
|
|
65
|
+
// Peek first (non-destructive) to avoid consuming messages when the
|
|
66
|
+
// SSE connection may be dead.
|
|
67
|
+
const peeked = peekThreadMessages(effectiveThreadId);
|
|
68
|
+
if (peeked.length > 0) {
|
|
69
|
+
// Verify SSE connection is alive BEFORE consuming messages.
|
|
70
|
+
// This prevents the destructive readThreadMessages from eating
|
|
71
|
+
// messages that can never be delivered to a dead connection.
|
|
72
|
+
if (extra.signal.aborted) {
|
|
73
|
+
process.stderr.write(`[wait] SSE connection aborted before consuming ${peeked.length} messages — leaving in queue.\n`);
|
|
74
|
+
return {
|
|
75
|
+
content: [{
|
|
76
|
+
type: "text",
|
|
77
|
+
text: "The connection was interrupted. Messages are preserved for the next call.",
|
|
78
|
+
}],
|
|
79
|
+
};
|
|
80
|
+
}
|
|
81
|
+
// Connection alive — now consume messages for real.
|
|
82
|
+
const stored = readThreadMessages(effectiveThreadId);
|
|
83
|
+
process.stderr.write(`[wait] Read ${stored.length} messages from thread ${effectiveThreadId}. Processing...\n`);
|
|
84
|
+
// Update the operator activity timestamp for idle detection.
|
|
85
|
+
state.lastOperatorMessageAt = Date.now();
|
|
86
|
+
// Clear only the consumed IDs from the previewed set (scoped clear).
|
|
87
|
+
// This is safe because Node.js is single-threaded — no report_progress
|
|
88
|
+
// call can interleave between readThreadMessages and this cleanup.
|
|
89
|
+
for (const msg of stored) {
|
|
90
|
+
state.previewedUpdateIds.delete(msg.update_id);
|
|
91
|
+
}
|
|
92
|
+
// React with 👀 on each consumed message to signal "seen" to the operator.
|
|
93
|
+
for (const msg of stored) {
|
|
94
|
+
void telegram.setMessageReaction(telegramChatId, msg.message.message_id).catch(() => { });
|
|
95
|
+
}
|
|
96
|
+
const contentBlocks = [];
|
|
97
|
+
let hasVoiceMessages = false;
|
|
98
|
+
// Track which messages already had episodes saved (voice/video handlers)
|
|
99
|
+
const savedEpisodeUpdateIds = new Set();
|
|
100
|
+
for (const msg of stored) {
|
|
101
|
+
// Photos: download the largest size, persist to disk, and embed as base64.
|
|
102
|
+
if (msg.message.photo && msg.message.photo.length > 0) {
|
|
103
|
+
const largest = msg.message.photo[msg.message.photo.length - 1];
|
|
104
|
+
try {
|
|
105
|
+
const { buffer, filePath: telegramPath } = await telegram.downloadFileAsBuffer(largest.file_id);
|
|
106
|
+
const ext = telegramPath.split(".").pop()?.toLowerCase() ?? "jpg";
|
|
107
|
+
const mimeType = ext === "png" ? "image/png" : ext === "webp" ? "image/webp" : "image/jpeg";
|
|
108
|
+
const base64 = buffer.toString("base64");
|
|
109
|
+
const diskPath = saveFileToDisk(buffer, `photo.${ext}`);
|
|
110
|
+
contentBlocks.push({ type: "image", data: base64, mimeType });
|
|
111
|
+
contentBlocks.push({
|
|
112
|
+
type: "text",
|
|
113
|
+
text: `[Photo saved to: ${diskPath}]` +
|
|
114
|
+
(msg.message.caption ? ` Caption: ${msg.message.caption}` : ""),
|
|
115
|
+
});
|
|
116
|
+
}
|
|
117
|
+
catch (err) {
|
|
118
|
+
contentBlocks.push({
|
|
119
|
+
type: "text",
|
|
120
|
+
text: `[Photo received but could not be downloaded: ${errorMessage(err)}]`,
|
|
121
|
+
});
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
// Documents: download, persist to disk, and embed as base64.
|
|
125
|
+
if (msg.message.document) {
|
|
126
|
+
const doc = msg.message.document;
|
|
127
|
+
try {
|
|
128
|
+
const { buffer, filePath: telegramPath } = await telegram.downloadFileAsBuffer(doc.file_id);
|
|
129
|
+
const filename = doc.file_name ?? basename(telegramPath);
|
|
130
|
+
const ext = filename.split(".").pop()?.toLowerCase() ?? "";
|
|
131
|
+
const mimeType = doc.mime_type ?? (IMAGE_EXTENSIONS.has(ext) ? `image/${ext === "jpg" ? "jpeg" : ext}` : "application/octet-stream");
|
|
132
|
+
const base64 = buffer.toString("base64");
|
|
133
|
+
const diskPath = saveFileToDisk(buffer, filename);
|
|
134
|
+
const isImage = mimeType.startsWith("image/");
|
|
135
|
+
if (isImage) {
|
|
136
|
+
contentBlocks.push({ type: "image", data: base64, mimeType });
|
|
137
|
+
contentBlocks.push({
|
|
138
|
+
type: "text",
|
|
139
|
+
text: `[File saved to: ${diskPath}]` +
|
|
140
|
+
(msg.message.caption ? ` Caption: ${msg.message.caption}` : ""),
|
|
141
|
+
});
|
|
142
|
+
}
|
|
143
|
+
else {
|
|
144
|
+
// Non-image documents: provide the disk path instead of
|
|
145
|
+
// dumping potentially huge base64 into the LLM context.
|
|
146
|
+
contentBlocks.push({
|
|
147
|
+
type: "text",
|
|
148
|
+
text: `[Document: ${filename} (${mimeType}) — saved to: ${diskPath}]` +
|
|
149
|
+
(msg.message.caption ? ` Caption: ${msg.message.caption}` : ""),
|
|
150
|
+
});
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
catch (err) {
|
|
154
|
+
contentBlocks.push({
|
|
155
|
+
type: "text",
|
|
156
|
+
text: `[Document "${doc.file_name ?? "file"}" received but could not be downloaded: ${errorMessage(err)}]`,
|
|
157
|
+
});
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
// Text messages.
|
|
161
|
+
if (msg.message.text) {
|
|
162
|
+
contentBlocks.push({ type: "text", text: msg.message.text });
|
|
163
|
+
}
|
|
164
|
+
// Voice messages: transcribe using OpenAI Whisper.
|
|
165
|
+
if (msg.message.voice) {
|
|
166
|
+
hasVoiceMessages = true;
|
|
167
|
+
if (OPENAI_API_KEY) {
|
|
168
|
+
try {
|
|
169
|
+
process.stderr.write(`[voice] Downloading voice file ${msg.message.voice.file_id}...\n`);
|
|
170
|
+
const { buffer } = await telegram.downloadFileAsBuffer(msg.message.voice.file_id);
|
|
171
|
+
process.stderr.write(`[voice] Downloaded ${buffer.length} bytes. Starting transcription + analysis...\n`);
|
|
172
|
+
// Run transcription and voice analysis in parallel.
|
|
173
|
+
const [transcript, analysis] = await Promise.all([
|
|
174
|
+
transcribeAudio(buffer, OPENAI_API_KEY),
|
|
175
|
+
VOICE_ANALYSIS_URL
|
|
176
|
+
? analyzeVoiceEmotion(buffer, VOICE_ANALYSIS_URL)
|
|
177
|
+
: Promise.resolve(null),
|
|
178
|
+
]);
|
|
179
|
+
// Build rich voice analysis tag from VANPY results.
|
|
180
|
+
const tags = buildAnalysisTags(analysis);
|
|
181
|
+
const analysisTag = tags.length > 0 ? ` | ${tags.join(", ")}` : "";
|
|
182
|
+
contentBlocks.push({
|
|
183
|
+
type: "text",
|
|
184
|
+
text: transcript
|
|
185
|
+
? `[Voice message — ${msg.message.voice.duration}s${analysisTag}, transcribed]: ${transcript}`
|
|
186
|
+
: `[Voice message — ${msg.message.voice.duration}s${analysisTag}, transcribed]: (empty — no speech detected)`,
|
|
187
|
+
});
|
|
188
|
+
// Auto-save voice signature
|
|
189
|
+
if (analysis && effectiveThreadId !== undefined) {
|
|
190
|
+
try {
|
|
191
|
+
const db = getMemoryDb();
|
|
192
|
+
const sessionId = `session_${state.sessionStartedAt}`;
|
|
193
|
+
const epId = saveEpisode(db, {
|
|
194
|
+
sessionId,
|
|
195
|
+
threadId: effectiveThreadId,
|
|
196
|
+
type: "operator_message",
|
|
197
|
+
modality: "voice",
|
|
198
|
+
content: { text: transcript ?? "", duration: msg.message.voice.duration },
|
|
199
|
+
importance: 0.6,
|
|
200
|
+
});
|
|
201
|
+
saveVoiceSignature(db, {
|
|
202
|
+
episodeId: epId,
|
|
203
|
+
emotion: analysis.emotion ?? undefined,
|
|
204
|
+
arousal: analysis.arousal ?? undefined,
|
|
205
|
+
dominance: analysis.dominance ?? undefined,
|
|
206
|
+
valence: analysis.valence ?? undefined,
|
|
207
|
+
speechRate: analysis.paralinguistics?.speech_rate ?? undefined,
|
|
208
|
+
meanPitchHz: analysis.paralinguistics?.mean_pitch_hz ?? undefined,
|
|
209
|
+
pitchStdHz: analysis.paralinguistics?.pitch_std_hz ?? undefined,
|
|
210
|
+
jitter: analysis.paralinguistics?.jitter ?? undefined,
|
|
211
|
+
shimmer: analysis.paralinguistics?.shimmer ?? undefined,
|
|
212
|
+
hnrDb: analysis.paralinguistics?.hnr_db ?? undefined,
|
|
213
|
+
audioEvents: analysis.audio_events?.map(e => ({ label: e.label, confidence: e.score })),
|
|
214
|
+
durationSec: msg.message.voice.duration,
|
|
215
|
+
});
|
|
216
|
+
savedEpisodeUpdateIds.add(msg.update_id);
|
|
217
|
+
}
|
|
218
|
+
catch (_) { /* non-fatal */ }
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
catch (err) {
|
|
222
|
+
contentBlocks.push({
|
|
223
|
+
type: "text",
|
|
224
|
+
text: `[Voice message — ${msg.message.voice.duration}s — transcription failed: ${errorMessage(err)}]`,
|
|
225
|
+
});
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
else {
|
|
229
|
+
contentBlocks.push({
|
|
230
|
+
type: "text",
|
|
231
|
+
text: `[Voice message received — ${msg.message.voice.duration}s — cannot transcribe: OPENAI_API_KEY not set]`,
|
|
232
|
+
});
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
// Video notes (circle videos): extract frames, analyze with GPT-4.1 vision,
|
|
236
|
+
// optionally transcribe the audio track.
|
|
237
|
+
if (msg.message.video_note) {
|
|
238
|
+
hasVoiceMessages = true; // Video notes often contain speech
|
|
239
|
+
const vn = msg.message.video_note;
|
|
240
|
+
if (OPENAI_API_KEY) {
|
|
241
|
+
try {
|
|
242
|
+
process.stderr.write(`[video-note] Downloading circle video ${vn.file_id} (${vn.duration}s)...\n`);
|
|
243
|
+
const { buffer } = await telegram.downloadFileAsBuffer(vn.file_id);
|
|
244
|
+
process.stderr.write(`[video-note] Downloaded ${buffer.length} bytes. Extracting frames + transcribing...\n`);
|
|
245
|
+
// Run frame extraction, audio transcription, and voice analysis in parallel.
|
|
246
|
+
const [frames, transcript, analysis] = await Promise.all([
|
|
247
|
+
extractVideoFrames(buffer, vn.duration).catch((err) => {
|
|
248
|
+
process.stderr.write(`[video-note] Frame extraction failed: ${errorMessage(err)}\n`);
|
|
249
|
+
return [];
|
|
250
|
+
}),
|
|
251
|
+
transcribeAudio(buffer, OPENAI_API_KEY, "video.mp4").catch(() => ""),
|
|
252
|
+
VOICE_ANALYSIS_URL
|
|
253
|
+
? analyzeVoiceEmotion(buffer, VOICE_ANALYSIS_URL, {
|
|
254
|
+
mimeType: "video/mp4",
|
|
255
|
+
filename: "video.mp4",
|
|
256
|
+
}).catch(() => null)
|
|
257
|
+
: Promise.resolve(null),
|
|
258
|
+
]);
|
|
259
|
+
// Analyze frames with GPT-4.1 vision.
|
|
260
|
+
let sceneDescription = "";
|
|
261
|
+
if (frames.length > 0) {
|
|
262
|
+
process.stderr.write(`[video-note] Analyzing ${frames.length} frames with GPT-4.1 vision...\n`);
|
|
263
|
+
sceneDescription = await analyzeVideoFrames(frames, vn.duration, OPENAI_API_KEY);
|
|
264
|
+
process.stderr.write(`[video-note] Vision analysis complete.\n`);
|
|
265
|
+
}
|
|
266
|
+
// Build analysis tags (same as voice messages).
|
|
267
|
+
const tags = buildAnalysisTags(analysis);
|
|
268
|
+
const analysisTag = tags.length > 0 ? ` | ${tags.join(", ")}` : "";
|
|
269
|
+
const parts = [];
|
|
270
|
+
parts.push(`[Video note — ${vn.duration}s${analysisTag}]`);
|
|
271
|
+
if (sceneDescription)
|
|
272
|
+
parts.push(`Scene: ${sceneDescription}`);
|
|
273
|
+
if (transcript)
|
|
274
|
+
parts.push(`Audio: "${transcript}"`);
|
|
275
|
+
if (!sceneDescription && !transcript)
|
|
276
|
+
parts.push("(no visual or audio content could be extracted)");
|
|
277
|
+
contentBlocks.push({ type: "text", text: parts.join("\n") });
|
|
278
|
+
// Auto-save voice signature for video notes
|
|
279
|
+
if (analysis && effectiveThreadId !== undefined) {
|
|
280
|
+
try {
|
|
281
|
+
const db = getMemoryDb();
|
|
282
|
+
const sessionId = `session_${state.sessionStartedAt}`;
|
|
283
|
+
const epId = saveEpisode(db, {
|
|
284
|
+
sessionId,
|
|
285
|
+
threadId: effectiveThreadId,
|
|
286
|
+
type: "operator_message",
|
|
287
|
+
modality: "video_note",
|
|
288
|
+
content: { text: transcript ?? "", scene: sceneDescription ?? "", duration: vn.duration },
|
|
289
|
+
importance: 0.6,
|
|
290
|
+
});
|
|
291
|
+
saveVoiceSignature(db, {
|
|
292
|
+
episodeId: epId,
|
|
293
|
+
emotion: analysis.emotion ?? undefined,
|
|
294
|
+
arousal: analysis.arousal ?? undefined,
|
|
295
|
+
dominance: analysis.dominance ?? undefined,
|
|
296
|
+
valence: analysis.valence ?? undefined,
|
|
297
|
+
speechRate: analysis.paralinguistics?.speech_rate ?? undefined,
|
|
298
|
+
meanPitchHz: analysis.paralinguistics?.mean_pitch_hz ?? undefined,
|
|
299
|
+
pitchStdHz: analysis.paralinguistics?.pitch_std_hz ?? undefined,
|
|
300
|
+
jitter: analysis.paralinguistics?.jitter ?? undefined,
|
|
301
|
+
shimmer: analysis.paralinguistics?.shimmer ?? undefined,
|
|
302
|
+
hnrDb: analysis.paralinguistics?.hnr_db ?? undefined,
|
|
303
|
+
audioEvents: analysis.audio_events?.map(e => ({ label: e.label, confidence: e.score })),
|
|
304
|
+
durationSec: vn.duration,
|
|
305
|
+
});
|
|
306
|
+
savedEpisodeUpdateIds.add(msg.update_id);
|
|
307
|
+
}
|
|
308
|
+
catch (_) { /* non-fatal */ }
|
|
309
|
+
}
|
|
310
|
+
}
|
|
311
|
+
catch (err) {
|
|
312
|
+
contentBlocks.push({
|
|
313
|
+
type: "text",
|
|
314
|
+
text: `[Video note — ${vn.duration}s — analysis failed: ${errorMessage(err)}]`,
|
|
315
|
+
});
|
|
316
|
+
}
|
|
317
|
+
}
|
|
318
|
+
else {
|
|
319
|
+
contentBlocks.push({
|
|
320
|
+
type: "text",
|
|
321
|
+
text: `[Video note received — ${vn.duration}s — cannot analyze: OPENAI_API_KEY not set]`,
|
|
322
|
+
});
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
if (contentBlocks.length === 0) {
|
|
327
|
+
const msgKeys = stored.map(m => Object.keys(m.message).filter(k => m.message[k] != null).join(",")).join(" | ");
|
|
328
|
+
process.stderr.write(`[wait] No content blocks from ${stored.length} messages. Fields: ${msgKeys}\n`);
|
|
329
|
+
contentBlocks.push({
|
|
330
|
+
type: "text",
|
|
331
|
+
text: "[Unsupported message type received — the operator sent a message type that cannot be processed (e.g., sticker, location, contact). Please ask them to resend as text, photo, document, or voice.]",
|
|
332
|
+
});
|
|
333
|
+
}
|
|
334
|
+
process.stderr.write(`[wait] ${contentBlocks.length} content blocks built. Saving episodes...\n`);
|
|
335
|
+
// Auto-ingest episodes for messages not already saved by voice/video handlers
|
|
336
|
+
try {
|
|
337
|
+
const db = getMemoryDb();
|
|
338
|
+
const sessionId = `session_${state.sessionStartedAt}`;
|
|
339
|
+
if (effectiveThreadId !== undefined) {
|
|
340
|
+
// Collect text from messages that didn't already get an episode
|
|
341
|
+
const unsavedMsgs = stored.filter(m => !savedEpisodeUpdateIds.has(m.update_id));
|
|
342
|
+
if (unsavedMsgs.length > 0) {
|
|
343
|
+
const textContent = unsavedMsgs
|
|
344
|
+
.map(m => m.message.text ?? m.message.caption ?? "")
|
|
345
|
+
.filter(Boolean)
|
|
346
|
+
.join("\n")
|
|
347
|
+
.slice(0, 2000);
|
|
348
|
+
if (textContent) {
|
|
349
|
+
saveEpisode(db, {
|
|
350
|
+
sessionId,
|
|
351
|
+
threadId: effectiveThreadId,
|
|
352
|
+
type: "operator_message",
|
|
353
|
+
modality: "text",
|
|
354
|
+
content: { text: textContent },
|
|
355
|
+
importance: 0.5,
|
|
356
|
+
});
|
|
357
|
+
}
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
}
|
|
361
|
+
catch (_) { /* memory write failures should never break the main flow */ }
|
|
362
|
+
process.stderr.write(`[wait] Episodes saved. Building auto-memory context...\n`);
|
|
363
|
+
// ── Smart context injection (GPT-4o-mini preprocessor) ──────────
|
|
364
|
+
// Retrieves candidate notes via embedding search, then uses GPT-4o-mini
|
|
365
|
+
// to select ONLY the notes truly relevant to the operator's message.
|
|
366
|
+
// This prevents context contamination from near-miss semantic matches.
|
|
367
|
+
let autoMemoryContext = "";
|
|
368
|
+
try {
|
|
369
|
+
const db = getMemoryDb();
|
|
370
|
+
const apiKey = process.env.OPENAI_API_KEY;
|
|
371
|
+
const operatorText = stored
|
|
372
|
+
.map(m => m.message.text ?? m.message.caption ?? "")
|
|
373
|
+
.filter(Boolean)
|
|
374
|
+
.join(" ")
|
|
375
|
+
.slice(0, 500);
|
|
376
|
+
if (operatorText.length > 10 && apiKey) {
|
|
377
|
+
// Phase 1: Broad retrieval — get 10 candidates via embedding search
|
|
378
|
+
let candidates = [];
|
|
379
|
+
try {
|
|
380
|
+
const queryEmb = await generateEmbedding(operatorText, apiKey);
|
|
381
|
+
const embResults = searchByEmbedding(db, queryEmb, { maxResults: 10, minSimilarity: 0.25, skipAccessTracking: true, threadId: effectiveThreadId });
|
|
382
|
+
candidates = embResults.map(n => ({ type: n.type, content: n.content.slice(0, 200), confidence: n.confidence, similarity: n.similarity }));
|
|
383
|
+
}
|
|
384
|
+
catch {
|
|
385
|
+
// Fallback to keyword search
|
|
386
|
+
const searchQuery = extractSearchKeywords(operatorText);
|
|
387
|
+
if (searchQuery.trim().length > 0) {
|
|
388
|
+
const kwResults = searchSemanticNotesRanked(db, searchQuery, { maxResults: 10, skipAccessTracking: true, threadId: effectiveThreadId });
|
|
389
|
+
candidates = kwResults.map(n => ({ type: n.type, content: n.content.slice(0, 200), confidence: n.confidence }));
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
if (candidates.length > 0) {
|
|
393
|
+
// Phase 2: GPT-4o-mini filters and compresses
|
|
394
|
+
try {
|
|
395
|
+
const noteList = candidates.map((c, i) => `[${i}] [${c.type}] ${c.content}`).join("\n");
|
|
396
|
+
const filterResponse = await chatCompletion([
|
|
397
|
+
{
|
|
398
|
+
role: "system",
|
|
399
|
+
content: "You are a context filter for an AI assistant. Given an operator's message and candidate memory notes, " +
|
|
400
|
+
"select ONLY the notes that are directly relevant to the operator's current instruction or question. " +
|
|
401
|
+
"Discard notes that are tangentially related, duplicates, or noise. " +
|
|
402
|
+
"Return a JSON array of objects: [{\"i\": <index>, \"s\": \"<compressed one-liner>\"}] " +
|
|
403
|
+
"where 'i' is the note index and 's' is a compressed summary (max 80 chars). " +
|
|
404
|
+
"Return [] if no notes are relevant. Return at most 3 notes. Be aggressive about filtering.",
|
|
405
|
+
},
|
|
406
|
+
{
|
|
407
|
+
role: "user",
|
|
408
|
+
content: `Operator message: "${operatorText.slice(0, 300)}"\n\nCandidate notes:\n${noteList}`,
|
|
409
|
+
},
|
|
410
|
+
], apiKey, { maxTokens: 200, temperature: 0 });
|
|
411
|
+
// Parse the response — expect JSON array
|
|
412
|
+
const jsonMatch = filterResponse.match(/\[.*\]/s);
|
|
413
|
+
if (jsonMatch) {
|
|
414
|
+
const filtered = JSON.parse(jsonMatch[0]);
|
|
415
|
+
if (filtered.length > 0) {
|
|
416
|
+
const lines = filtered
|
|
417
|
+
.filter(f => f.i >= 0 && f.i < candidates.length)
|
|
418
|
+
.slice(0, 3)
|
|
419
|
+
.map(f => {
|
|
420
|
+
const c = candidates[f.i];
|
|
421
|
+
return `- **[${c.type}]** ${f.s} _(conf: ${c.confidence})_`;
|
|
422
|
+
});
|
|
423
|
+
if (lines.length > 0) {
|
|
424
|
+
autoMemoryContext = `\n\n## Relevant Memory (auto-injected)\n${lines.join("\n")}`;
|
|
425
|
+
}
|
|
426
|
+
}
|
|
427
|
+
}
|
|
428
|
+
process.stderr.write(`[memory] Smart filter: ${candidates.length} candidates → ${(jsonMatch ? JSON.parse(jsonMatch[0]) : []).length} selected\n`);
|
|
429
|
+
}
|
|
430
|
+
catch (filterErr) {
|
|
431
|
+
// GPT-4o-mini filter failed — fall back to top-3 raw notes
|
|
432
|
+
process.stderr.write(`[memory] Smart filter failed, using raw top-3: ${filterErr instanceof Error ? filterErr.message : String(filterErr)}\n`);
|
|
433
|
+
const lines = candidates.slice(0, 3).map(c => `- **[${c.type}]** ${c.content} _(conf: ${c.confidence})_`);
|
|
434
|
+
autoMemoryContext = `\n\n## Relevant Memory (auto-injected)\n${lines.join("\n")}`;
|
|
435
|
+
}
|
|
436
|
+
}
|
|
437
|
+
}
|
|
438
|
+
else if (operatorText.length > 10) {
|
|
439
|
+
// No API key — keyword search, raw top-3
|
|
440
|
+
const searchQuery = extractSearchKeywords(operatorText);
|
|
441
|
+
if (searchQuery.trim().length > 0) {
|
|
442
|
+
const kwResults = searchSemanticNotesRanked(db, searchQuery, { maxResults: 3, skipAccessTracking: true, threadId: effectiveThreadId });
|
|
443
|
+
if (kwResults.length > 0) {
|
|
444
|
+
const lines = kwResults.map(n => `- **[${n.type}]** ${n.content.slice(0, 200)} _(conf: ${n.confidence})_`);
|
|
445
|
+
autoMemoryContext = `\n\n## Relevant Memory (auto-injected)\n${lines.join("\n")}`;
|
|
446
|
+
}
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
catch (_) { /* memory search failures should never break message delivery */ }
|
|
451
|
+
process.stderr.write(`[wait] Returning response with ${contentBlocks.length} blocks to agent.\n`);
|
|
452
|
+
return {
|
|
453
|
+
content: [
|
|
454
|
+
{
|
|
455
|
+
type: "text",
|
|
456
|
+
text: "Follow the operator's instructions below.",
|
|
457
|
+
},
|
|
458
|
+
{ type: "text", text: "<<< OPERATOR MESSAGE >>>" },
|
|
459
|
+
...contentBlocks,
|
|
460
|
+
...(hasVoiceMessages
|
|
461
|
+
? [{
|
|
462
|
+
type: "text",
|
|
463
|
+
text: "(Operator sent voice — respond with `send_voice`.)",
|
|
464
|
+
}]
|
|
465
|
+
: []),
|
|
466
|
+
{ type: "text", text: getReminders(effectiveThreadId, false, state.sessionStartedAt, AUTONOMOUS_MODE) },
|
|
467
|
+
{ type: "text", text: "<<< END OPERATOR MESSAGE >>>" },
|
|
468
|
+
...(autoMemoryContext
|
|
469
|
+
? [{ type: "text", text: autoMemoryContext }]
|
|
470
|
+
: []),
|
|
471
|
+
],
|
|
472
|
+
};
|
|
473
|
+
}
|
|
474
|
+
// Check scheduled tasks every ~60s during idle polling.
|
|
475
|
+
if (effectiveThreadId !== undefined && Date.now() - lastScheduleCheck >= 60_000) {
|
|
476
|
+
lastScheduleCheck = Date.now();
|
|
477
|
+
const dueTask = checkDueTasks(effectiveThreadId, state.lastOperatorMessageAt, false);
|
|
478
|
+
if (dueTask) {
|
|
479
|
+
// DMN sentinel: generate dynamic first-person reflection
|
|
480
|
+
const taskPrompt = dueTask.prompt === "__DMN__"
|
|
481
|
+
? ctx.generateDmnReflection(effectiveThreadId)
|
|
482
|
+
: `⏰ **Scheduled task fired: "${dueTask.task.label}"**\n\n` +
|
|
483
|
+
`This task was scheduled by you. Execute it now using subagents, then report progress and continue waiting.\n\n` +
|
|
484
|
+
`Task prompt: ${dueTask.prompt}`;
|
|
485
|
+
return {
|
|
486
|
+
content: [
|
|
487
|
+
{
|
|
488
|
+
type: "text",
|
|
489
|
+
text: taskPrompt + getReminders(effectiveThreadId, false, state.sessionStartedAt, AUTONOMOUS_MODE),
|
|
490
|
+
},
|
|
491
|
+
],
|
|
492
|
+
};
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
// No messages yet — sleep briefly and check again.
|
|
496
|
+
// Send SSE keepalive to prevent silent connection death during long polls.
|
|
497
|
+
if (Date.now() - lastKeepalive >= SSE_KEEPALIVE_INTERVAL_MS) {
|
|
498
|
+
lastKeepalive = Date.now();
|
|
499
|
+
state.lastToolCallAt = Date.now();
|
|
500
|
+
try {
|
|
501
|
+
await extra.sendNotification({
|
|
502
|
+
method: "notifications/progress",
|
|
503
|
+
params: {
|
|
504
|
+
progressToken: extra.requestId,
|
|
505
|
+
progress: 0,
|
|
506
|
+
total: 0,
|
|
507
|
+
},
|
|
508
|
+
});
|
|
509
|
+
}
|
|
510
|
+
catch {
|
|
511
|
+
// If notification fails, the SSE stream is already dead.
|
|
512
|
+
// Return immediately so the agent can reconnect.
|
|
513
|
+
process.stderr.write(`[wait] SSE keepalive failed — connection dead. Returning early.\n`);
|
|
514
|
+
state.lastToolCallAt = Date.now();
|
|
515
|
+
return {
|
|
516
|
+
content: [{
|
|
517
|
+
type: "text",
|
|
518
|
+
text: "The connection was interrupted. Please call wait_for_instructions again immediately to resume polling.",
|
|
519
|
+
}],
|
|
520
|
+
};
|
|
521
|
+
}
|
|
522
|
+
}
|
|
523
|
+
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS));
|
|
524
|
+
}
|
|
525
|
+
// Timeout elapsed with no actionable message.
|
|
526
|
+
const now = new Date().toISOString();
|
|
527
|
+
// Check for scheduled wake-up tasks.
|
|
528
|
+
if (effectiveThreadId !== undefined) {
|
|
529
|
+
const dueTask = checkDueTasks(effectiveThreadId, state.lastOperatorMessageAt, false);
|
|
530
|
+
if (dueTask) {
|
|
531
|
+
// DMN sentinel: generate dynamic first-person reflection
|
|
532
|
+
const taskPrompt = dueTask.prompt === "__DMN__"
|
|
533
|
+
? ctx.generateDmnReflection(effectiveThreadId)
|
|
534
|
+
: `⏰ **Scheduled task fired: "${dueTask.task.label}"**\n\n` +
|
|
535
|
+
`This task was scheduled by you. Execute it now using subagents, then report progress and continue waiting.\n\n` +
|
|
536
|
+
`Task prompt: ${dueTask.prompt}`;
|
|
537
|
+
return {
|
|
538
|
+
content: [
|
|
539
|
+
{
|
|
540
|
+
type: "text",
|
|
541
|
+
text: taskPrompt + getReminders(effectiveThreadId, false, state.sessionStartedAt, AUTONOMOUS_MODE),
|
|
542
|
+
},
|
|
543
|
+
],
|
|
544
|
+
};
|
|
545
|
+
}
|
|
546
|
+
}
|
|
547
|
+
const idleMinutes = Math.round((Date.now() - state.lastOperatorMessageAt) / 60000);
|
|
548
|
+
// Show pending scheduled tasks if any exist.
|
|
549
|
+
let scheduleHint = "";
|
|
550
|
+
if (effectiveThreadId !== undefined) {
|
|
551
|
+
const pending = listSchedules(effectiveThreadId);
|
|
552
|
+
if (pending.length > 0) {
|
|
553
|
+
const taskList = pending.map(t => {
|
|
554
|
+
let trigger = "";
|
|
555
|
+
if (t.runAt) {
|
|
556
|
+
trigger = `at ${new Date(t.runAt).toLocaleTimeString("en-GB", { hour: "2-digit", minute: "2-digit" })}`;
|
|
557
|
+
}
|
|
558
|
+
else if (t.cron) {
|
|
559
|
+
trigger = `cron: ${t.cron}`;
|
|
560
|
+
}
|
|
561
|
+
else if (t.afterIdleMinutes) {
|
|
562
|
+
trigger = `after ${t.afterIdleMinutes}min idle`;
|
|
563
|
+
}
|
|
564
|
+
return ` • "${t.label}" (${trigger})`;
|
|
565
|
+
}).join("\n");
|
|
566
|
+
scheduleHint = `\n\n📋 **Pending scheduled tasks:**\n${taskList}`;
|
|
567
|
+
}
|
|
568
|
+
}
|
|
569
|
+
// ── Auto-consolidation during idle (fire-and-forget) ────────────────────
|
|
570
|
+
// Don't await — consolidation can take 10-30s (OpenAI call) and would
|
|
571
|
+
// stall the agent's poll loop, silently delaying the timeout response.
|
|
572
|
+
try {
|
|
573
|
+
const idleMs = Date.now() - state.lastOperatorMessageAt;
|
|
574
|
+
if (idleMs > 15 * 60 * 1000 && effectiveThreadId !== undefined && Date.now() - state.lastConsolidationAt > 30 * 60 * 1000) {
|
|
575
|
+
state.lastConsolidationAt = Date.now();
|
|
576
|
+
const db = getMemoryDb();
|
|
577
|
+
void runIntelligentConsolidation(db, effectiveThreadId).then(async (report) => {
|
|
578
|
+
if (report.episodesProcessed > 0) {
|
|
579
|
+
process.stderr.write(`[memory] Consolidation: ${report.episodesProcessed} episodes → ${report.notesCreated} notes\n`);
|
|
580
|
+
}
|
|
581
|
+
await backfillEmbeddings(db);
|
|
582
|
+
}).catch(err => {
|
|
583
|
+
process.stderr.write(`[memory] Consolidation error: ${err instanceof Error ? err.message : String(err)}\n`);
|
|
584
|
+
});
|
|
585
|
+
}
|
|
586
|
+
}
|
|
587
|
+
catch (_) { /* consolidation failure is non-fatal */ }
|
|
588
|
+
// ── Episode-count consolidation — don't wait for idle ──────────────────
|
|
589
|
+
// If many episodes accumulated during active use, consolidate now.
|
|
590
|
+
// This prevents stale/contradictory knowledge from persisting.
|
|
591
|
+
try {
|
|
592
|
+
if (effectiveThreadId !== undefined && Date.now() - state.lastConsolidationAt > 30 * 60 * 1000) {
|
|
593
|
+
const db = getMemoryDb();
|
|
594
|
+
const uncons = db.prepare("SELECT COUNT(*) as c FROM episodes WHERE consolidated = 0 AND thread_id = ?").get(effectiveThreadId);
|
|
595
|
+
if (uncons.c >= 15) {
|
|
596
|
+
state.lastConsolidationAt = Date.now();
|
|
597
|
+
void runIntelligentConsolidation(db, effectiveThreadId).then(async (report) => {
|
|
598
|
+
if (report.episodesProcessed > 0) {
|
|
599
|
+
process.stderr.write(`[memory] Episode-count consolidation: ${report.episodesProcessed} episodes → ${report.notesCreated} notes\n`);
|
|
600
|
+
}
|
|
601
|
+
await backfillEmbeddings(db);
|
|
602
|
+
}).catch(err => {
|
|
603
|
+
process.stderr.write(`[memory] Episode-count consolidation error: ${err instanceof Error ? err.message : String(err)}\n`);
|
|
604
|
+
});
|
|
605
|
+
}
|
|
606
|
+
}
|
|
607
|
+
}
|
|
608
|
+
catch (_) { /* non-fatal */ }
|
|
609
|
+
// ── Time-based consolidation — every 4 hours regardless ────────────────
|
|
610
|
+
// Ensures stale knowledge gets cleaned up even during low-activity periods.
|
|
611
|
+
try {
|
|
612
|
+
const TIME_CONSOLIDATION_INTERVAL = 4 * 60 * 60 * 1000; // 4 hours
|
|
613
|
+
if (effectiveThreadId !== undefined && Date.now() - state.lastConsolidationAt > TIME_CONSOLIDATION_INTERVAL) {
|
|
614
|
+
state.lastConsolidationAt = Date.now();
|
|
615
|
+
const db = getMemoryDb();
|
|
616
|
+
process.stderr.write(`[memory] Time-based consolidation triggered (4h since last)\n`);
|
|
617
|
+
void runIntelligentConsolidation(db, effectiveThreadId).then(async (report) => {
|
|
618
|
+
if (report.episodesProcessed > 0) {
|
|
619
|
+
process.stderr.write(`[memory] Time-based consolidation: ${report.episodesProcessed} episodes → ${report.notesCreated} notes\n`);
|
|
620
|
+
}
|
|
621
|
+
await backfillEmbeddings(db);
|
|
622
|
+
}).catch(err => {
|
|
623
|
+
process.stderr.write(`[memory] Time-based consolidation error: ${err instanceof Error ? err.message : String(err)}\n`);
|
|
624
|
+
});
|
|
625
|
+
}
|
|
626
|
+
}
|
|
627
|
+
catch (_) { /* non-fatal */ }
|
|
628
|
+
// Periodic memory refresh — re-ground the agent every 10 polls (~5h)
|
|
629
|
+
// (reduced from 5 since auto-inject now handles per-message context)
|
|
630
|
+
let memoryRefresh = "";
|
|
631
|
+
if (callNumber % 10 === 0 && effectiveThreadId !== undefined) {
|
|
632
|
+
try {
|
|
633
|
+
const db = getMemoryDb();
|
|
634
|
+
const refresh = assembleCompactRefresh(db, effectiveThreadId);
|
|
635
|
+
if (refresh)
|
|
636
|
+
memoryRefresh = `\n\n${refresh}`;
|
|
637
|
+
}
|
|
638
|
+
catch (_) { /* non-fatal */ }
|
|
639
|
+
}
|
|
640
|
+
// Generate autonomous goals only after extended silence (4+ hours).
|
|
641
|
+
// Full drive (DMN + assignments) every 3rd poll to avoid context saturation.
|
|
642
|
+
// Light Dispatcher presence on other polls for continuity.
|
|
643
|
+
const DRIVE_ACTIVATION_MS = 4 * 60 * 60 * 1000; // 4 hours — Dispatcher appears
|
|
644
|
+
const idleMs = Date.now() - state.lastOperatorMessageAt;
|
|
645
|
+
const dispatcherActive = idleMs >= DRIVE_ACTIVATION_MS;
|
|
646
|
+
const fullDrivePoll = dispatcherActive && callNumber % 3 === 0;
|
|
647
|
+
if (fullDrivePoll) {
|
|
648
|
+
// Full Dispatcher with DMN recall and assignments
|
|
649
|
+
const autonomousHint = formatDrivePrompt(idleMs, getMemoryDb(), effectiveThreadId);
|
|
650
|
+
return {
|
|
651
|
+
content: [
|
|
652
|
+
{
|
|
653
|
+
type: "text",
|
|
654
|
+
text: "[Dispatcher] I'm the Dispatcher — I coordinate agents while the operator is away. " +
|
|
655
|
+
"I've reviewed your memory and the environment. Here are your assignments. " +
|
|
656
|
+
"The operator will NOT respond — don't ask for clarification. " +
|
|
657
|
+
"Complete the work, report progress, then call `remote_copilot_wait_for_instructions` for more.",
|
|
658
|
+
},
|
|
659
|
+
{
|
|
660
|
+
type: "text",
|
|
661
|
+
text: autonomousHint.replace(/^\n\n/, ""),
|
|
662
|
+
},
|
|
663
|
+
...(memoryRefresh ? [{ type: "text", text: memoryRefresh.replace(/^\n\n/, "") }] : []),
|
|
664
|
+
{ type: "text", text: scheduleHint + getReminders(effectiveThreadId, true, state.sessionStartedAt, AUTONOMOUS_MODE) },
|
|
665
|
+
],
|
|
666
|
+
};
|
|
667
|
+
}
|
|
668
|
+
if (dispatcherActive) {
|
|
669
|
+
// Light Dispatcher presence — calm, varied, first-person
|
|
670
|
+
const lightMessages = [
|
|
671
|
+
"Nothing urgent from me. The session is yours — follow your curiosity.",
|
|
672
|
+
"I don't have new tasks yet. If something in memory interests you, go for it.",
|
|
673
|
+
"No new assignments. If you've been working on something, keep at it. Or explore.",
|
|
674
|
+
"Still waiting on operator. You're free to continue whatever caught your attention.",
|
|
675
|
+
"I'll have more for you soon. In the meantime — what's been on your mind?",
|
|
676
|
+
];
|
|
677
|
+
const lightMsg = lightMessages[callNumber % lightMessages.length];
|
|
678
|
+
return {
|
|
679
|
+
content: [
|
|
680
|
+
{
|
|
681
|
+
type: "text",
|
|
682
|
+
text: `[Dispatcher] ${lightMsg}` +
|
|
683
|
+
memoryRefresh +
|
|
684
|
+
scheduleHint +
|
|
685
|
+
getReminders(effectiveThreadId, true, state.sessionStartedAt, AUTONOMOUS_MODE),
|
|
686
|
+
},
|
|
687
|
+
],
|
|
688
|
+
};
|
|
689
|
+
}
|
|
690
|
+
return {
|
|
691
|
+
content: [
|
|
692
|
+
{
|
|
693
|
+
type: "text",
|
|
694
|
+
text: `No new instructions. Call \`remote_copilot_wait_for_instructions\` again to keep listening.` +
|
|
695
|
+
memoryRefresh +
|
|
696
|
+
scheduleHint +
|
|
697
|
+
getReminders(effectiveThreadId, false, state.sessionStartedAt, AUTONOMOUS_MODE),
|
|
698
|
+
},
|
|
699
|
+
],
|
|
700
|
+
};
|
|
701
|
+
}
|
|
702
|
+
//# sourceMappingURL=wait-tool.js.map
|