@cloudflare/think 0.0.0 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +241 -0
- package/dist/classPrivateFieldSet2-COLddhya.js +27 -0
- package/dist/classPrivateMethodInitSpec-CdQXQy1O.js +7 -0
- package/dist/extensions/index.d.ts +20 -0
- package/dist/extensions/index.js +62 -0
- package/dist/extensions/index.js.map +1 -0
- package/dist/index-BlcvIdWK.d.ts +171 -0
- package/dist/index-C4OTSwUW.d.ts +193 -0
- package/dist/manager-DIV0gQf3.js +214 -0
- package/dist/manager-DIV0gQf3.js.map +1 -0
- package/dist/message-builder.d.ts +51 -0
- package/dist/message-builder.js +217 -0
- package/dist/message-builder.js.map +1 -0
- package/dist/session/index.d.ts +22 -0
- package/dist/session/index.js +2 -0
- package/dist/session-C6ZU_1zM.js +507 -0
- package/dist/session-C6ZU_1zM.js.map +1 -0
- package/dist/think.d.ts +315 -0
- package/dist/think.js +701 -0
- package/dist/think.js.map +1 -0
- package/dist/tools/execute.d.ts +105 -0
- package/dist/tools/execute.js +64 -0
- package/dist/tools/execute.js.map +1 -0
- package/dist/tools/extensions.d.ts +67 -0
- package/dist/tools/extensions.js +85 -0
- package/dist/tools/extensions.js.map +1 -0
- package/dist/tools/workspace.d.ts +303 -0
- package/dist/tools/workspace.js +398 -0
- package/dist/tools/workspace.js.map +1 -0
- package/dist/transport.d.ts +69 -0
- package/dist/transport.js +166 -0
- package/dist/transport.js.map +1 -0
- package/package.json +83 -9
package/dist/think.js
ADDED
|
@@ -0,0 +1,701 @@
|
|
|
1
|
+
import { t as SessionManager } from "./session-C6ZU_1zM.js";
|
|
2
|
+
import { applyChunkToParts } from "./message-builder.js";
|
|
3
|
+
import { i as _classPrivateFieldInitSpec, n as _classPrivateFieldGet2, t as _classPrivateFieldSet2 } from "./classPrivateFieldSet2-COLddhya.js";
|
|
4
|
+
import { convertToModelMessages, pruneMessages, stepCountIs, streamText } from "ai";
|
|
5
|
+
import { Agent, __DO_NOT_USE_WILL_BREAK__agentContext } from "agents";
|
|
6
|
+
import { withFibers } from "agents/experimental/forever";
|
|
7
|
+
//#region src/sanitize.ts
|
|
8
|
+
/** Reused encoder instance so byte measurement never allocates a new one. */
const textEncoder = new TextEncoder();
/**
 * Hard ceiling (bytes) for one serialized message row: 1.8MB, leaving
 * headroom under SQLite's 2MB row limit.
 */
const ROW_MAX_BYTES = 1.8e6;
/**
 * Count the UTF-8 bytes needed to encode a string.
 * @param {string} s - Input text.
 * @returns {number} Encoded byte length.
 */
function byteLength(s) {
  const encoded = textEncoder.encode(s);
  return encoded.byteLength;
}
|
|
16
|
+
/**
 * Prepare a message for persistence by dropping ephemeral provider-specific
 * data that must not be stored or replayed in later requests.
 *
 * 1. Removes OpenAI ephemeral fields (itemId, reasoningEncryptedContent)
 * 2. Drops reasoning parts that carry neither text nor providerMetadata
 *
 * The input message is not mutated; a shallow copy with cleaned parts
 * is returned.
 */
function sanitizeMessage(message) {
  const hasOpenAI = (value) => Boolean(value) && typeof value === "object" && "openai" in value;
  const cleaned = message.parts.map((part) => {
    let current = part;
    if ("providerMetadata" in current && hasOpenAI(current.providerMetadata)) {
      current = stripOpenAIMetadata(current, "providerMetadata");
    }
    if ("callProviderMetadata" in current && hasOpenAI(current.callProviderMetadata)) {
      current = stripOpenAIMetadata(current, "callProviderMetadata");
    }
    return current;
  });
  const kept = cleaned.filter((part) => {
    if (part.type !== "reasoning") return true;
    const hasText = Boolean(part.text) && part.text.trim() !== "";
    if (hasText) return true;
    // A textless reasoning part survives only when provider metadata remains.
    const meta = "providerMetadata" in part ? part.providerMetadata : void 0;
    return Boolean(meta) && typeof meta === "object" && Object.keys(meta).length > 0;
  });
  return {
    ...message,
    parts: kept
  };
}
|
|
44
|
+
/**
 * Strip OpenAI-specific ephemeral fields (itemId, reasoningEncryptedContent)
 * from the named metadata object on a message part.
 *
 * @param part - The message part to clean (never mutated).
 * @param metadataKey - Which property to rewrite ("providerMetadata" or
 *   "callProviderMetadata").
 * @returns The part unchanged when there is no `openai` entry; otherwise a
 *   shallow copy whose metadata key is rewritten with the surviving fields,
 *   or removed entirely when nothing survives.
 */
function stripOpenAIMetadata(part, metadataKey) {
  const metadata = part[metadataKey];
  if (!metadata?.openai) return part;
  // Peel the ephemeral fields off; whatever remains may be worth keeping.
  const { itemId: _itemId, reasoningEncryptedContent: _rec, ...openaiRest } = metadata.openai;
  const { openai: _openai, ...otherProviders } = metadata;
  let replacement;
  if (Object.keys(openaiRest).length > 0) {
    replacement = {
      ...otherProviders,
      openai: openaiRest
    };
  } else if (Object.keys(otherProviders).length > 0) {
    replacement = otherProviders;
  }
  const { [metadataKey]: _dropped, ...partRest } = part;
  if (replacement) return {
    ...partRest,
    [metadataKey]: replacement
  };
  return partRest;
}
|
|
66
|
+
/**
 * Enforce SQLite row size limits by compacting tool outputs and text parts
 * when a serialized message exceeds the safety threshold (1.8MB).
 *
 * Compaction strategy:
 * 1. Compact tool outputs over 1KB (replace with a short summary + preview)
 * 2. If the row is still too big, truncate text parts oldest-first
 */
function enforceRowSizeLimit(message) {
  const fits = (candidate) => byteLength(JSON.stringify(candidate)) <= ROW_MAX_BYTES;
  if (fits(message)) return message;
  // Only assistant messages carry tool outputs worth compacting.
  if (message.role !== "assistant") return truncateTextParts(message);
  const parts = message.parts.map((part) => {
    const isAvailableToolOutput = "output" in part && "toolCallId" in part && "state" in part && part.state === "output-available";
    if (!isAvailableToolOutput) return part;
    const outputJson = JSON.stringify(part.output);
    if (outputJson.length <= 1e3) return part;
    return {
      ...part,
      output: `This tool output was too large to persist in storage (${outputJson.length} bytes). If the user asks about this data, suggest re-running the tool. Preview: ${outputJson.slice(0, 500)}...`
    };
  });
  const compacted = {
    ...message,
    parts
  };
  if (fits(compacted)) return compacted;
  return truncateTextParts(compacted);
}
|
|
98
|
+
/**
 * Truncate oversized text parts (oldest first) until the serialized
 * message fits under ROW_MAX_BYTES.
 *
 * Best-effort: each text part longer than 1000 chars is replaced by a
 * 500-char preview; the loop stops as soon as the message fits. If no
 * remaining text part is long enough, the message is returned as-is.
 */
function truncateTextParts(message) {
  const parts = message.parts.slice();
  for (let index = 0; index < parts.length; index++) {
    const part = parts[index];
    if (part.type !== "text" || !("text" in part)) continue;
    const text = part.text;
    if (text.length <= 1e3) continue;
    parts[index] = {
      ...part,
      text: `[Text truncated for storage (${text.length} chars). First 500 chars: ${text.slice(0, 500)}...]`
    };
    // Stop as soon as the message fits; later parts stay intact.
    if (byteLength(JSON.stringify({
      ...message,
      parts
    })) <= ROW_MAX_BYTES) break;
  }
  return {
    ...message,
    parts
  };
}
|
|
125
|
+
//#endregion
//#region src/think.ts
// Agent base wrapped with fiber support from agents/experimental/forever.
const ThinkBase = withFibers(Agent);
// Chat-protocol message type strings, matched against incoming WebSocket
// payloads in _handleProtocol and echoed back in broadcasts.
const MSG_CHAT_MESSAGES = "cf_agent_chat_messages";
const MSG_CHAT_REQUEST = "cf_agent_use_chat_request";
const MSG_CHAT_RESPONSE = "cf_agent_use_chat_response";
const MSG_CHAT_CLEAR = "cf_agent_chat_clear";
const MSG_CHAT_CANCEL = "cf_agent_chat_request_cancel";
// Backing stores for the transpiled #private class fields on Think
// (config-table-created flag and in-memory config cache).
var _configTableReady = /* @__PURE__ */ new WeakMap();
var _configCache = /* @__PURE__ */ new WeakMap();
|
|
135
|
+
/**
 * A unified Agent base class for chat sessions.
 *
 * Works as both a top-level agent (WebSocket chat protocol) and a
 * sub-agent (RPC streaming via `chat()`).
 *
 * @experimental Requires the `"experimental"` compatibility flag.
 */
var Think = class extends ThinkBase {
  constructor(..._args) {
    super(..._args);
    // In-memory view of the current session's history (UIMessage[]).
    this.messages = [];
    // When true, onStart() calls this.checkFibers().
    this.fibers = false;
    // Optional cap on persisted messages; enforced after each persist.
    this.maxPersistedMessages = void 0;
    // message id -> last-persisted JSON, to skip redundant upserts.
    this._persistedMessageCache = /* @__PURE__ */ new Map();
    this._sessionId = null;
    // request id -> AbortController for in-flight chat requests.
    this._abortControllers = /* @__PURE__ */ new Map();
    // Bumped on clear so in-flight streams skip persisting stale output.
    this._clearGeneration = 0;
    _classPrivateFieldInitSpec(this, _configTableReady, false);
    _classPrivateFieldInitSpec(this, _configCache, null);
  }
  // Lazily create the key/value config table, at most once per instance.
  _ensureConfigTable() {
    if (_classPrivateFieldGet2(_configTableReady, this)) return;
    this.sql`
      CREATE TABLE IF NOT EXISTS _think_config (
        key TEXT PRIMARY KEY, value TEXT NOT NULL
      )
    `;
    _classPrivateFieldSet2(_configTableReady, this, true);
  }
  /**
   * Persist a typed configuration object.
   * Stored in SQLite so it survives restarts and hibernation.
   */
  configure(config) {
    this._ensureConfigTable();
    const json = JSON.stringify(config);
    this.sql`
      INSERT OR REPLACE INTO _think_config (key, value) VALUES ('config', ${json})
    `;
    _classPrivateFieldSet2(_configCache, this, config);
  }
  /**
   * Read the persisted configuration, or null if never configured.
   * Cached in memory after the first read.
   */
  getConfig() {
    if (_classPrivateFieldGet2(_configCache, this)) return _classPrivateFieldGet2(_configCache, this);
    this._ensureConfigTable();
    const rows = this.sql`
      SELECT value FROM _think_config WHERE key = 'config'
    `;
    if (rows.length > 0) {
      _classPrivateFieldSet2(_configCache, this, JSON.parse(rows[0].value));
      return _classPrivateFieldGet2(_configCache, this);
    }
    return null;
  }
  /**
   * Wire up session storage, restore an existing session's history,
   * and install the chat-protocol handlers.
   * Resumes the first listed session — presumably the most relevant one;
   * the actual ordering is defined by SessionManager.list() (not visible here).
   */
  onStart() {
    this.sessions = new SessionManager(this, { exec: (query, ...values) => {
      this.ctx.storage.sql.exec(query, ...values);
    } });
    const existing = this.sessions.list();
    if (existing.length > 0) {
      this._sessionId = existing[0].id;
      this.messages = this.sessions.getHistory(this._sessionId);
      this._rebuildPersistenceCache();
    }
    this._setupProtocolHandlers();
    if (this.fibers) this.checkFibers();
  }
  /**
   * Return the language model to use for inference.
   * Must be overridden by subclasses that rely on the default
   * `onChatMessage` implementation (the agentic loop).
   * @throws {Error} Always, unless overridden.
   */
  getModel() {
    throw new Error("Override getModel() to return a LanguageModel, or override onChatMessage() for full control.");
  }
  /**
   * Return the system prompt for the assistant.
   * Override to customize instructions.
   */
  getSystemPrompt() {
    return "You are a helpful assistant.";
  }
  /**
   * Return the tools available to the assistant.
   * Override to provide workspace tools, custom tools, etc.
   */
  getTools() {
    return {};
  }
  /**
   * Return the maximum number of tool-call steps per turn.
   */
  getMaxSteps() {
    return 10;
  }
  /**
   * Return the workspace instance for this session, or null if none.
   *
   * Override in subclasses that create a Workspace. Used by
   * HostBridgeLoopback to provide workspace access to extension Workers.
   */
  getWorkspace() {
    return null;
  }
  // Workspace file-access surface (used when a workspace exists; see
  // getWorkspace). Each throws when no workspace is configured.
  async _hostReadFile(path) {
    const ws = this.getWorkspace();
    if (!ws) throw new Error("No workspace available on this agent");
    return ws.readFile(path);
  }
  async _hostWriteFile(path, content) {
    const ws = this.getWorkspace();
    if (!ws) throw new Error("No workspace available on this agent");
    await ws.writeFile(path, content);
  }
  async _hostDeleteFile(path) {
    const ws = this.getWorkspace();
    if (!ws) throw new Error("No workspace available on this agent");
    return ws.deleteFile(path);
  }
  _hostListFiles(dir) {
    const ws = this.getWorkspace();
    if (!ws) throw new Error("No workspace available on this agent");
    return ws.readDir(dir);
  }
  /**
   * Assemble the model messages from the current conversation history.
   * Override to customize context assembly (e.g. inject memory,
   * project context, or apply compaction).
   */
  async assembleContext() {
    return pruneMessages({
      messages: await convertToModelMessages(this.messages),
      toolCalls: "before-last-2-messages"
    });
  }
  /**
   * Handle a chat turn and return the streaming result.
   *
   * The default implementation runs the agentic loop:
   * 1. Assemble context from `this.messages`
   * 2. Call `streamText` with the model, system prompt, tools, and step limit
   *
   * Override for full control over inference (e.g. different models per turn,
   * RAG pipelines, routing to specialized sub-agents, etc.).
   *
   * When this is called, `this.messages` already contains the user's
   * latest message persisted to the current session.
   *
   * @param options Optional per-turn overrides: `signal` for abort,
   *   `tools` merged over (and shadowing) `getTools()`.
   * @returns A result with `toUIMessageStream()` — AI SDK's `streamText()`
   * return value satisfies this interface.
   */
  async onChatMessage(options) {
    const baseTools = this.getTools();
    const tools = options?.tools ? {
      ...baseTools,
      ...options.tools
    } : baseTools;
    return streamText({
      model: this.getModel(),
      system: this.getSystemPrompt(),
      messages: await this.assembleContext(),
      tools,
      stopWhen: stepCountIs(this.getMaxSteps()),
      abortSignal: options?.signal
    });
  }
  /**
   * Handle an error that occurred during a chat turn.
   * Override to customize error handling (e.g. logging, metrics).
   *
   * @param error The error that occurred
   * @returns The error (or a wrapped version) to propagate
   */
  onChatError(error) {
    return error;
  }
  /**
   * Run a chat turn: persist the user message, run the agentic loop,
   * stream UIMessageChunk events via callback, and persist the
   * assistant's response.
   *
   * On error or abort, the partial assistant message is still persisted
   * so the user doesn't lose context.
   *
   * @param userMessage The user's message (string or UIMessage for multi-modal)
   * @param callback Streaming callback (typically an RpcTarget from the parent)
   * @param options Optional chat options (e.g. AbortSignal)
   */
  async chat(userMessage, callback, options) {
    if (!this._sessionId) this._sessionId = this.sessions.create("default").id;
    // Plain strings become a single text part; UIMessages pass through.
    const userMsg = typeof userMessage === "string" ? {
      id: crypto.randomUUID(),
      role: "user",
      parts: [{
        type: "text",
        text: userMessage
      }]
    } : userMessage;
    this.sessions.append(this._sessionId, userMsg);
    this.messages = this.sessions.getHistory(this._sessionId);
    const assistantMsg = {
      id: crypto.randomUUID(),
      role: "assistant",
      parts: []
    };
    try {
      const result = await this.onChatMessage({
        signal: options?.signal,
        tools: options?.tools
      });
      let aborted = false;
      for await (const chunk of result.toUIMessageStream()) {
        if (options?.signal?.aborted) {
          aborted = true;
          break;
        }
        // Accumulate parts locally while forwarding each chunk serialized.
        applyChunkToParts(assistantMsg.parts, chunk);
        await callback.onEvent(JSON.stringify(chunk));
      }
      this._persistAssistantMessage(assistantMsg);
      // On abort we skip onDone; the caller initiated the cancellation.
      if (!aborted) await callback.onDone();
    } catch (error) {
      // Keep whatever partial output was produced before the failure.
      if (assistantMsg.parts.length > 0) this._persistAssistantMessage(assistantMsg);
      const wrapped = this.onChatError(error);
      const errorMessage = wrapped instanceof Error ? wrapped.message : String(wrapped);
      if (callback.onError) await callback.onError(errorMessage);
      else throw wrapped;
    }
  }
  /** List all sessions known to this agent. */
  getSessions() {
    return this.sessions.list();
  }
  /**
   * Create a new session, make it current, reset the in-memory history,
   * and notify connected clients.
   */
  createSession(name) {
    const session = this.sessions.create(name);
    this._sessionId = session.id;
    this.messages = [];
    this._broadcastMessages();
    return session;
  }
  /**
   * Switch to an existing session, reload its history, and notify clients.
   * @throws {Error} If the session does not exist.
   */
  switchSession(sessionId) {
    if (!this.sessions.get(sessionId)) throw new Error(`Session not found: ${sessionId}`);
    this._sessionId = sessionId;
    this.messages = this.sessions.getHistory(sessionId);
    this._broadcastMessages();
    return this.messages;
  }
  /**
   * Delete a session. If it was the current one, clear local state too.
   * @throws {Error} If the session does not exist.
   */
  deleteSession(sessionId) {
    if (!this.sessions.get(sessionId)) throw new Error(`Session not found: ${sessionId}`);
    this.sessions.delete(sessionId);
    if (this._sessionId === sessionId) {
      this._sessionId = null;
      this.messages = [];
      this._broadcastMessages();
    }
  }
  /**
   * Rename an existing session.
   * @throws {Error} If the session does not exist.
   */
  renameSession(sessionId, name) {
    if (!this.sessions.get(sessionId)) throw new Error(`Session not found: ${sessionId}`);
    this.sessions.rename(sessionId, name);
  }
  /** Id of the current session, or null if none is active yet. */
  getCurrentSessionId() {
    return this._sessionId;
  }
  /**
   * Get the current session info, or null if no session exists yet.
   */
  getSession() {
    if (!this._sessionId) return null;
    return this.sessions.get(this._sessionId);
  }
  /**
   * Get the conversation history as UIMessage[].
   */
  getHistory() {
    if (!this._sessionId) return [];
    return this.sessions.getHistory(this._sessionId);
  }
  /**
   * Get the total message count for this session.
   */
  getMessageCount() {
    if (!this._sessionId) return 0;
    return this.sessions.getMessageCount(this._sessionId);
  }
  /**
   * Clear all messages from this session (preserves the session itself).
   */
  clearMessages() {
    if (!this._sessionId) return;
    this.sessions.clearMessages(this._sessionId);
    this.messages = [];
    this._persistedMessageCache.clear();
  }
  /**
   * Wrap onMessage and onRequest to intercept the chat protocol.
   * Unrecognized messages are forwarded to the user's handlers.
   * @internal
   */
  _setupProtocolHandlers() {
    const _onMessage = this.onMessage.bind(this);
    this.onMessage = async (connection, message) => {
      // Non-JSON payloads (or protocol errors) fall through to the
      // user's original onMessage handler.
      if (typeof message === "string") try {
        const data = JSON.parse(message);
        if (await this._handleProtocol(connection, data)) return;
      } catch {}
      return _onMessage(connection, message);
    };
    const _onRequest = this.onRequest.bind(this);
    this.onRequest = async (request) => {
      const url = new URL(request.url);
      if (url.pathname === "/get-messages" || url.pathname.endsWith("/get-messages")) {
        // Optional ?sessionId=... selects a specific session's history;
        // otherwise the current in-memory messages are returned.
        const sessionId = url.searchParams.get("sessionId");
        if (sessionId) {
          if (!this.sessions.get(sessionId)) return Response.json({ error: "Session not found" }, { status: 404 });
          return Response.json(this.sessions.getHistory(sessionId));
        }
        return Response.json(this.messages);
      }
      return _onRequest(request);
    };
  }
  /**
   * Route an incoming WebSocket message to the appropriate handler.
   * Returns true if the message was handled by the protocol.
   * @internal
   */
  async _handleProtocol(connection, data) {
    const type = data.type;
    if (type === MSG_CHAT_REQUEST) {
      if (data.init?.method === "POST") {
        await this._handleChatRequest(connection, data);
        return true;
      }
    }
    if (type === MSG_CHAT_CLEAR) {
      this._handleClear();
      return true;
    }
    if (type === MSG_CHAT_CANCEL) {
      this._handleCancel(data.id);
      return true;
    }
    return false;
  }
  /**
   * Handle CF_AGENT_USE_CHAT_REQUEST:
   * 1. Parse incoming messages
   * 2. Ensure a session exists
   * 3. Persist user messages to session
   * 4. Call onChatMessage
   * 5. Stream response back to clients
   * 6. Persist assistant message to session
   * @internal
   */
  async _handleChatRequest(connection, data) {
    const init = data.init;
    if (!init?.body) return;
    let parsed;
    try {
      parsed = JSON.parse(init.body);
    } catch {
      // Malformed request body: ignore silently (no response frame sent).
      return;
    }
    const incomingMessages = parsed.messages;
    if (!Array.isArray(incomingMessages)) return;
    if (!this._sessionId) this._sessionId = this.sessions.create("New Chat").id;
    this.sessions.appendAll(this._sessionId, incomingMessages);
    this.messages = this.sessions.getHistory(this._sessionId);
    // Sync every other client; the sender already has these messages.
    this._broadcastMessages([connection.id]);
    const requestId = data.id;
    const abortController = new AbortController();
    this._abortControllers.set(requestId, abortController);
    try {
      // keepAliveWhile prevents hibernation while the turn streams.
      await this.keepAliveWhile(async () => {
        const result = await __DO_NOT_USE_WILL_BREAK__agentContext.run({
          agent: this,
          connection,
          request: void 0,
          email: void 0
        }, () => this.onChatMessage({ signal: abortController.signal }));
        if (result) await this._streamResult(requestId, result, abortController.signal);
        else this._broadcast({
          type: MSG_CHAT_RESPONSE,
          id: requestId,
          body: "No response was generated.",
          done: true
        });
      });
    } catch (error) {
      this._broadcast({
        type: MSG_CHAT_RESPONSE,
        id: requestId,
        body: error instanceof Error ? error.message : "Error",
        done: true,
        error: true
      });
    } finally {
      this._abortControllers.delete(requestId);
    }
  }
  /**
   * Handle CF_AGENT_CHAT_CLEAR: abort streams, clear current session messages.
   * @internal
   */
  _handleClear() {
    for (const controller of this._abortControllers.values()) controller.abort();
    this._abortControllers.clear();
    if (this._sessionId) this.sessions.clearMessages(this._sessionId);
    this.messages = [];
    this._persistedMessageCache.clear();
    // Invalidate any stream still finishing so it won't persist stale output.
    this._clearGeneration++;
    this._broadcast({ type: MSG_CHAT_CLEAR });
  }
  /**
   * Handle CF_AGENT_CHAT_REQUEST_CANCEL: abort a specific request.
   * @internal
   */
  _handleCancel(requestId) {
    const controller = this._abortControllers.get(requestId);
    if (controller) controller.abort();
  }
  /**
   * Iterate a StreamableResult, broadcast chunks to clients,
   * build a UIMessage, and persist it to the session.
   * @internal
   */
  async _streamResult(requestId, result, abortSignal) {
    // Snapshot the clear generation; if a clear happens mid-stream the
    // assembled message is discarded instead of persisted.
    const clearGen = this._clearGeneration;
    const message = {
      id: crypto.randomUUID(),
      role: "assistant",
      parts: []
    };
    let doneSent = false;
    try {
      for await (const chunk of result.toUIMessageStream()) {
        if (abortSignal?.aborted) break;
        const data = chunk;
        // Chunks not absorbed into parts carry message-level info.
        if (!applyChunkToParts(message.parts, data)) switch (data.type) {
          case "start":
            if (data.messageId != null) message.id = data.messageId;
            if (data.messageMetadata != null) message.metadata = message.metadata ? {
              ...message.metadata,
              ...data.messageMetadata
            } : data.messageMetadata;
            break;
          case "finish":
          case "message-metadata":
            if (data.messageMetadata != null) message.metadata = message.metadata ? {
              ...message.metadata,
              ...data.messageMetadata
            } : data.messageMetadata;
            break;
          case "error":
            // Surface the error frame but keep consuming the stream.
            this._broadcast({
              type: MSG_CHAT_RESPONSE,
              id: requestId,
              body: data.errorText ?? JSON.stringify(data),
              done: false,
              error: true
            });
            continue;
        }
        this._broadcast({
          type: MSG_CHAT_RESPONSE,
          id: requestId,
          body: JSON.stringify(chunk),
          done: false
        });
      }
      this._broadcast({
        type: MSG_CHAT_RESPONSE,
        id: requestId,
        body: "",
        done: true
      });
      doneSent = true;
    } catch (error) {
      if (!doneSent) {
        this._broadcast({
          type: MSG_CHAT_RESPONSE,
          id: requestId,
          body: error instanceof Error ? error.message : "Stream error",
          done: true,
          error: true
        });
        doneSent = true;
      }
    } finally {
      // Guarantee clients always receive a terminal done frame.
      if (!doneSent) this._broadcast({
        type: MSG_CHAT_RESPONSE,
        id: requestId,
        body: "",
        done: true
      });
    }
    if (message.parts.length > 0 && this._sessionId && this._clearGeneration === clearGen) try {
      this._persistAssistantMessage(message);
      this._broadcastMessages();
    } catch (e) {
      // Persistence failure must not take down the stream path.
      console.error("Failed to persist assistant message:", e);
    }
  }
  /**
   * Persist an assistant message with sanitization, size enforcement,
   * and incremental persistence.
   * @internal
   */
  _persistAssistantMessage(msg) {
    if (!this._sessionId) return;
    const safe = enforceRowSizeLimit(sanitizeMessage(msg));
    const json = JSON.stringify(safe);
    // Skip the upsert when the serialized form hasn't changed.
    if (this._persistedMessageCache.get(safe.id) !== json) {
      this.sessions.upsert(this._sessionId, safe);
      this._persistedMessageCache.set(safe.id, json);
    }
    if (this.maxPersistedMessages != null) this._enforceMaxPersistedMessages();
    this.messages = this.sessions.getHistory(this._sessionId);
  }
  /**
   * Rebuild the persistence cache from current messages.
   * Called on startup to enable incremental persistence.
   * @internal
   */
  _rebuildPersistenceCache() {
    this._persistedMessageCache.clear();
    for (const msg of this.messages) this._persistedMessageCache.set(msg.id, JSON.stringify(msg));
  }
  /**
   * Delete oldest messages on the current branch when count exceeds
   * maxPersistedMessages. Uses path-based count (not total across all
   * branches) and individual deletes to preserve branch structure.
   * @internal
   */
  _enforceMaxPersistedMessages() {
    if (this.maxPersistedMessages == null || !this._sessionId) return;
    const history = this.sessions.getHistory(this._sessionId);
    if (history.length <= this.maxPersistedMessages) return;
    const excess = history.length - this.maxPersistedMessages;
    const toRemove = history.slice(0, excess);
    this.sessions.deleteMessages(toRemove.map((m) => m.id));
    for (const msg of toRemove) this._persistedMessageCache.delete(msg.id);
  }
  /**
   * Broadcast a JSON message to all connected clients.
   * @internal
   */
  _broadcast(message, exclude) {
    this.broadcast(JSON.stringify(message), exclude);
  }
  /**
   * Broadcast the current message list to all connected clients.
   * @internal
   */
  _broadcastMessages(exclude) {
    this._broadcast({
      type: MSG_CHAT_MESSAGES,
      messages: this.messages
    }, exclude);
  }
};
|
|
698
|
+
//#endregion
|
|
699
|
+
export { Think };
|
|
700
|
+
|
|
701
|
+
//# sourceMappingURL=think.js.map
|