agents 0.8.7 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat/index.d.ts +603 -0
- package/dist/chat/index.js +1285 -0
- package/dist/chat/index.js.map +1 -0
- package/dist/{client-BwgM3cRz.js → client-QBjFV5de.js} +161 -49
- package/dist/client-QBjFV5de.js.map +1 -0
- package/dist/client.d.ts +2 -2
- package/dist/{compaction-helpers-BFTBIzpK.js → compaction-helpers-BPE1_ziA.js} +1 -1
- package/dist/{compaction-helpers-BFTBIzpK.js.map → compaction-helpers-BPE1_ziA.js.map} +1 -1
- package/dist/{compaction-helpers-DkJreaDR.d.ts → compaction-helpers-CHNQeyRm.d.ts} +1 -1
- package/dist/{do-oauth-client-provider-C2jurFjW.d.ts → do-oauth-client-provider-31gqR33H.d.ts} +1 -1
- package/dist/{email-DwPlM0bQ.d.ts → email-Cql45SKP.d.ts} +1 -1
- package/dist/email.d.ts +2 -2
- package/dist/experimental/memory/session/index.d.ts +298 -73
- package/dist/experimental/memory/session/index.js +754 -66
- package/dist/experimental/memory/session/index.js.map +1 -1
- package/dist/experimental/memory/utils/index.d.ts +1 -1
- package/dist/experimental/memory/utils/index.js +1 -1
- package/dist/{index-C-6EMK-E.d.ts → index-BPkkIqMn.d.ts} +209 -76
- package/dist/{index-Ua2Nfvbm.d.ts → index-DDSX-g7W.d.ts} +11 -1
- package/dist/index.d.ts +30 -26
- package/dist/index.js +2 -3049
- package/dist/{internal_context-DT8RxmAN.d.ts → internal_context-DuQZFvWI.d.ts} +1 -1
- package/dist/internal_context.d.ts +1 -1
- package/dist/mcp/client.d.ts +2 -2
- package/dist/mcp/client.js +1 -1
- package/dist/mcp/do-oauth-client-provider.d.ts +1 -1
- package/dist/mcp/index.d.ts +1 -1
- package/dist/mcp/index.js +2 -2
- package/dist/observability/index.d.ts +1 -1
- package/dist/react.d.ts +3 -1
- package/dist/react.js +3 -0
- package/dist/react.js.map +1 -1
- package/dist/{retries-DXMQGhG3.d.ts → retries-B_CN5KM9.d.ts} +1 -1
- package/dist/retries.d.ts +1 -1
- package/dist/{serializable-8Jt1B04R.d.ts → serializable-DGdO8CDh.d.ts} +1 -1
- package/dist/serializable.d.ts +1 -1
- package/dist/src-B8NZxxsO.js +3217 -0
- package/dist/src-B8NZxxsO.js.map +1 -0
- package/dist/{types-C-m0II8i.d.ts → types-B9A8AU7B.d.ts} +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/{workflow-types-CZNXKj_D.d.ts → workflow-types-XmOkuI7A.d.ts} +1 -1
- package/dist/workflow-types.d.ts +1 -1
- package/dist/workflows.d.ts +2 -2
- package/dist/workflows.js +1 -1
- package/package.json +20 -18
- package/dist/client-BwgM3cRz.js.map +0 -1
- package/dist/experimental/forever.d.ts +0 -64
- package/dist/experimental/forever.js +0 -338
- package/dist/experimental/forever.js.map +0 -1
- package/dist/index.js.map +0 -1
|
@@ -0,0 +1,1285 @@
|
|
|
1
|
+
import { nanoid } from "nanoid";
|
|
2
|
+
import { jsonSchema, tool } from "ai";
|
|
3
|
+
//#region src/chat/message-builder.ts
|
|
4
|
+
/**
 * Applies a stream chunk to a mutable parts array, building up the message
 * incrementally. Returns true if the chunk was handled, false if it was
 * an unrecognized type (caller may handle it with additional logic).
 *
 * Handles all common chunk types that both server and client need:
 * - text-start / text-delta / text-end
 * - reasoning-start / reasoning-delta / reasoning-end
 * - file
 * - source-url / source-document
 * - tool-input-start / tool-input-delta / tool-input-available / tool-input-error
 * - tool-approval-request / tool-output-denied
 * - tool-output-available / tool-output-error
 * - step-start (aliased from start-step)
 * - data-* (developer-defined typed JSON blobs)
 *
 * Both the array and the part objects inside it are mutated in place
 * (delta chunks append to the last matching part), so callers must not
 * treat `parts` as an immutable snapshot.
 *
 * @param parts - The mutable parts array to update
 * @param chunk - The parsed stream chunk data
 * @returns true if handled, false if the chunk type is not recognized
 */
function applyChunkToParts(parts, chunk) {
  switch (chunk.type) {
    case "text-start":
      // Open a fresh streaming text part; subsequent text-delta chunks append to it.
      parts.push({
        type: "text",
        text: "",
        state: "streaming"
      });
      return true;
    case "text-delta": {
      const lastTextPart = findLastPartByType(parts, "text");
      // Append to the most recent text part; if a delta arrives without a
      // preceding text-start, create the part defensively.
      if (lastTextPart && lastTextPart.type === "text") lastTextPart.text += chunk.delta ?? "";
      else parts.push({
        type: "text",
        text: chunk.delta ?? "",
        state: "streaming"
      });
      return true;
    }
    case "text-end": {
      const lastTextPart = findLastPartByType(parts, "text");
      if (lastTextPart && "state" in lastTextPart) lastTextPart.state = "done";
      return true;
    }
    case "reasoning-start":
      parts.push({
        type: "reasoning",
        text: "",
        state: "streaming"
      });
      return true;
    case "reasoning-delta": {
      const lastReasoningPart = findLastPartByType(parts, "reasoning");
      if (lastReasoningPart && lastReasoningPart.type === "reasoning") lastReasoningPart.text += chunk.delta ?? "";
      else parts.push({
        type: "reasoning",
        text: chunk.delta ?? "",
        state: "streaming"
      });
      return true;
    }
    case "reasoning-end": {
      const lastReasoningPart = findLastPartByType(parts, "reasoning");
      if (lastReasoningPart && "state" in lastReasoningPart) lastReasoningPart.state = "done";
      return true;
    }
    case "file":
      parts.push({
        type: "file",
        mediaType: chunk.mediaType,
        url: chunk.url
      });
      return true;
    case "source-url":
      parts.push({
        type: "source-url",
        sourceId: chunk.sourceId,
        url: chunk.url,
        title: chunk.title,
        providerMetadata: chunk.providerMetadata
      });
      return true;
    case "source-document":
      parts.push({
        type: "source-document",
        sourceId: chunk.sourceId,
        mediaType: chunk.mediaType,
        title: chunk.title,
        filename: chunk.filename,
        providerMetadata: chunk.providerMetadata
      });
      return true;
    case "tool-input-start":
      // Tool parts are typed `tool-<name>` and keyed by toolCallId.
      // Optional fields are attached only when present on the chunk.
      parts.push({
        type: `tool-${chunk.toolName}`,
        toolCallId: chunk.toolCallId,
        toolName: chunk.toolName,
        state: "input-streaming",
        input: void 0,
        ...chunk.providerExecuted != null ? { providerExecuted: chunk.providerExecuted } : {},
        ...chunk.providerMetadata != null ? { callProviderMetadata: chunk.providerMetadata } : {},
        ...chunk.title != null ? { title: chunk.title } : {}
      });
      return true;
    case "tool-input-delta": {
      const toolPart = findToolPartByCallId(parts, chunk.toolCallId);
      // NOTE(review): assigns chunk.input wholesale rather than appending —
      // presumably each delta carries the full input accumulated so far;
      // confirm against the stream producer.
      if (toolPart) toolPart.input = chunk.input;
      return true;
    }
    case "tool-input-available": {
      const existing = findToolPartByCallId(parts, chunk.toolCallId);
      if (existing) {
        const p = existing;
        p.state = "input-available";
        p.input = chunk.input;
        if (chunk.providerExecuted != null) p.providerExecuted = chunk.providerExecuted;
        if (chunk.providerMetadata != null) p.callProviderMetadata = chunk.providerMetadata;
        if (chunk.title != null) p.title = chunk.title;
      } else parts.push({
        type: `tool-${chunk.toolName}`,
        toolCallId: chunk.toolCallId,
        toolName: chunk.toolName,
        state: "input-available",
        input: chunk.input,
        ...chunk.providerExecuted != null ? { providerExecuted: chunk.providerExecuted } : {},
        ...chunk.providerMetadata != null ? { callProviderMetadata: chunk.providerMetadata } : {},
        ...chunk.title != null ? { title: chunk.title } : {}
      });
      return true;
    }
    case "tool-input-error": {
      const existing = findToolPartByCallId(parts, chunk.toolCallId);
      if (existing) {
        const p = existing;
        p.state = "output-error";
        p.errorText = chunk.errorText;
        p.input = chunk.input;
        if (chunk.providerExecuted != null) p.providerExecuted = chunk.providerExecuted;
        if (chunk.providerMetadata != null) p.callProviderMetadata = chunk.providerMetadata;
      } else parts.push({
        type: `tool-${chunk.toolName}`,
        toolCallId: chunk.toolCallId,
        toolName: chunk.toolName,
        state: "output-error",
        input: chunk.input,
        errorText: chunk.errorText,
        ...chunk.providerExecuted != null ? { providerExecuted: chunk.providerExecuted } : {},
        ...chunk.providerMetadata != null ? { callProviderMetadata: chunk.providerMetadata } : {}
      });
      return true;
    }
    case "tool-approval-request": {
      const toolPart = findToolPartByCallId(parts, chunk.toolCallId);
      if (toolPart) {
        const p = toolPart;
        p.state = "approval-requested";
        p.approval = { id: chunk.approvalId };
      }
      return true;
    }
    case "tool-output-denied": {
      const toolPart = findToolPartByCallId(parts, chunk.toolCallId);
      if (toolPart) {
        const p = toolPart;
        p.state = "output-denied";
      }
      return true;
    }
    case "tool-output-available": {
      const toolPart = findToolPartByCallId(parts, chunk.toolCallId);
      if (toolPart) {
        const p = toolPart;
        p.state = "output-available";
        p.output = chunk.output;
        // `preliminary` may legitimately be false, so only skip when absent.
        if (chunk.preliminary !== void 0) p.preliminary = chunk.preliminary;
      }
      return true;
    }
    case "tool-output-error": {
      const toolPart = findToolPartByCallId(parts, chunk.toolCallId);
      if (toolPart) {
        const p = toolPart;
        p.state = "output-error";
        p.errorText = chunk.errorText;
      }
      return true;
    }
    case "step-start":
    case "start-step":
      // "start-step" is the wire alias; both map to a step-start marker part.
      parts.push({ type: "step-start" });
      return true;
    default:
      if (chunk.type.startsWith("data-")) {
        // Transient data chunks are intentionally never stored as parts.
        if (chunk.transient) return true;
        if (chunk.id != null) {
          // type+id is a composite key: the same combination updates the
          // existing part's data in place instead of appending a new part.
          const existing = findDataPartByTypeAndId(parts, chunk.type, chunk.id);
          if (existing) {
            existing.data = chunk.data;
            return true;
          }
        }
        parts.push({
          type: chunk.type,
          ...chunk.id != null && { id: chunk.id },
          data: chunk.data
        });
        return true;
      }
      // Unrecognized chunk type: let the caller decide what to do with it.
      return false;
  }
}
|
|
214
|
+
/**
 * Returns the last part in `parts` whose `type` matches, or undefined.
 * Scans backwards because the part of interest is usually the most recent.
 */
function findLastPartByType(parts, type) {
  let i = parts.length;
  while (i-- > 0) {
    if (parts[i].type === type) return parts[i];
  }
  return undefined;
}
|
|
221
|
+
/**
 * Returns the most recent part carrying the given toolCallId, or undefined.
 * A falsy toolCallId never matches (guards against empty-string ids).
 */
function findToolPartByCallId(parts, toolCallId) {
  if (!toolCallId) return undefined;
  let idx = parts.length;
  while (idx-- > 0) {
    const candidate = parts[idx];
    if ("toolCallId" in candidate && candidate.toolCallId === toolCallId) return candidate;
  }
  return undefined;
}
|
|
232
|
+
/**
 * Looks up a data part by its (type, id) composite key, newest first.
 * Used to reconcile repeated data-* chunks: when the same combination is
 * seen again, the existing part's data is updated in place.
 */
function findDataPartByTypeAndId(parts, type, id) {
  let i = parts.length;
  while (i-- > 0) {
    const candidate = parts[i];
    if (candidate.type !== type) continue;
    if ("id" in candidate && candidate.id === id) return candidate;
  }
  return undefined;
}
|
|
243
|
+
//#endregion
|
|
244
|
+
//#region src/chat/sanitize.ts
|
|
245
|
+
// Reused encoder — avoids allocating a TextEncoder per measurement.
const textEncoder$1 = new TextEncoder();
/** Maximum serialized message size before compaction (bytes). 1.8MB with headroom below SQLite's 2MB limit. */
const ROW_MAX_BYTES = 18e5;
/** Measure the UTF-8 byte length of a string. */
function byteLength(s) {
  const encoded = textEncoder$1.encode(s);
  return encoded.byteLength;
}
|
|
252
|
+
/**
 * Sanitize a message for persistence by removing ephemeral provider-specific
 * data that should not be stored or sent back in subsequent requests.
 *
 * 1. Strips OpenAI ephemeral fields (itemId, reasoningEncryptedContent)
 * 2. Filters truly empty reasoning parts (no text, no remaining providerMetadata)
 *
 * @param message - UIMessage to sanitize (not mutated)
 * @returns A shallow copy of the message with sanitized parts
 */
function sanitizeMessage(message) {
  // True when `meta` is a non-null object carrying an `openai` namespace.
  const hasOpenAI = (meta) => Boolean(meta) && typeof meta === "object" && "openai" in meta;
  const stripped = message.parts.map((part) => {
    let cleaned = part;
    if ("providerMetadata" in cleaned && hasOpenAI(cleaned.providerMetadata)) {
      cleaned = stripOpenAIMetadata(cleaned, "providerMetadata");
    }
    if ("callProviderMetadata" in cleaned && hasOpenAI(cleaned.callProviderMetadata)) {
      cleaned = stripOpenAIMetadata(cleaned, "callProviderMetadata");
    }
    return cleaned;
  });
  const kept = stripped.filter((part) => {
    if (part.type !== "reasoning") return true;
    const text = part.text;
    if (text && text.trim() !== "") return true;
    // Keep an empty reasoning part only when it still carries provider metadata.
    const meta = "providerMetadata" in part ? part.providerMetadata : void 0;
    return Boolean(meta) && typeof meta === "object" && Object.keys(meta).length > 0;
  });
  return {
    ...message,
    parts: kept
  };
}
|
|
280
|
+
/**
 * Remove ephemeral OpenAI fields (itemId, reasoningEncryptedContent) from a
 * part's metadata object under `metadataKey`. The metadata key (and the
 * `openai` namespace within it) is dropped entirely when nothing remains.
 * Returns the original part unchanged when there is no openai namespace.
 */
function stripOpenAIMetadata(part, metadataKey) {
  const metadata = part[metadataKey];
  if (!metadata?.openai) return part;
  // Drop the two ephemeral OpenAI fields; keep everything else.
  const { itemId: _drop1, reasoningEncryptedContent: _drop2, ...openaiRest } = metadata.openai;
  const { openai: _drop3, ...metadataRest } = metadata;
  let rebuilt;
  if (Object.keys(openaiRest).length > 0) {
    rebuilt = { ...metadataRest, openai: openaiRest };
  } else if (Object.keys(metadataRest).length > 0) {
    rebuilt = metadataRest;
  }
  const { [metadataKey]: _drop4, ...partRest } = part;
  return rebuilt ? { ...partRest, [metadataKey]: rebuilt } : partRest;
}
|
|
299
|
+
/**
 * Enforce SQLite row size limits by compacting tool outputs and text parts
 * when a serialized message exceeds the safety threshold (ROW_MAX_BYTES,
 * 1.8MB — headroom below SQLite's 2MB row limit).
 *
 * Compaction strategy:
 * 1. Compact tool outputs over 1KB (replace with a summary + 500-char preview)
 * 2. If still too big, truncate text parts from oldest to newest
 *
 * Fix: the threshold was previously the magic literal `18e5` repeated three
 * times; it now references the module's ROW_MAX_BYTES constant (same value)
 * so the limit has a single source of truth.
 *
 * @param message - The UIMessage about to be persisted (not mutated)
 * @returns The original message when it fits, otherwise a compacted copy
 */
function enforceRowSizeLimit(message) {
  if (byteLength(JSON.stringify(message)) <= ROW_MAX_BYTES) return message;
  // Only assistant messages carry tool outputs; other roles can only
  // shrink via text truncation.
  if (message.role !== "assistant") return truncateTextParts(message);
  const compactedParts = message.parts.map((part) => {
    if ("output" in part && "toolCallId" in part && "state" in part && part.state === "output-available") {
      const outputJson = JSON.stringify(part.output);
      if (outputJson.length > 1e3) return {
        ...part,
        output: `This tool output was too large to persist in storage (${outputJson.length} bytes). If the user asks about this data, suggest re-running the tool. Preview: ${outputJson.slice(0, 500)}...`
      };
    }
    return part;
  });
  const result = {
    ...message,
    parts: compactedParts
  };
  // Re-measure after tool-output compaction; fall back to text truncation
  // only if the message is still over the limit.
  if (byteLength(JSON.stringify(result)) <= ROW_MAX_BYTES) return result;
  return truncateTextParts(result);
}
|
|
331
|
+
/**
 * Truncate oversized text parts (over 1000 chars), oldest first, until the
 * serialized message fits under the 1.8MB row limit. Each truncated part is
 * replaced with a marker containing the original length and a 500-char
 * preview. The input message is not mutated.
 */
function truncateTextParts(message) {
  const parts = [...message.parts];
  for (let idx = 0; idx < parts.length; idx++) {
    const current = parts[idx];
    if (current.type !== "text" || !("text" in current)) continue;
    const text = current.text;
    if (text.length <= 1e3) continue;
    parts[idx] = {
      ...current,
      text: `[Text truncated for storage (${text.length} chars). First 500 chars: ${text.slice(0, 500)}...]`
    };
    // Stop as soon as the message-so-far fits under the row limit.
    if (byteLength(JSON.stringify({ ...message, parts })) <= 18e5) break;
  }
  return {
    ...message,
    parts
  };
}
|
|
355
|
+
//#endregion
|
|
356
|
+
//#region src/chat/stream-accumulator.ts
|
|
357
|
+
/** Narrow an unknown value to a plain metadata object; undefined otherwise. */
function asMetadata(value) {
  if (value == null) return undefined;
  if (typeof value !== "object") return undefined;
  if (Array.isArray(value)) return undefined;
  return value;
}
|
|
360
|
+
var StreamAccumulator = class {
|
|
361
|
+
constructor(options) {
|
|
362
|
+
this.messageId = options.messageId;
|
|
363
|
+
this._isContinuation = options.continuation ?? false;
|
|
364
|
+
this.parts = options.existingParts ? [...options.existingParts] : [];
|
|
365
|
+
this.metadata = options.existingMetadata ? { ...options.existingMetadata } : void 0;
|
|
366
|
+
}
|
|
367
|
+
applyChunk(chunk) {
|
|
368
|
+
const handled = applyChunkToParts(this.parts, chunk);
|
|
369
|
+
if (chunk.type === "tool-approval-request" && chunk.toolCallId) return {
|
|
370
|
+
handled,
|
|
371
|
+
action: {
|
|
372
|
+
type: "tool-approval-request",
|
|
373
|
+
toolCallId: chunk.toolCallId
|
|
374
|
+
}
|
|
375
|
+
};
|
|
376
|
+
if ((chunk.type === "tool-output-available" || chunk.type === "tool-output-error") && chunk.toolCallId) {
|
|
377
|
+
if (!this.parts.some((p) => "toolCallId" in p && p.toolCallId === chunk.toolCallId)) return {
|
|
378
|
+
handled,
|
|
379
|
+
action: {
|
|
380
|
+
type: "cross-message-tool-update",
|
|
381
|
+
updateType: chunk.type === "tool-output-available" ? "output-available" : "output-error",
|
|
382
|
+
toolCallId: chunk.toolCallId,
|
|
383
|
+
output: chunk.output,
|
|
384
|
+
errorText: chunk.errorText,
|
|
385
|
+
preliminary: chunk.preliminary
|
|
386
|
+
}
|
|
387
|
+
};
|
|
388
|
+
}
|
|
389
|
+
if (!handled) switch (chunk.type) {
|
|
390
|
+
case "start": {
|
|
391
|
+
if (chunk.messageId != null && !this._isContinuation) this.messageId = chunk.messageId;
|
|
392
|
+
const startMeta = asMetadata(chunk.messageMetadata);
|
|
393
|
+
if (startMeta) this.metadata = this.metadata ? {
|
|
394
|
+
...this.metadata,
|
|
395
|
+
...startMeta
|
|
396
|
+
} : { ...startMeta };
|
|
397
|
+
return {
|
|
398
|
+
handled: true,
|
|
399
|
+
action: {
|
|
400
|
+
type: "start",
|
|
401
|
+
messageId: chunk.messageId,
|
|
402
|
+
metadata: startMeta
|
|
403
|
+
}
|
|
404
|
+
};
|
|
405
|
+
}
|
|
406
|
+
case "finish": {
|
|
407
|
+
const finishMeta = asMetadata(chunk.messageMetadata);
|
|
408
|
+
if (finishMeta) this.metadata = this.metadata ? {
|
|
409
|
+
...this.metadata,
|
|
410
|
+
...finishMeta
|
|
411
|
+
} : { ...finishMeta };
|
|
412
|
+
return {
|
|
413
|
+
handled: true,
|
|
414
|
+
action: {
|
|
415
|
+
type: "finish",
|
|
416
|
+
finishReason: "finishReason" in chunk ? chunk.finishReason : void 0,
|
|
417
|
+
metadata: finishMeta
|
|
418
|
+
}
|
|
419
|
+
};
|
|
420
|
+
}
|
|
421
|
+
case "message-metadata": {
|
|
422
|
+
const msgMeta = asMetadata(chunk.messageMetadata);
|
|
423
|
+
if (msgMeta) this.metadata = this.metadata ? {
|
|
424
|
+
...this.metadata,
|
|
425
|
+
...msgMeta
|
|
426
|
+
} : { ...msgMeta };
|
|
427
|
+
return {
|
|
428
|
+
handled: true,
|
|
429
|
+
action: {
|
|
430
|
+
type: "message-metadata",
|
|
431
|
+
metadata: msgMeta ?? {}
|
|
432
|
+
}
|
|
433
|
+
};
|
|
434
|
+
}
|
|
435
|
+
case "finish-step": return { handled: true };
|
|
436
|
+
case "error": return {
|
|
437
|
+
handled: true,
|
|
438
|
+
action: {
|
|
439
|
+
type: "error",
|
|
440
|
+
error: chunk.errorText ?? JSON.stringify(chunk)
|
|
441
|
+
}
|
|
442
|
+
};
|
|
443
|
+
}
|
|
444
|
+
return { handled };
|
|
445
|
+
}
|
|
446
|
+
/** Snapshot the current state as a UIMessage. */
|
|
447
|
+
toMessage() {
|
|
448
|
+
return {
|
|
449
|
+
id: this.messageId,
|
|
450
|
+
role: "assistant",
|
|
451
|
+
parts: [...this.parts],
|
|
452
|
+
...this.metadata != null && { metadata: this.metadata }
|
|
453
|
+
};
|
|
454
|
+
}
|
|
455
|
+
/**
|
|
456
|
+
* Merge this accumulator's message into an existing message array.
|
|
457
|
+
* Handles continuation (walk backward for last assistant), replacement
|
|
458
|
+
* (update existing by messageId), or append (new message).
|
|
459
|
+
*/
|
|
460
|
+
mergeInto(messages) {
|
|
461
|
+
let existingIdx = messages.findIndex((m) => m.id === this.messageId);
|
|
462
|
+
if (existingIdx < 0 && this._isContinuation) {
|
|
463
|
+
for (let i = messages.length - 1; i >= 0; i--) if (messages[i].role === "assistant") {
|
|
464
|
+
existingIdx = i;
|
|
465
|
+
break;
|
|
466
|
+
}
|
|
467
|
+
}
|
|
468
|
+
const partialMessage = {
|
|
469
|
+
id: existingIdx >= 0 ? messages[existingIdx].id : this.messageId,
|
|
470
|
+
role: "assistant",
|
|
471
|
+
parts: [...this.parts],
|
|
472
|
+
...this.metadata != null && { metadata: this.metadata }
|
|
473
|
+
};
|
|
474
|
+
if (existingIdx >= 0) {
|
|
475
|
+
const updated = [...messages];
|
|
476
|
+
updated[existingIdx] = partialMessage;
|
|
477
|
+
return updated;
|
|
478
|
+
}
|
|
479
|
+
return [...messages, partialMessage];
|
|
480
|
+
}
|
|
481
|
+
};
|
|
482
|
+
//#endregion
|
|
483
|
+
//#region src/chat/turn-queue.ts
|
|
484
|
+
/**
 * Serializes async "turns" so at most one runs at a time, in FIFO order.
 * reset() bumps a generation counter so turns queued before the reset are
 * skipped (reported as "stale") instead of executed.
 */
var TurnQueue = class {
  constructor() {
    // Promise chain acting as a FIFO mutex: each enqueued turn awaits the
    // previous turn's release promise before running.
    this._queue = Promise.resolve();
    // Monotonic counter; reset() bumps it so older queued turns are skipped.
    this._generation = 0;
    // Request id of the currently executing turn, or null when idle.
    this._activeRequestId = null;
    // active + queued turn counts keyed by generation (drives queuedCount()).
    this._countsByGeneration = /* @__PURE__ */ new Map();
  }
  /** Current generation number. */
  get generation() {
    return this._generation;
  }
  /** Request id of the currently executing turn, or null when idle. */
  get activeRequestId() {
    return this._activeRequestId;
  }
  /** True while a turn is executing. */
  get isActive() {
    return this._activeRequestId !== null;
  }
  /**
   * Enqueue `fn` to run after all previously enqueued turns.
   *
   * @param requestId - id recorded as active while fn runs
   * @param fn - async work for this turn
   * @param options - optional; options.generation attributes the turn to a
   *   specific generation (defaults to the generation at enqueue time)
   * @returns {status:"stale"} if reset() was called before this turn reached
   *   the front of the queue; otherwise {status:"completed", value} with
   *   fn's result. Rejections from fn propagate to the caller, but the
   *   queue is still released via the finally block.
   */
  async enqueue(requestId, fn, options) {
    const previousTurn = this._queue;
    let releaseTurn;
    const capturedGeneration = options?.generation ?? this._generation;
    this._countsByGeneration.set(capturedGeneration, (this._countsByGeneration.get(capturedGeneration) ?? 0) + 1);
    // Install our release promise as the new tail of the chain *before*
    // awaiting, so later enqueues line up behind this turn.
    this._queue = new Promise((resolve) => {
      releaseTurn = resolve;
    });
    await previousTurn;
    if (this._generation !== capturedGeneration) {
      // reset() happened while we were waiting in line: skip the work.
      this._decrementCount(capturedGeneration);
      releaseTurn();
      return { status: "stale" };
    }
    this._activeRequestId = requestId;
    try {
      return {
        status: "completed",
        value: await fn()
      };
    } finally {
      // Always release the next turn, even if fn threw.
      this._activeRequestId = null;
      this._decrementCount(capturedGeneration);
      releaseTurn();
    }
  }
  /**
   * Advance the generation counter. All turns enqueued under older
   * generations will be skipped when they reach the front of the queue.
   */
  reset() {
    this._generation++;
  }
  /**
   * Wait until the queue is fully drained (no pending or active turns).
   * Re-checks after each await because new turns may have been enqueued
   * while waiting.
   */
  async waitForIdle() {
    let queue;
    do {
      queue = this._queue;
      await queue;
    } while (this._queue !== queue);
  }
  /**
   * Number of active + queued turns for a given generation.
   * Defaults to the current generation.
   */
  queuedCount(generation) {
    return this._countsByGeneration.get(generation ?? this._generation) ?? 0;
  }
  // Decrement the per-generation turn count, dropping the entry at zero
  // so the map does not grow without bound across resets.
  _decrementCount(generation) {
    const count = (this._countsByGeneration.get(generation) ?? 1) - 1;
    if (count <= 0) this._countsByGeneration.delete(generation);
    else this._countsByGeneration.set(generation, count);
  }
};
|
|
556
|
+
//#endregion
|
|
557
|
+
//#region src/chat/broadcast-state.ts
|
|
558
|
+
/**
 * Broadcast-state reducer: given the current observing/idle state and an
 * incoming event, returns the next state, an optional messagesUpdate
 * function to fold the accumulated message into the message list, and the
 * new isStreaming flag.
 */
function transition(state, event) {
  switch (event.type) {
    case "clear":
      return {
        state: { status: "idle" },
        isStreaming: false
      };
    case "resume-fallback":
      // Begin observing a resumed stream with a fresh accumulator.
      return {
        state: {
          status: "observing",
          streamId: event.streamId,
          accumulator: new StreamAccumulator({ messageId: event.messageId })
        },
        isStreaming: true
      };
    case "response": {
      // Reuse the live accumulator when the chunk belongs to the stream we
      // are already observing; otherwise start a fresh one, optionally
      // seeded from the last assistant message for continuations.
      let accumulator;
      if (state.status !== "idle" && state.streamId === event.streamId) {
        accumulator = state.accumulator;
      } else {
        let seedId = event.messageId;
        let seedParts;
        let seedMetadata;
        const history = event.currentMessages;
        if (event.continuation && history) {
          for (let i = history.length - 1; i >= 0; i--) {
            const candidate = history[i];
            if (candidate.role !== "assistant") continue;
            seedId = candidate.id;
            seedParts = [...candidate.parts];
            if (candidate.metadata != null) seedMetadata = { ...candidate.metadata };
            break;
          }
        }
        accumulator = new StreamAccumulator({
          messageId: seedId,
          continuation: event.continuation,
          existingParts: seedParts,
          existingMetadata: seedMetadata
        });
      }
      if (event.chunkData) accumulator.applyChunk(event.chunkData);
      const flush = (prev) => accumulator.mergeInto(prev);
      if (event.done) {
        return {
          state: { status: "idle" },
          messagesUpdate: flush,
          isStreaming: false
        };
      }
      // Live chunks flush immediately; replayed chunks only flush once the
      // replay is complete.
      const shouldFlush = (event.chunkData && !event.replay) || event.replayComplete;
      return {
        state: {
          status: "observing",
          streamId: event.streamId,
          accumulator
        },
        messagesUpdate: shouldFlush ? flush : undefined,
        isStreaming: true
      };
    }
  }
}
|
|
620
|
+
//#endregion
|
|
621
|
+
//#region src/chat/protocol.ts
|
|
622
|
+
/**
 * Wire protocol message type constants for the cf_agent_chat_* protocol.
 *
 * These are the string values used on the wire between agent servers and
 * clients. Both @cloudflare/ai-chat (via its MessageType enum) and
 * @cloudflare/think use these values.
 *
 * NOTE(review): these values are a shared wire contract across packages —
 * changing any string here is a breaking protocol change for connected
 * clients.
 */
const CHAT_MESSAGE_TYPES = {
  // Message-list synchronization.
  CHAT_MESSAGES: "cf_agent_chat_messages",
  // Chat request/response pair.
  USE_CHAT_REQUEST: "cf_agent_use_chat_request",
  USE_CHAT_RESPONSE: "cf_agent_use_chat_response",
  // Clearing history and cancelling an in-flight request.
  CHAT_CLEAR: "cf_agent_chat_clear",
  CHAT_REQUEST_CANCEL: "cf_agent_chat_request_cancel",
  // Stream-resume handshake.
  STREAM_RESUMING: "cf_agent_stream_resuming",
  STREAM_RESUME_ACK: "cf_agent_stream_resume_ack",
  STREAM_RESUME_REQUEST: "cf_agent_stream_resume_request",
  STREAM_RESUME_NONE: "cf_agent_stream_resume_none",
  // Tool results and tool approvals.
  TOOL_RESULT: "cf_agent_tool_result",
  TOOL_APPROVAL: "cf_agent_tool_approval",
  // Notification that a stored message changed.
  MESSAGE_UPDATED: "cf_agent_message_updated"
};
|
|
643
|
+
//#endregion
|
|
644
|
+
//#region src/chat/resumable-stream.ts
|
|
645
|
+
/**
|
|
646
|
+
* ResumableStream: Standalone class for buffering, persisting, and replaying
|
|
647
|
+
* stream chunks in SQLite. Extracted from AIChatAgent to separate concerns.
|
|
648
|
+
*
|
|
649
|
+
* Handles:
|
|
650
|
+
* - Chunk buffering (batched writes to SQLite for performance)
|
|
651
|
+
* - Stream lifecycle (start, complete, error)
|
|
652
|
+
* - Chunk replay for reconnecting clients
|
|
653
|
+
* - Stale stream cleanup
|
|
654
|
+
* - Active stream restoration after agent restart
|
|
655
|
+
*/
|
|
656
|
+
/** Number of chunks to buffer before flushing to SQLite */
const CHUNK_BUFFER_SIZE = 10;
/** Maximum buffer size to prevent memory issues on rapid reconnections */
const CHUNK_BUFFER_MAX_SIZE = 100;
/** Default cleanup interval for old streams (ms) - every 10 minutes */
const CLEANUP_INTERVAL_MS = 10 * 60 * 1000;
/** Default age threshold for cleaning up completed streams (ms) - 24 hours */
const CLEANUP_AGE_THRESHOLD_MS = 24 * 60 * 60 * 1000;
/** Shared encoder for UTF-8 byte length measurement */
const textEncoder = new TextEncoder();
|
|
666
|
+
var ResumableStream = class ResumableStream {
|
|
667
|
+
  /**
   * @param sql - Tagged-template SQL executor bound to the agent's SQLite DB.
   *
   * Creates the chunk and metadata tables (and their index) if missing,
   * then calls restore() — which, per the class doc, re-attaches any stream
   * that was active before an agent restart.
   */
  constructor(sql) {
    this.sql = sql;
    // Stream currently being written, or null when idle.
    this._activeStreamId = null;
    this._activeRequestId = null;
    // Next chunk_index to assign within the active stream.
    this._streamChunkIndex = 0;
    // True only when the stream was started in this instance (live LLM reader).
    this._isLive = false;
    // Chunks batched in memory before being flushed to SQLite.
    this._chunkBuffer = [];
    this._isFlushingChunks = false;
    this._lastCleanupTime = 0;
    // Ordered chunk bodies, keyed by stream and ordered by chunk_index.
    this.sql`create table if not exists cf_ai_chat_stream_chunks (
      id text primary key,
      stream_id text not null,
      body text not null,
      chunk_index integer not null,
      created_at integer not null
    )`;
    // Per-stream lifecycle: status transitions and timestamps.
    this.sql`create table if not exists cf_ai_chat_stream_metadata (
      id text primary key,
      request_id text not null,
      status text not null,
      created_at integer not null,
      completed_at integer
    )`;
    // Supports ordered replay of a single stream's chunks.
    this.sql`create index if not exists idx_stream_chunks_stream_id
      on cf_ai_chat_stream_chunks(stream_id, chunk_index)`;
    this.restore();
  }
|
|
694
|
+
  /** Id of the stream currently being tracked, or null when idle. */
  get activeStreamId() {
    return this._activeStreamId;
  }
|
|
697
|
+
  /** Request id associated with the active stream, or null when idle. */
  get activeRequestId() {
    return this._activeRequestId;
  }
|
|
700
|
+
  /** Whether a stream is currently being tracked (live or restored). */
  hasActiveStream() {
    return this._activeStreamId !== null;
  }
|
|
703
|
+
  /**
   * Whether the active stream has a live LLM reader (started in this
   * instance) vs being restored from SQLite after hibernation (orphaned).
   *
   * NOTE(review): only start() sets this true; a restored stream appears
   * to be replay-only — confirm against restore().
   */
  get isLive() {
    return this._isLive;
  }
|
|
710
|
+
  /**
   * Start tracking a new stream for resumable streaming.
   * Creates a 'streaming' metadata row in SQLite and resets tracking state.
   * Any chunks still buffered from a previous stream are flushed first.
   * @param requestId - The unique ID of the chat request
   * @returns The generated stream ID
   */
  start(requestId) {
    // Flush leftovers from any prior stream before switching tracking state.
    this.flushBuffer();
    const streamId = nanoid();
    this._activeStreamId = streamId;
    this._activeRequestId = requestId;
    this._streamChunkIndex = 0;
    // Started in this instance, so the LLM reader is live.
    this._isLive = true;
    this.sql`
      insert into cf_ai_chat_stream_metadata (id, request_id, status, created_at)
      values (${streamId}, ${requestId}, 'streaming', ${Date.now()})
    `;
    return streamId;
  }
|
|
729
|
+
/**
|
|
730
|
+
* Mark a stream as completed and flush any pending chunks.
|
|
731
|
+
* @param streamId - The stream to mark as completed
|
|
732
|
+
*/
|
|
733
|
+
complete(streamId) {
|
|
734
|
+
this.flushBuffer();
|
|
735
|
+
this.sql`
|
|
736
|
+
update cf_ai_chat_stream_metadata
|
|
737
|
+
set status = 'completed', completed_at = ${Date.now()}
|
|
738
|
+
where id = ${streamId}
|
|
739
|
+
`;
|
|
740
|
+
this._activeStreamId = null;
|
|
741
|
+
this._activeRequestId = null;
|
|
742
|
+
this._streamChunkIndex = 0;
|
|
743
|
+
this._isLive = false;
|
|
744
|
+
this._maybeCleanupOldStreams();
|
|
745
|
+
}
|
|
746
|
+
/**
|
|
747
|
+
* Mark a stream as errored and clean up state.
|
|
748
|
+
* @param streamId - The stream to mark as errored
|
|
749
|
+
*/
|
|
750
|
+
markError(streamId) {
|
|
751
|
+
this.flushBuffer();
|
|
752
|
+
this.sql`
|
|
753
|
+
update cf_ai_chat_stream_metadata
|
|
754
|
+
set status = 'error', completed_at = ${Date.now()}
|
|
755
|
+
where id = ${streamId}
|
|
756
|
+
`;
|
|
757
|
+
this._activeStreamId = null;
|
|
758
|
+
this._activeRequestId = null;
|
|
759
|
+
this._streamChunkIndex = 0;
|
|
760
|
+
this._isLive = false;
|
|
761
|
+
}
|
|
762
|
+
/**
|
|
763
|
+
* Buffer a stream chunk for batch write to SQLite.
|
|
764
|
+
* Chunks exceeding the row size limit are skipped to prevent crashes.
|
|
765
|
+
* The chunk is still broadcast to live clients (caller handles that),
|
|
766
|
+
* but will be missing from replay on reconnection.
|
|
767
|
+
* @param streamId - The stream this chunk belongs to
|
|
768
|
+
* @param body - The serialized chunk body
|
|
769
|
+
*/
|
|
770
|
+
storeChunk(streamId, body) {
|
|
771
|
+
const bodyBytes = textEncoder.encode(body).byteLength;
|
|
772
|
+
if (bodyBytes > ResumableStream.CHUNK_MAX_BYTES) {
|
|
773
|
+
console.warn(`[ResumableStream] Skipping oversized chunk (${bodyBytes} bytes) to prevent SQLite row limit crash. Live clients still receive it.`);
|
|
774
|
+
return;
|
|
775
|
+
}
|
|
776
|
+
if (this._chunkBuffer.length >= CHUNK_BUFFER_MAX_SIZE) this.flushBuffer();
|
|
777
|
+
this._chunkBuffer.push({
|
|
778
|
+
id: nanoid(),
|
|
779
|
+
streamId,
|
|
780
|
+
body,
|
|
781
|
+
index: this._streamChunkIndex
|
|
782
|
+
});
|
|
783
|
+
this._streamChunkIndex++;
|
|
784
|
+
if (this._chunkBuffer.length >= CHUNK_BUFFER_SIZE) this.flushBuffer();
|
|
785
|
+
}
|
|
786
|
+
/**
|
|
787
|
+
* Flush buffered chunks to SQLite in a single batch.
|
|
788
|
+
* Uses a lock to prevent concurrent flush operations.
|
|
789
|
+
*/
|
|
790
|
+
flushBuffer() {
|
|
791
|
+
if (this._isFlushingChunks || this._chunkBuffer.length === 0) return;
|
|
792
|
+
this._isFlushingChunks = true;
|
|
793
|
+
try {
|
|
794
|
+
const chunks = this._chunkBuffer;
|
|
795
|
+
this._chunkBuffer = [];
|
|
796
|
+
const now = Date.now();
|
|
797
|
+
for (const chunk of chunks) this.sql`
|
|
798
|
+
insert into cf_ai_chat_stream_chunks (id, stream_id, body, chunk_index, created_at)
|
|
799
|
+
values (${chunk.id}, ${chunk.streamId}, ${chunk.body}, ${chunk.index}, ${now})
|
|
800
|
+
`;
|
|
801
|
+
} finally {
|
|
802
|
+
this._isFlushingChunks = false;
|
|
803
|
+
}
|
|
804
|
+
}
|
|
805
|
+
/**
|
|
806
|
+
* Send stored stream chunks to a connection for replay.
|
|
807
|
+
* Chunks are marked with replay: true so the client can batch-apply them.
|
|
808
|
+
*
|
|
809
|
+
* Three outcomes:
|
|
810
|
+
* - **Live stream**: sends chunks + `replayComplete` — client flushes and
|
|
811
|
+
* continues receiving live chunks from the LLM reader.
|
|
812
|
+
* - **Orphaned stream** (restored from SQLite after hibernation, no reader):
|
|
813
|
+
* sends chunks + `done` and completes the stream. The caller should
|
|
814
|
+
* reconstruct and persist the partial message from the stored chunks.
|
|
815
|
+
* - **Completed during replay** (defensive): sends chunks + `done`.
|
|
816
|
+
*
|
|
817
|
+
* @param connection - The WebSocket connection
|
|
818
|
+
* @param requestId - The original request ID
|
|
819
|
+
* @returns The stream ID if the stream was orphaned and finalized, null otherwise.
|
|
820
|
+
* When non-null the caller should reconstruct the message from chunks.
|
|
821
|
+
*/
|
|
822
|
+
replayChunks(connection, requestId) {
|
|
823
|
+
const streamId = this._activeStreamId;
|
|
824
|
+
if (!streamId) return null;
|
|
825
|
+
this.flushBuffer();
|
|
826
|
+
const chunks = this.sql`
|
|
827
|
+
select * from cf_ai_chat_stream_chunks
|
|
828
|
+
where stream_id = ${streamId}
|
|
829
|
+
order by chunk_index asc
|
|
830
|
+
`;
|
|
831
|
+
for (const chunk of chunks || []) connection.send(JSON.stringify({
|
|
832
|
+
body: chunk.body,
|
|
833
|
+
done: false,
|
|
834
|
+
id: requestId,
|
|
835
|
+
type: CHAT_MESSAGE_TYPES.USE_CHAT_RESPONSE,
|
|
836
|
+
replay: true
|
|
837
|
+
}));
|
|
838
|
+
if (this._activeStreamId !== streamId) {
|
|
839
|
+
connection.send(JSON.stringify({
|
|
840
|
+
body: "",
|
|
841
|
+
done: true,
|
|
842
|
+
id: requestId,
|
|
843
|
+
type: CHAT_MESSAGE_TYPES.USE_CHAT_RESPONSE,
|
|
844
|
+
replay: true
|
|
845
|
+
}));
|
|
846
|
+
return null;
|
|
847
|
+
}
|
|
848
|
+
if (!this._isLive) {
|
|
849
|
+
connection.send(JSON.stringify({
|
|
850
|
+
body: "",
|
|
851
|
+
done: true,
|
|
852
|
+
id: requestId,
|
|
853
|
+
type: CHAT_MESSAGE_TYPES.USE_CHAT_RESPONSE,
|
|
854
|
+
replay: true
|
|
855
|
+
}));
|
|
856
|
+
this.complete(streamId);
|
|
857
|
+
return streamId;
|
|
858
|
+
}
|
|
859
|
+
connection.send(JSON.stringify({
|
|
860
|
+
body: "",
|
|
861
|
+
done: false,
|
|
862
|
+
id: requestId,
|
|
863
|
+
type: CHAT_MESSAGE_TYPES.USE_CHAT_RESPONSE,
|
|
864
|
+
replay: true,
|
|
865
|
+
replayComplete: true
|
|
866
|
+
}));
|
|
867
|
+
return null;
|
|
868
|
+
}
|
|
869
|
+
/**
|
|
870
|
+
* Restore active stream state if the agent was restarted during streaming.
|
|
871
|
+
* All streams are restored regardless of age — stale cleanup happens
|
|
872
|
+
* lazily in _maybeCleanupOldStreams after recovery has had its chance.
|
|
873
|
+
*/
|
|
874
|
+
restore() {
|
|
875
|
+
const activeStreams = this.sql`
|
|
876
|
+
select * from cf_ai_chat_stream_metadata
|
|
877
|
+
where status = 'streaming'
|
|
878
|
+
order by created_at desc
|
|
879
|
+
limit 1
|
|
880
|
+
`;
|
|
881
|
+
if (activeStreams && activeStreams.length > 0) {
|
|
882
|
+
const stream = activeStreams[0];
|
|
883
|
+
this._activeStreamId = stream.id;
|
|
884
|
+
this._activeRequestId = stream.request_id;
|
|
885
|
+
const lastChunk = this.sql`
|
|
886
|
+
select max(chunk_index) as max_index
|
|
887
|
+
from cf_ai_chat_stream_chunks
|
|
888
|
+
where stream_id = ${this._activeStreamId}
|
|
889
|
+
`;
|
|
890
|
+
this._streamChunkIndex = lastChunk && lastChunk[0]?.max_index != null ? lastChunk[0].max_index + 1 : 0;
|
|
891
|
+
}
|
|
892
|
+
}
|
|
893
|
+
/**
|
|
894
|
+
* Clear all stream data (called on chat history clear).
|
|
895
|
+
*/
|
|
896
|
+
clearAll() {
|
|
897
|
+
this._chunkBuffer = [];
|
|
898
|
+
this.sql`delete from cf_ai_chat_stream_chunks`;
|
|
899
|
+
this.sql`delete from cf_ai_chat_stream_metadata`;
|
|
900
|
+
this._activeStreamId = null;
|
|
901
|
+
this._activeRequestId = null;
|
|
902
|
+
this._streamChunkIndex = 0;
|
|
903
|
+
}
|
|
904
|
+
/**
|
|
905
|
+
* Drop all stream tables (called on destroy).
|
|
906
|
+
*/
|
|
907
|
+
destroy() {
|
|
908
|
+
this.flushBuffer();
|
|
909
|
+
this.sql`drop table if exists cf_ai_chat_stream_chunks`;
|
|
910
|
+
this.sql`drop table if exists cf_ai_chat_stream_metadata`;
|
|
911
|
+
this._activeStreamId = null;
|
|
912
|
+
this._activeRequestId = null;
|
|
913
|
+
}
|
|
914
|
+
_maybeCleanupOldStreams() {
|
|
915
|
+
const now = Date.now();
|
|
916
|
+
if (now - this._lastCleanupTime < CLEANUP_INTERVAL_MS) return;
|
|
917
|
+
this._lastCleanupTime = now;
|
|
918
|
+
const cutoff = now - CLEANUP_AGE_THRESHOLD_MS;
|
|
919
|
+
this.sql`
|
|
920
|
+
delete from cf_ai_chat_stream_chunks
|
|
921
|
+
where stream_id in (
|
|
922
|
+
select id from cf_ai_chat_stream_metadata
|
|
923
|
+
where status in ('completed', 'error') and completed_at < ${cutoff}
|
|
924
|
+
)
|
|
925
|
+
`;
|
|
926
|
+
this.sql`
|
|
927
|
+
delete from cf_ai_chat_stream_metadata
|
|
928
|
+
where status in ('completed', 'error') and completed_at < ${cutoff}
|
|
929
|
+
`;
|
|
930
|
+
this.sql`
|
|
931
|
+
delete from cf_ai_chat_stream_chunks
|
|
932
|
+
where stream_id in (
|
|
933
|
+
select id from cf_ai_chat_stream_metadata
|
|
934
|
+
where status = 'streaming' and created_at < ${cutoff}
|
|
935
|
+
)
|
|
936
|
+
`;
|
|
937
|
+
this.sql`
|
|
938
|
+
delete from cf_ai_chat_stream_metadata
|
|
939
|
+
where status = 'streaming' and created_at < ${cutoff}
|
|
940
|
+
`;
|
|
941
|
+
}
|
|
942
|
+
/** @internal For testing only */
|
|
943
|
+
getStreamChunks(streamId) {
|
|
944
|
+
return this.sql`
|
|
945
|
+
select body, chunk_index from cf_ai_chat_stream_chunks
|
|
946
|
+
where stream_id = ${streamId}
|
|
947
|
+
order by chunk_index asc
|
|
948
|
+
` || [];
|
|
949
|
+
}
|
|
950
|
+
/** @internal For testing only */
|
|
951
|
+
getStreamMetadata(streamId) {
|
|
952
|
+
const result = this.sql`
|
|
953
|
+
select status, request_id from cf_ai_chat_stream_metadata
|
|
954
|
+
where id = ${streamId}
|
|
955
|
+
`;
|
|
956
|
+
return result && result.length > 0 ? result[0] : null;
|
|
957
|
+
}
|
|
958
|
+
/** @internal For testing only */
|
|
959
|
+
getAllStreamMetadata() {
|
|
960
|
+
return this.sql`select id, status, request_id, created_at from cf_ai_chat_stream_metadata` || [];
|
|
961
|
+
}
|
|
962
|
+
/** @internal For testing only */
|
|
963
|
+
insertStaleStream(streamId, requestId, ageMs) {
|
|
964
|
+
const createdAt = Date.now() - ageMs;
|
|
965
|
+
this.sql`
|
|
966
|
+
insert into cf_ai_chat_stream_metadata (id, request_id, status, created_at)
|
|
967
|
+
values (${streamId}, ${requestId}, 'streaming', ${createdAt})
|
|
968
|
+
`;
|
|
969
|
+
}
|
|
970
|
+
};
|
|
971
|
+
ResumableStream.CHUNK_MAX_BYTES = 18e5;
|
|
972
|
+
//#endregion
|
|
973
|
+
//#region src/chat/client-tools.ts
|
|
974
|
+
/**
|
|
975
|
+
* Converts client tool schemas to AI SDK tool format.
|
|
976
|
+
*
|
|
977
|
+
* These tools have no `execute` function — when the AI model calls them,
|
|
978
|
+
* the tool call is sent back to the client for execution.
|
|
979
|
+
*
|
|
980
|
+
* @param clientTools - Array of tool schemas from the client
|
|
981
|
+
* @returns Record of AI SDK tools that can be spread into your tools object
|
|
982
|
+
*/
|
|
983
|
+
function createToolsFromClientSchemas(clientTools) {
  // Nothing to convert — return an empty tool record.
  if (!clientTools?.length) return {};
  // Build [name, tool] entries, warning whenever a name repeats; the last
  // definition for a duplicated name wins in the final record.
  const seen = new Set();
  const entries = [];
  for (const schema of clientTools) {
    if (seen.has(schema.name)) console.warn(`[createToolsFromClientSchemas] Duplicate tool name "${schema.name}" found. Later definitions will override earlier ones.`);
    seen.add(schema.name);
    // No `execute` function: calls are routed back to the client to run.
    entries.push([schema.name, tool({
      description: schema.description ?? "",
      inputSchema: jsonSchema(schema.parameters ?? { type: "object" })
    })]);
  }
  return Object.fromEntries(entries);
}
|
|
995
|
+
//#endregion
|
|
996
|
+
//#region src/chat/continuation-state.ts
|
|
997
|
+
/**
|
|
998
|
+
* ContinuationState — shared state container for auto-continuation lifecycle.
|
|
999
|
+
*
|
|
1000
|
+
* Tracks pending, deferred, and active continuation state for the
|
|
1001
|
+
* tool-result → auto-continue flow. Both AIChatAgent and Think use this
|
|
1002
|
+
* to manage which connection/tools/body a continuation turn should use
|
|
1003
|
+
* and to coordinate with clients requesting stream resume.
|
|
1004
|
+
*
|
|
1005
|
+
* The scheduling algorithm (prerequisite chaining, debounce, TurnQueue
|
|
1006
|
+
* enrollment) stays in the host — this class only manages the data.
|
|
1007
|
+
*/
|
|
1008
|
+
// Cached wire-type constant for the "no continuation stream to resume" notice.
const MSG_STREAM_RESUME_NONE = CHAT_MESSAGE_TYPES.STREAM_RESUME_NONE;
|
|
1009
|
+
var ContinuationState = class {
  /** Data container for pending/deferred/active continuation bookkeeping. */
  constructor() {
    // Continuation scheduled but not yet started.
    this.pending = null;
    // Follow-up continuation parked until the current turn finishes.
    this.deferred = null;
    // Identifiers of the continuation turn currently streaming.
    this.activeRequestId = null;
    this.activeConnectionId = null;
    // Connections waiting to hear whether a continuation stream will start.
    this.awaitingConnections = new Map();
  }
  /** Clear pending state and awaiting connections (without sending RESUME_NONE). */
  clearPending() {
    this.awaitingConnections.clear();
    this.pending = null;
  }
  /** Drop any parked follow-up continuation. */
  clearDeferred() {
    this.deferred = null;
  }
  /** Reset every slot: pending, deferred, active IDs, awaiting connections. */
  clearAll() {
    this.activeRequestId = null;
    this.activeConnectionId = null;
    this.clearDeferred();
    this.clearPending();
  }
  /**
   * Send STREAM_RESUME_NONE to all connections waiting for a
   * continuation stream to start, then clear the map.
   */
  sendResumeNone() {
    const payload = JSON.stringify({ type: MSG_STREAM_RESUME_NONE });
    for (const waiting of this.awaitingConnections.values()) {
      waiting.send(payload);
    }
    this.awaitingConnections.clear();
  }
  /**
   * Flush awaiting connections by notifying each one via the provided
   * callback (typically sends STREAM_RESUMING), then clear.
   */
  flushAwaitingConnections(notify) {
    for (const waiting of this.awaitingConnections.values()) {
      notify(waiting);
    }
    this.awaitingConnections.clear();
  }
  /**
   * Transition pending → active. Called when the continuation stream
   * actually starts: moves request/connection IDs to the active slots
   * and clears the pending record. No-op when nothing is pending.
   */
  activatePending() {
    const current = this.pending;
    if (!current) return;
    this.activeRequestId = current.requestId;
    this.activeConnectionId = current.connectionId;
    this.pending = null;
  }
  /**
   * Transition deferred → pending. Called when a continuation turn
   * completes and there's a deferred follow-up waiting.
   *
   * @param generateRequestId - Factory for the new turn's request ID.
   * @returns The new pending state (so the host can enqueue the turn),
   *   or null when a turn is already pending or nothing was deferred.
   */
  activateDeferred(generateRequestId) {
    if (this.pending || !this.deferred) return null;
    const next = this.deferred;
    this.deferred = null;
    this.activeRequestId = null;
    this.activeConnectionId = null;
    this.pending = {
      connection: next.connection,
      connectionId: next.connectionId,
      requestId: generateRequestId(),
      clientTools: next.clientTools,
      body: next.body,
      errorPrefix: next.errorPrefix,
      prerequisite: next.prerequisite,
      pastCoalesce: false
    };
    // The deferring connection now waits for the new stream to start.
    this.awaitingConnections.set(next.connectionId, next.connection);
    return this.pending;
  }
};
|
|
1086
|
+
//#endregion
|
|
1087
|
+
//#region src/chat/abort-registry.ts
|
|
1088
|
+
/**
|
|
1089
|
+
* AbortRegistry — manages per-request AbortControllers.
|
|
1090
|
+
*
|
|
1091
|
+
* Shared between AIChatAgent and Think for chat turn cancellation.
|
|
1092
|
+
* Each request gets its own AbortController keyed by request ID.
|
|
1093
|
+
* Controllers are created lazily on first signal access.
|
|
1094
|
+
*/
|
|
1095
|
+
var AbortRegistry = class {
  /** Registry of per-request AbortControllers, keyed by request ID. */
  constructor() {
    // Controllers are created lazily on first getSignal() access.
    this.controllers = new Map();
  }
  /**
   * Get or create an AbortController for the given ID and return its signal.
   * Non-string IDs are rejected (returns undefined).
   */
  getSignal(id) {
    if (typeof id !== "string") return;
    let controller = this.controllers.get(id);
    if (controller === undefined) {
      controller = new AbortController();
      this.controllers.set(id, controller);
    }
    return controller.signal;
  }
  /**
   * Get the signal for an existing controller without creating one.
   * Returns undefined if no controller exists for this ID.
   */
  getExistingSignal(id) {
    return this.controllers.get(id)?.signal;
  }
  /** Cancel a specific request by aborting its controller (no-op if absent). */
  cancel(id) {
    this.controllers.get(id)?.abort();
  }
  /** Remove a controller after the request completes. */
  remove(id) {
    this.controllers.delete(id);
  }
  /** Abort all pending requests and clear the registry. */
  destroyAll() {
    for (const controller of this.controllers.values()) {
      controller.abort();
    }
    this.controllers.clear();
  }
  /** Check if a controller exists for the given ID. */
  has(id) {
    return this.controllers.has(id);
  }
  /** Number of tracked controllers. */
  get size() {
    return this.controllers.size;
  }
};
|
|
1137
|
+
//#endregion
|
|
1138
|
+
//#region src/chat/tool-state.ts
|
|
1139
|
+
/**
|
|
1140
|
+
* Apply a tool part update to a parts array.
|
|
1141
|
+
* Finds the first part matching `update.toolCallId` in one of `update.matchStates`,
|
|
1142
|
+
* applies the update immutably, and returns the new parts array with the index.
|
|
1143
|
+
*
|
|
1144
|
+
* Returns `null` if no matching part was found.
|
|
1145
|
+
*/
|
|
1146
|
+
/**
 * Apply a tool part update to a parts array.
 * Finds the first part matching `update.toolCallId` in one of
 * `update.matchStates`, applies the update immutably, and returns the new
 * parts array with the index. Returns `null` if no matching part was found.
 */
function applyToolUpdate(parts, update) {
  const isTarget = (part) =>
    "toolCallId" in part &&
    part.toolCallId === update.toolCallId &&
    "state" in part &&
    update.matchStates.includes(part.state);
  const index = parts.findIndex(isTarget);
  if (index === -1) return null;
  // Copy-on-write: never mutate the caller's parts array.
  const nextParts = [...parts];
  nextParts[index] = update.apply(parts[index]);
  return { parts: nextParts, index };
}
|
|
1160
|
+
/**
|
|
1161
|
+
* Build an update descriptor for applying a tool result.
|
|
1162
|
+
*
|
|
1163
|
+
* Matches parts in `input-available`, `approval-requested`, or `approval-responded` state.
|
|
1164
|
+
* Sets state to `output-available` (with output) or `output-error` (with errorText).
|
|
1165
|
+
*/
|
|
1166
|
+
/**
 * Build an update descriptor for applying a tool result.
 *
 * Matches parts in `input-available`, `approval-requested`, or
 * `approval-responded` state. Sets state to `output-available` (with output)
 * or, when `overrideState === "output-error"`, to `output-error` (with
 * errorText, defaulting to a denial message).
 */
function toolResultUpdate(toolCallId, output, overrideState, errorText) {
  const applyResult = (part) => {
    if (overrideState === "output-error") {
      return {
        ...part,
        state: "output-error",
        errorText: errorText ?? "Tool execution denied by user"
      };
    }
    return {
      ...part,
      state: "output-available",
      output,
      // A final result is never preliminary.
      preliminary: false
    };
  };
  return {
    toolCallId,
    matchStates: [
      "input-available",
      "approval-requested",
      "approval-responded"
    ],
    apply: applyResult
  };
}
|
|
1187
|
+
/**
|
|
1188
|
+
* Build an update descriptor for applying a tool approval.
|
|
1189
|
+
*
|
|
1190
|
+
* Matches parts in `input-available` or `approval-requested` state.
|
|
1191
|
+
* Sets state to `approval-responded` (if approved) or `output-denied` (if denied).
|
|
1192
|
+
*/
|
|
1193
|
+
/**
 * Build an update descriptor for applying a tool approval.
 *
 * Matches parts in `input-available` or `approval-requested` state.
 * Sets state to `approval-responded` (if approved) or `output-denied`
 * (if denied), and records the decision on the part's `approval` object.
 */
function toolApprovalUpdate(toolCallId, approved) {
  const nextState = approved ? "approval-responded" : "output-denied";
  return {
    toolCallId,
    matchStates: ["input-available", "approval-requested"],
    apply: (part) => {
      const approval = { ...part.approval, approved };
      return {
        ...part,
        state: nextState,
        approval
      };
    }
  };
}
|
|
1207
|
+
//#endregion
|
|
1208
|
+
//#region src/chat/parse-protocol.ts
|
|
1209
|
+
/**
|
|
1210
|
+
* Protocol Message Parser — typed parsing of cf_agent_chat_* WebSocket messages.
|
|
1211
|
+
*
|
|
1212
|
+
* Parses raw WebSocket messages into a discriminated union of protocol events.
|
|
1213
|
+
* Both AIChatAgent and Think can use this instead of manual JSON.parse + type checking.
|
|
1214
|
+
*/
|
|
1215
|
+
/**
|
|
1216
|
+
* Parse a raw WebSocket message string into a typed protocol event.
|
|
1217
|
+
*
|
|
1218
|
+
* Returns `null` if the message is not valid JSON or not a recognized
|
|
1219
|
+
* protocol message type. Callers should fall through to the user's
|
|
1220
|
+
* `onMessage` handler when `null` is returned.
|
|
1221
|
+
*
|
|
1222
|
+
* @example
|
|
1223
|
+
* ```typescript
|
|
1224
|
+
* const event = parseProtocolMessage(rawMessage);
|
|
1225
|
+
* if (!event) return userOnMessage(connection, rawMessage);
|
|
1226
|
+
*
|
|
1227
|
+
* switch (event.type) {
|
|
1228
|
+
* case "chat-request": { ... }
|
|
1229
|
+
* case "clear": { ... }
|
|
1230
|
+
* case "tool-result": { ... }
|
|
1231
|
+
* }
|
|
1232
|
+
* ```
|
|
1233
|
+
*/
|
|
1234
|
+
function parseProtocolMessage(raw) {
  // Parse a raw WebSocket message into a typed protocol event, or null when
  // the payload is not valid JSON or not a recognized cf_agent_chat_* type.
  let data;
  try {
    data = JSON.parse(raw);
  } catch {
    return null;
  }
  // Fix: JSON.parse legitimately yields null (raw === "null") or primitives;
  // reading `.type` off null threw a TypeError instead of returning null as
  // documented. Reject non-object payloads (and null) up front.
  if (typeof data !== "object" || data === null) return null;
  const wireType = data.type;
  if (!wireType) return null;
  switch (wireType) {
    case CHAT_MESSAGE_TYPES.USE_CHAT_REQUEST:
      return {
        type: "chat-request",
        id: data.id,
        init: data.init ?? {}
      };
    case CHAT_MESSAGE_TYPES.CHAT_CLEAR:
      return { type: "clear" };
    case CHAT_MESSAGE_TYPES.CHAT_REQUEST_CANCEL:
      return {
        type: "cancel",
        id: data.id
      };
    case CHAT_MESSAGE_TYPES.TOOL_RESULT:
      return {
        type: "tool-result",
        toolCallId: data.toolCallId,
        toolName: data.toolName ?? "",
        output: data.output,
        state: data.state,
        errorText: data.errorText,
        autoContinue: data.autoContinue,
        clientTools: data.clientTools
      };
    case CHAT_MESSAGE_TYPES.TOOL_APPROVAL:
      return {
        type: "tool-approval",
        toolCallId: data.toolCallId,
        approved: data.approved,
        autoContinue: data.autoContinue
      };
    case CHAT_MESSAGE_TYPES.STREAM_RESUME_REQUEST:
      return { type: "stream-resume-request" };
    case CHAT_MESSAGE_TYPES.STREAM_RESUME_ACK:
      return {
        type: "stream-resume-ack",
        id: data.id
      };
    case CHAT_MESSAGE_TYPES.CHAT_MESSAGES:
      return {
        type: "messages",
        messages: data.messages ?? []
      };
    // Unknown wire type: let the caller fall through to the user's onMessage.
    default:
      return null;
  }
}
|
|
1282
|
+
//#endregion
|
|
1283
|
+
// Public surface of the chat module bundle (re-exported by dist packaging).
export { AbortRegistry, CHAT_MESSAGE_TYPES, ContinuationState, ROW_MAX_BYTES, ResumableStream, StreamAccumulator, TurnQueue, applyChunkToParts, applyToolUpdate, transition as broadcastTransition, byteLength, createToolsFromClientSchemas, enforceRowSizeLimit, parseProtocolMessage, sanitizeMessage, toolApprovalUpdate, toolResultUpdate };
|
|
1284
|
+
|
|
1285
|
+
//# sourceMappingURL=index.js.map
|