agents 0.8.5 → 0.8.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/dist/client.d.ts +2 -2
  2. package/dist/compaction-helpers-BFTBIzpK.js +312 -0
  3. package/dist/compaction-helpers-BFTBIzpK.js.map +1 -0
  4. package/dist/compaction-helpers-DkJreaDR.d.ts +139 -0
  5. package/dist/{do-oauth-client-provider-D7F2Pw40.d.ts → do-oauth-client-provider-C2jurFjW.d.ts} +1 -1
  6. package/dist/{email-YAQhwwXb.d.ts → email-DwPlM0bQ.d.ts} +1 -1
  7. package/dist/email.d.ts +2 -2
  8. package/dist/experimental/forever.d.ts +1 -1
  9. package/dist/experimental/memory/session/index.d.ts +193 -203
  10. package/dist/experimental/memory/session/index.js +673 -294
  11. package/dist/experimental/memory/session/index.js.map +1 -1
  12. package/dist/experimental/memory/utils/index.d.ts +59 -0
  13. package/dist/experimental/memory/utils/index.js +69 -0
  14. package/dist/experimental/memory/utils/index.js.map +1 -0
  15. package/dist/{index-DynYigzs.d.ts → index-C-6EMK-E.d.ts} +11 -11
  16. package/dist/{index-OtkSCU2A.d.ts → index-Ua2Nfvbm.d.ts} +1 -1
  17. package/dist/index.d.ts +7 -5
  18. package/dist/index.js +1 -1
  19. package/dist/index.js.map +1 -1
  20. package/dist/{internal_context-DgcmHqS1.d.ts → internal_context-DT8RxmAN.d.ts} +1 -1
  21. package/dist/internal_context.d.ts +1 -1
  22. package/dist/mcp/client.d.ts +1 -1
  23. package/dist/mcp/do-oauth-client-provider.d.ts +1 -1
  24. package/dist/mcp/index.d.ts +1 -1
  25. package/dist/mcp/index.js +45 -3
  26. package/dist/mcp/index.js.map +1 -1
  27. package/dist/observability/index.d.ts +1 -1
  28. package/dist/react.d.ts +1 -1
  29. package/dist/{retries-JxhDYtYL.d.ts → retries-DXMQGhG3.d.ts} +1 -1
  30. package/dist/retries.d.ts +1 -1
  31. package/dist/{serializable-Ch19yA6_.d.ts → serializable-8Jt1B04R.d.ts} +1 -1
  32. package/dist/serializable.d.ts +1 -1
  33. package/dist/{types-2lHHE_uh.d.ts → types-C-m0II8i.d.ts} +3 -1
  34. package/dist/types.d.ts +1 -1
  35. package/dist/types.js +2 -0
  36. package/dist/types.js.map +1 -1
  37. package/dist/{workflow-types-BVKtSaA7.d.ts → workflow-types-CZNXKj_D.d.ts} +1 -1
  38. package/dist/workflow-types.d.ts +1 -1
  39. package/dist/workflows.d.ts +2 -2
  40. package/package.json +12 -7
package/dist/client.d.ts CHANGED
@@ -1,10 +1,10 @@
1
- import { r as Agent } from "./index-DynYigzs.js";
1
+ import { r as Agent } from "./index-C-6EMK-E.js";
2
2
  import {
3
3
  i as SerializableValue,
4
4
  n as RPCMethod,
5
5
  r as SerializableReturnValue,
6
6
  t as Method
7
- } from "./serializable-Ch19yA6_.js";
7
+ } from "./serializable-8Jt1B04R.js";
8
8
  import {
9
9
  PartyFetchOptions,
10
10
  PartySocket,
@@ -0,0 +1,312 @@
//#region src/experimental/memory/utils/tokens.ts
/** Approximate characters per token for English text */
const CHARS_PER_TOKEN = 4;
/** Approximate token multiplier per whitespace-separated word */
const WORDS_TOKEN_MULTIPLIER = 1.3;
/** Approximate overhead tokens per message (role, framing) */
const TOKENS_PER_MESSAGE = 4;
/**
 * Estimate token count for a string using a hybrid heuristic.
 *
 * Takes the max of two estimates:
 * - Character-based: `length / CHARS_PER_TOKEN` — better for dense content (JSON, code, URLs)
 * - Word-based: `words * WORDS_TOKEN_MULTIPLIER` — better for natural language prose
 *
 * This is a heuristic. Do not use where exact counts are required.
 */
function estimateStringTokens(text) {
	if (!text) return 0;
	// Use the named calibration constants rather than re-inlined literals so
	// the ratios live in exactly one place (values are identical: 4 and 1.3).
	const charEstimate = text.length / CHARS_PER_TOKEN;
	const wordEstimate = text.split(/\s+/).filter(Boolean).length * WORDS_TOKEN_MULTIPLIER;
	return Math.ceil(Math.max(charEstimate, wordEstimate));
}
/**
 * Estimate total token count for an array of UIMessages.
 *
 * Walks each message's parts (text, tool invocations, tool results)
 * and applies per-message overhead (TOKENS_PER_MESSAGE).
 *
 * This is a heuristic. Do not use where exact counts are required.
 */
function estimateMessageTokens(messages) {
	let tokens = 0;
	for (const msg of messages) {
		// Fixed overhead per message for role/framing tokens.
		tokens += TOKENS_PER_MESSAGE;
		for (const part of msg.parts) {
			if (part.type === "text") {
				tokens += estimateStringTokens(part.text);
			} else if (part.type.startsWith("tool-") || part.type === "dynamic-tool") {
				// Tool parts contribute their serialized input/output payloads.
				const toolPart = part;
				if (toolPart.input) tokens += estimateStringTokens(JSON.stringify(toolPart.input));
				if (toolPart.output) tokens += estimateStringTokens(JSON.stringify(toolPart.output));
			}
		}
	}
	return tokens;
}
//#endregion
//#region src/experimental/memory/utils/compaction-helpers.ts
/** Prefix for all compaction messages (overlays and summaries) */
const COMPACTION_PREFIX = "compaction_";
/** Check if a message is a compaction message */
function isCompactionMessage(msg) {
	return msg.id.startsWith(COMPACTION_PREFIX);
}
/**
 * Check if a message contains tool invocations.
 */
function hasToolCalls(msg) {
	for (const part of msg.parts) {
		if (part.type.startsWith("tool-") || part.type === "dynamic-tool") return true;
	}
	return false;
}
/**
 * Get tool call IDs from a message's parts.
 */
function getToolCallIds(msg) {
	const ids = /* @__PURE__ */ new Set();
	for (const part of msg.parts) {
		const isToolPart = part.type.startsWith("tool-") || part.type === "dynamic-tool";
		if (isToolPart && "toolCallId" in part) ids.add(part.toolCallId);
	}
	return ids;
}
/**
 * Check if a message is a tool result referencing a specific call ID.
 */
function isToolResultFor(msg, callIds) {
	for (const part of msg.parts) {
		const isToolPart = part.type.startsWith("tool-") || part.type === "dynamic-tool";
		if (isToolPart && "toolCallId" in part && callIds.has(part.toolCallId)) return true;
	}
	return false;
}
/**
 * Align a boundary index forward to avoid splitting tool call/result groups.
 * If the boundary falls between an assistant message with tool calls and its
 * tool results, move it forward past the results.
 */
function alignBoundaryForward(messages, idx) {
	if (idx <= 0 || idx >= messages.length) return idx;
	const before = messages[idx - 1];
	// Only move when the message just before the boundary issued tool calls.
	if (before.role !== "assistant" || !hasToolCalls(before)) return idx;
	const callIds = getToolCallIds(before);
	let cursor = idx;
	while (cursor < messages.length && isToolResultFor(messages[cursor], callIds)) cursor += 1;
	return cursor;
}
/**
 * Align a boundary index backward to avoid splitting tool call/result groups.
 * If the boundary falls in the middle of tool results, move it backward to
 * include the assistant message that made the calls.
 */
function alignBoundaryBackward(messages, idx) {
	if (idx <= 0 || idx >= messages.length) return idx;
	let cursor = idx;
	while (cursor > 0) {
		const current = messages[cursor];
		// A tool-call message itself is a safe boundary — stop here.
		if (current.role === "assistant" && hasToolCalls(current)) break;
		const before = messages[cursor - 1];
		const danglingResult =
			before.role === "assistant" &&
			hasToolCalls(before) &&
			isToolResultFor(current, getToolCallIds(before));
		if (!danglingResult) break;
		// Step back so the result stays with the call that produced it.
		cursor -= 1;
	}
	return cursor;
}
/**
 * Find the compression end boundary using a token budget for the tail.
 * Walks backward from the end, accumulating estimated tokens until the
 * budget is spent. Returns the index where compression should stop
 * (everything from this index onward is protected).
 *
 * @param messages All messages
 * @param headEnd Index where the protected head ends (compression starts here)
 * @param tailTokenBudget Maximum tokens to keep in the tail
 * @param minTailMessages Minimum messages to protect in the tail (fallback)
 */
function findTailCutByTokens(messages, headEnd, tailTokenBudget = 2e4, minTailMessages = 2) {
	const total = messages.length;
	let spent = 0;
	let tokenCut = total;
	for (let i = total - 1; i >= headEnd; i--) {
		const cost = estimateMessageTokens([messages[i]]);
		// Stop once the budget would be exceeded — but only after at least one
		// tail message has been taken (cut < total).
		if (spent + cost > tailTokenBudget && tokenCut < total) break;
		spent += cost;
		tokenCut = i;
	}
	// Protect whichever tail is larger: the token-budget cut or the
	// minTailMessages floor (earlier index == more messages protected).
	const minCut = total - minTailMessages;
	const chosen = minCut >= headEnd ? Math.min(tokenCut, minCut) : tokenCut;
	// Never leave the boundary in the middle of a tool call/result group.
	return alignBoundaryBackward(messages, chosen);
}
/**
 * Fix orphaned tool call/result pairs after compaction.
 *
 * Two failure modes:
 * 1. Tool result references a call_id whose assistant tool_call was removed
 *    → Remove the orphaned result
 * 2. Assistant has tool_calls whose results were dropped
 *    → Add stub results so the API doesn't error
 *
 * @param messages Messages after compaction
 * @returns Sanitized messages with no orphaned pairs
 */
function sanitizeToolPairs(messages) {
	// Predicate: is this part a tool invocation/result part?
	const isToolPart = (part) => part.type.startsWith("tool-") || part.type === "dynamic-tool";
	// Local equivalent of getToolCallIds: every toolCallId on a message.
	const callIdsOf = (msg) => {
		const ids = new Set();
		for (const part of msg.parts) {
			if (isToolPart(part) && "toolCallId" in part) ids.add(part.toolCallId);
		}
		return ids;
	};
	// 1. Tool call ids that still exist on assistant messages.
	const survivingCallIds = new Set();
	for (const msg of messages) {
		if (msg.role !== "assistant") continue;
		for (const id of callIdsOf(msg)) survivingCallIds.add(id);
	}
	// 2. Call ids that have a result (a tool part carrying an `output` key).
	const resultCallIds = new Set();
	for (const msg of messages) {
		for (const part of msg.parts) {
			if (isToolPart(part) && "toolCallId" in part && "output" in part) {
				resultCallIds.add(part.toolCallId);
			}
		}
	}
	// 3. Results whose originating call was compacted away → drop those parts.
	const orphanedResults = new Set();
	for (const id of resultCallIds) {
		if (!survivingCallIds.has(id)) orphanedResults.add(id);
	}
	let result = messages;
	if (orphanedResults.size > 0) {
		result = result.map((msg) => {
			const keptParts = msg.parts.filter((part) => {
				const isResult = isToolPart(part) && "toolCallId" in part && "output" in part;
				return isResult ? !orphanedResults.has(part.toolCallId) : true;
			});
			// Only allocate a new message object when parts actually changed.
			return keptParts.length === msg.parts.length ? msg : { ...msg, parts: keptParts };
		});
	}
	// 4. Calls whose results were dropped → append stub results right after
	// the assistant message so downstream APIs see a complete pair.
	const missingResults = new Set();
	for (const id of survivingCallIds) {
		if (!resultCallIds.has(id) && !orphanedResults.has(id)) missingResults.add(id);
	}
	if (missingResults.size > 0) {
		const patched = [];
		for (const msg of result) {
			patched.push(msg);
			if (msg.role !== "assistant") continue;
			for (const id of callIdsOf(msg)) {
				if (!missingResults.has(id)) continue;
				// Recover the tool name from the originating call part, if present.
				const callPart = msg.parts.find((p) => "toolCallId" in p && p.toolCallId === id);
				patched.push({
					id: `stub-${id}`,
					role: "assistant",
					parts: [{
						type: "tool-result",
						toolCallId: id,
						toolName: callPart?.toolName ?? "unknown",
						result: "[Result from earlier conversation — see context summary above]"
					}],
					createdAt: new Date()
				});
			}
		}
		result = patched;
	}
	// 5. Drop messages whose parts were entirely filtered away.
	return result.filter((msg) => msg.parts.length > 0);
}
/**
 * Compute a summary token budget based on the content being compressed.
 *
 * The budget is ~20% of the estimated tokens in `messages`, with a floor
 * of 100 tokens. NOTE: the previous doc claimed a 2K-8K clamp, but no
 * upper clamp is applied here — only the 100-token minimum.
 */
function computeSummaryBudget(messages) {
	const contentTokens = estimateMessageTokens(messages);
	// The summary replaces the compressed middle, so it is sized relative to
	// what it replaces — not the tail budget (different context-window slots).
	const budget = Math.floor(contentTokens * .2);
	return Math.max(100, budget);
}
/**
 * Build a prompt for LLM summarization of compressed messages.
 *
 * @param messages Messages to summarize
 * @param previousSummary Previous summary for iterative updates (or null for first compaction)
 * @param budget Target token count for the summary
 */
function buildSummaryPrompt(messages, previousSummary, budget) {
	// Render one message as "[role]\ntext…\n[Tool: …]…", truncating each
	// tool payload to 500 characters.
	const renderMessage = (msg) => {
		const textBlock = msg.parts
			.filter((p) => p.type === "text")
			.map((p) => p.text)
			.join("\n");
		const toolBlock = msg.parts
			.filter((p) => p.type.startsWith("tool-") || p.type === "dynamic-tool")
			.map((p) => {
				const lines = [`[Tool: ${p.toolName ?? "unknown"}]`];
				if (p.input) lines.push(`Input: ${JSON.stringify(p.input).slice(0, 500)}`);
				if (p.output) lines.push(`Output: ${String(p.output).slice(0, 500)}`);
				return lines.join("\n");
			})
			.join("\n");
		return `[${msg.role}]\n${textBlock}${toolBlock ? "\n" + toolBlock : ""}`;
	};
	const content = messages.map(renderMessage).join("\n\n---\n\n");
	// Structured sections + factuality guardrails shared verbatim by both
	// prompt variants below.
	const structure = `## Topic
[What the conversation is about]

## Key Points
[Important information, decisions, and conclusions from the conversation]

## Current State
[Where things stand now — what has been done, what is in progress]

## Open Items
[Unresolved questions, pending tasks, or next steps discussed]

Target ~${budget} tokens. Be factual — only include information that was explicitly discussed in the conversation. Do NOT invent file paths, commands, or details that were not mentioned. Write only the summary body.`;
	if (previousSummary) {
		// Iterative update: merge new turns into the existing summary.
		return `You are updating a conversation summary. A previous summary exists below. New conversation turns have occurred since then and need to be incorporated.

PREVIOUS SUMMARY:
${previousSummary}

NEW TURNS TO INCORPORATE:
${content}

Update the summary. PRESERVE existing information that is still relevant. ADD new information. Remove information only if it is clearly obsolete.

${structure}`;
	}
	// First compaction: summarize from scratch.
	return `Create a concise summary of this conversation that preserves the important information for future context.

CONVERSATION TO SUMMARIZE:
${content}

Use this structure:

${structure}`;
}
/**
 * Reference compaction implementation.
 *
 * Implements the full hermes-style compaction algorithm:
 * 1. Protect head messages (first N)
 * 2. Protect tail by token budget (walk backward)
 * 3. Align boundaries to tool call groups
 * 4. Summarize middle section with LLM (structured format)
 * 5. Sanitize orphaned tool pairs
 * 6. Iterative summary updates on subsequent compactions
 *
 * @example
 * ```typescript
 * import { createCompactFunction } from "agents/experimental/memory/utils";
 *
 * const session = new Session(provider, {
 *   compaction: {
 *     tokenThreshold: 100000,
 *     fn: createCompactFunction({
 *       summarize: (prompt) => generateText({ model, prompt }).then(r => r.text)
 *     })
 *   }
 * });
 * ```
 */
function createCompactFunction(opts) {
	const protectHead = opts.protectHead ?? 3;
	const tailTokenBudget = opts.tailTokenBudget ?? 2e4;
	const minTailMessages = opts.minTailMessages ?? 2;
	return async (messages) => {
		// Too few messages to keep both head and tail intact — nothing to do.
		if (messages.length <= protectHead + minTailMessages) return null;
		// Steps 1-3: compute the compression window, aligned to tool groups.
		const compressStart = alignBoundaryForward(messages, protectHead);
		const compressEnd = findTailCutByTokens(messages, compressStart, tailTokenBudget, minTailMessages);
		if (compressEnd <= compressStart) return null;
		// Compaction overlays carry virtual IDs — keep them out of the
		// summary prompt and out of the from/to range.
		const middle = messages
			.slice(compressStart, compressEnd)
			.filter((m) => !isCompactionMessage(m));
		if (middle.length === 0) return null;
		// Steps 4/6: summarize, folding in any previous overlay summary.
		const overlay = messages.find(isCompactionMessage);
		const previousSummary = overlay
			? overlay.parts.filter((p) => p.type === "text").map((p) => p.text).join("\n")
			: null;
		const prompt = buildSummaryPrompt(middle, previousSummary, computeSummaryBudget(middle));
		const summary = await opts.summarize(prompt);
		if (!summary.trim()) return null;
		return {
			fromMessageId: middle[0].id,
			toMessageId: middle[middle.length - 1].id,
			summary
		};
	};
}
309
+ //#endregion
310
+ export { computeSummaryBudget as a, isCompactionMessage as c, TOKENS_PER_MESSAGE as d, WORDS_TOKEN_MULTIPLIER as f, buildSummaryPrompt as i, sanitizeToolPairs as l, estimateStringTokens as m, alignBoundaryBackward as n, createCompactFunction as o, estimateMessageTokens as p, alignBoundaryForward as r, findTailCutByTokens as s, COMPACTION_PREFIX as t, CHARS_PER_TOKEN as u };
311
+
312
+ //# sourceMappingURL=compaction-helpers-BFTBIzpK.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"compaction-helpers-BFTBIzpK.js","names":[],"sources":["../src/experimental/memory/utils/tokens.ts","../src/experimental/memory/utils/compaction-helpers.ts"],"sourcesContent":["/**\n * Token Estimation Utilities\n *\n * IMPORTANT: These are heuristic estimates, not actual tokenizer counts.\n *\n * We intentionally avoid real tokenizers (e.g. tiktoken, sentencepiece) because:\n * - A single tiktoken instance costs ~80-120MB of heap\n * - Cloudflare Workers have tight memory limits (128MB)\n * - For compaction thresholds, a conservative estimate is sufficient\n *\n * The hybrid approach (max of character-based and word-based estimates) handles\n * both dense token content (JSON, code) and natural language reasonably well.\n *\n * Calibration notes:\n * - Character-based: ~4 chars per token (conservative, from OpenAI guidance)\n * - Word-based: ~1.3 tokens per word (empirical, from Mastra's memory system)\n * - Per-message overhead: ~4 tokens for role/framing (empirical)\n *\n * These ratios are tuned for English. CJK, emoji-heavy, or highly technical\n * content may have different ratios. The conservative estimates help ensure\n * compaction triggers before context windows are actually exceeded.\n */\n\nimport type { UIMessage } from \"ai\";\n\n/** Approximate characters per token for English text */\nexport const CHARS_PER_TOKEN = 4;\n\n/** Approximate token multiplier per whitespace-separated word */\nexport const WORDS_TOKEN_MULTIPLIER = 1.3;\n\n/** Approximate overhead tokens per message (role, framing) */\nexport const TOKENS_PER_MESSAGE = 4;\n\n/**\n * Estimate token count for a string using a hybrid heuristic.\n *\n * Takes the max of two estimates:\n * - Character-based: `length / 4` — better for dense content (JSON, code, URLs)\n * - Word-based: `words * 1.3` — better for natural language prose\n *\n * This is a heuristic. 
Do not use where exact counts are required.\n */\nexport function estimateStringTokens(text: string): number {\n if (!text) return 0;\n const charEstimate = text.length / CHARS_PER_TOKEN;\n const wordEstimate =\n text.split(/\\s+/).filter(Boolean).length * WORDS_TOKEN_MULTIPLIER;\n return Math.ceil(Math.max(charEstimate, wordEstimate));\n}\n\n/**\n * Estimate total token count for an array of UIMessages.\n *\n * Walks each message's parts (text, tool invocations, tool results)\n * and applies per-message overhead.\n *\n * This is a heuristic. Do not use where exact counts are required.\n */\nexport function estimateMessageTokens(messages: UIMessage[]): number {\n let tokens = 0;\n for (const msg of messages) {\n tokens += TOKENS_PER_MESSAGE;\n for (const part of msg.parts) {\n if (part.type === \"text\") {\n tokens += estimateStringTokens(\n (part as { type: \"text\"; text: string }).text\n );\n } else if (\n part.type.startsWith(\"tool-\") ||\n part.type === \"dynamic-tool\"\n ) {\n const toolPart = part as { input?: unknown; output?: unknown };\n if (toolPart.input) {\n tokens += estimateStringTokens(JSON.stringify(toolPart.input));\n }\n if (toolPart.output) {\n tokens += estimateStringTokens(JSON.stringify(toolPart.output));\n }\n }\n }\n }\n return tokens;\n}\n","/**\n * Compaction Helpers\n *\n * Utilities for full compaction (LLM-based summarization).\n * Used by the reference compaction implementation and available\n * for custom CompactFunction implementations.\n */\n\nimport type { UIMessage } from \"ai\";\nimport { estimateMessageTokens } from \"./tokens\";\n\n// ── Compaction ID constants ─────────────────────────────────────────\n\n/** Prefix for all compaction messages (overlays and summaries) */\nexport const COMPACTION_PREFIX = \"compaction_\";\n\n/** Check if a message is a compaction message */\nexport function isCompactionMessage(msg: UIMessage): boolean {\n return msg.id.startsWith(COMPACTION_PREFIX);\n}\n\n// ── Tool Pair Alignment 
──────────────────────────────────────────────\n\n/**\n * Check if a message contains tool invocations.\n */\nfunction hasToolCalls(msg: UIMessage): boolean {\n return msg.parts.some(\n (p) => p.type.startsWith(\"tool-\") || p.type === \"dynamic-tool\"\n );\n}\n\n/**\n * Get tool call IDs from a message's parts.\n */\nfunction getToolCallIds(msg: UIMessage): Set<string> {\n const ids = new Set<string>();\n for (const part of msg.parts) {\n if (\n (part.type.startsWith(\"tool-\") || part.type === \"dynamic-tool\") &&\n \"toolCallId\" in part\n ) {\n ids.add((part as { toolCallId: string }).toolCallId);\n }\n }\n return ids;\n}\n\n/**\n * Check if a message is a tool result referencing a specific call ID.\n */\nfunction isToolResultFor(msg: UIMessage, callIds: Set<string>): boolean {\n return msg.parts.some(\n (p) =>\n (p.type.startsWith(\"tool-\") || p.type === \"dynamic-tool\") &&\n \"toolCallId\" in p &&\n callIds.has((p as { toolCallId: string }).toolCallId)\n );\n}\n\n/**\n * Align a boundary index forward to avoid splitting tool call/result groups.\n * If the boundary falls between an assistant message with tool calls and its\n * tool results, move it forward past the results.\n */\nexport function alignBoundaryForward(\n messages: UIMessage[],\n idx: number\n): number {\n if (idx <= 0 || idx >= messages.length) return idx;\n\n // Check if the message before the boundary has tool calls\n const prev = messages[idx - 1];\n if (prev.role === \"assistant\" && hasToolCalls(prev)) {\n const callIds = getToolCallIds(prev);\n // Skip forward past any tool results for these calls\n while (idx < messages.length && isToolResultFor(messages[idx], callIds)) {\n idx++;\n }\n }\n\n return idx;\n}\n\n/**\n * Align a boundary index backward to avoid splitting tool call/result groups.\n * If the boundary falls in the middle of tool results, move it backward to\n * include the assistant message that made the calls.\n */\nexport function alignBoundaryBackward(\n messages: 
UIMessage[],\n idx: number\n): number {\n if (idx <= 0 || idx >= messages.length) return idx;\n\n // If the message at idx is a tool result, walk backward to find the call\n while (idx > 0) {\n const msg = messages[idx];\n if (msg.role === \"assistant\" && hasToolCalls(msg)) {\n break; // This is a tool call message — include it\n }\n // Check if this looks like a tool result (assistant message following another)\n const prev = messages[idx - 1];\n if (prev.role === \"assistant\" && hasToolCalls(prev)) {\n const callIds = getToolCallIds(prev);\n if (isToolResultFor(msg, callIds)) {\n idx--; // Move back to include the call\n continue;\n }\n }\n break;\n }\n\n return idx;\n}\n\n// ── Token-Budget Tail Protection ─────────────────────────────────────\n\n/**\n * Find the compression end boundary using a token budget for the tail.\n * Walks backward from the end, accumulating tokens until budget is reached.\n * Returns the index where compression should stop (everything from this\n * index onward is protected).\n *\n * @param messages All messages\n * @param headEnd Index where the protected head ends (compression starts here)\n * @param tailTokenBudget Maximum tokens to keep in the tail\n * @param minTailMessages Minimum messages to protect in the tail (fallback)\n */\nexport function findTailCutByTokens(\n messages: UIMessage[],\n headEnd: number,\n tailTokenBudget = 20000,\n minTailMessages = 2\n): number {\n const n = messages.length;\n let accumulated = 0;\n let tokenCut = n;\n\n for (let i = n - 1; i >= headEnd; i--) {\n const msgTokens = estimateMessageTokens([messages[i]]);\n\n if (accumulated + msgTokens > tailTokenBudget && tokenCut < n) {\n // Budget exceeded and we already have at least one tail message\n break;\n }\n accumulated += msgTokens;\n tokenCut = i;\n }\n\n // Protect whichever is larger: token-based tail or minTailMessages\n const minCut = n - minTailMessages;\n const cutIdx = minCut >= headEnd ? 
Math.min(tokenCut, minCut) : tokenCut;\n\n // Align to avoid splitting tool groups\n return alignBoundaryBackward(messages, cutIdx);\n}\n\n// ── Tool Pair Sanitization ───────────────────────────────────────────\n\n/**\n * Fix orphaned tool call/result pairs after compaction.\n *\n * Two failure modes:\n * 1. Tool result references a call_id whose assistant tool_call was removed\n * → Remove the orphaned result\n * 2. Assistant has tool_calls whose results were dropped\n * → Add stub results so the API doesn't error\n *\n * @param messages Messages after compaction\n * @returns Sanitized messages with no orphaned pairs\n */\nexport function sanitizeToolPairs(messages: UIMessage[]): UIMessage[] {\n // Build set of surviving tool call IDs (from assistant messages)\n const survivingCallIds = new Set<string>();\n for (const msg of messages) {\n if (msg.role === \"assistant\") {\n for (const id of getToolCallIds(msg)) {\n survivingCallIds.add(id);\n }\n }\n }\n\n // Build set of tool result IDs\n const resultCallIds = new Set<string>();\n for (const msg of messages) {\n for (const part of msg.parts) {\n if (\n (part.type.startsWith(\"tool-\") || part.type === \"dynamic-tool\") &&\n \"toolCallId\" in part &&\n \"output\" in part\n ) {\n resultCallIds.add((part as { toolCallId: string }).toolCallId);\n }\n }\n }\n\n // Remove orphaned results (results whose calls were dropped)\n const orphanedResults = new Set<string>();\n for (const id of resultCallIds) {\n if (!survivingCallIds.has(id)) {\n orphanedResults.add(id);\n }\n }\n\n let result = messages;\n if (orphanedResults.size > 0) {\n result = result.map((msg) => {\n const filteredParts = msg.parts.filter((part) => {\n if (\n (part.type.startsWith(\"tool-\") || part.type === \"dynamic-tool\") &&\n \"toolCallId\" in part &&\n \"output\" in part\n ) {\n return !orphanedResults.has(\n (part as { toolCallId: string }).toolCallId\n );\n }\n return true;\n });\n if (filteredParts.length !== msg.parts.length) {\n return { 
...msg, parts: filteredParts } as UIMessage;\n }\n return msg;\n });\n }\n\n // Add stub results for calls whose results were dropped\n const missingResults = new Set<string>();\n for (const id of survivingCallIds) {\n if (!resultCallIds.has(id) && !orphanedResults.has(id)) {\n missingResults.add(id);\n }\n }\n\n if (missingResults.size > 0) {\n const patched: UIMessage[] = [];\n for (const msg of result) {\n patched.push(msg);\n if (msg.role === \"assistant\") {\n for (const id of getToolCallIds(msg)) {\n if (missingResults.has(id)) {\n // Find the tool name from the call\n const callPart = msg.parts.find(\n (p) =>\n \"toolCallId\" in p &&\n (p as { toolCallId: string }).toolCallId === id\n ) as { toolName?: string } | undefined;\n\n patched.push({\n id: `stub-${id}`,\n role: \"assistant\",\n parts: [\n {\n type: \"tool-result\" as const,\n toolCallId: id,\n toolName: callPart?.toolName ?? \"unknown\",\n result:\n \"[Result from earlier conversation — see context summary above]\"\n } as unknown as UIMessage[\"parts\"][number]\n ],\n createdAt: new Date()\n } as UIMessage);\n }\n }\n }\n }\n result = patched;\n }\n\n // Remove empty messages (all parts filtered out)\n return result.filter((msg) => msg.parts.length > 0);\n}\n\n// ── Summary Budget ───────────────────────────────────────────────────\n\n/**\n * Compute a summary token budget based on the content being compressed.\n * 20% of the compressed content, clamped to 2K-8K tokens.\n */\nexport function computeSummaryBudget(messages: UIMessage[]): number {\n const contentTokens = estimateMessageTokens(messages);\n // Summary is ~20% of the content being compressed.\n // The summary replaces the compressed middle, so it's sized relative\n // to what it's replacing — not the tail budget (they occupy different\n // slots in the context window).\n const budget = Math.floor(contentTokens * 0.2);\n return Math.max(100, budget);\n}\n\n// ── Structured Summary Prompt ────────────────────────────────────────\n\n/**\n * 
Build a prompt for LLM summarization of compressed messages.\n *\n * @param messages Messages to summarize\n * @param previousSummary Previous summary for iterative updates (or null for first compaction)\n * @param budget Target token count for the summary\n */\nexport function buildSummaryPrompt(\n messages: UIMessage[],\n previousSummary: string | null,\n budget: number\n): string {\n const content = messages\n .map((msg) => {\n const textParts = msg.parts\n .filter((p) => p.type === \"text\")\n .map((p) => (p as { text: string }).text)\n .join(\"\\n\");\n\n const toolParts = msg.parts\n .filter((p) => p.type.startsWith(\"tool-\") || p.type === \"dynamic-tool\")\n .map((p) => {\n const tp = p as {\n toolName?: string;\n input?: unknown;\n output?: unknown;\n };\n const parts = [`[Tool: ${tp.toolName ?? \"unknown\"}]`];\n if (tp.input)\n parts.push(`Input: ${JSON.stringify(tp.input).slice(0, 500)}`);\n if (tp.output)\n parts.push(`Output: ${String(tp.output).slice(0, 500)}`);\n return parts.join(\"\\n\");\n })\n .join(\"\\n\");\n\n return `[${msg.role}]\\n${textParts}${toolParts ? \"\\n\" + toolParts : \"\"}`;\n })\n .join(\"\\n\\n---\\n\\n\");\n\n if (previousSummary) {\n return `You are updating a conversation summary. A previous summary exists below. New conversation turns have occurred since then and need to be incorporated.\n\nPREVIOUS SUMMARY:\n${previousSummary}\n\nNEW TURNS TO INCORPORATE:\n${content}\n\nUpdate the summary. PRESERVE existing information that is still relevant. ADD new information. Remove information only if it is clearly obsolete.\n\n## Topic\n[What the conversation is about]\n\n## Key Points\n[Important information, decisions, and conclusions from the conversation]\n\n## Current State\n[Where things stand now — what has been done, what is in progress]\n\n## Open Items\n[Unresolved questions, pending tasks, or next steps discussed]\n\nTarget ~${budget} tokens. 
Be factual — only include information that was explicitly discussed in the conversation. Do NOT invent file paths, commands, or details that were not mentioned. Write only the summary body.`;\n }\n\n return `Create a concise summary of this conversation that preserves the important information for future context.\n\nCONVERSATION TO SUMMARIZE:\n${content}\n\nUse this structure:\n\n## Topic\n[What the conversation is about]\n\n## Key Points\n[Important information, decisions, and conclusions from the conversation]\n\n## Current State\n[Where things stand now — what has been done, what is in progress]\n\n## Open Items\n[Unresolved questions, pending tasks, or next steps discussed]\n\nTarget ~${budget} tokens. Be factual — only include information that was explicitly discussed in the conversation. Do NOT invent file paths, commands, or details that were not mentioned. Write only the summary body.`;\n}\n\n// ── Reference Compaction Implementation ──────────────────────────────\n\n/**\n * Result of a compaction function — describes the overlay to store.\n */\nexport interface CompactResult {\n /** First message ID in the compacted range */\n fromMessageId: string;\n /** Last message ID in the compacted range */\n toMessageId: string;\n /** Summary text to store as the overlay */\n summary: string;\n}\n\nexport interface CompactOptions {\n /**\n * Function to call the LLM for summarization.\n * Takes a user prompt string, returns the LLM's text response.\n */\n summarize: (prompt: string) => Promise<string>;\n\n /** Number of head messages to protect (default: 2) */\n protectHead?: number;\n\n /** Token budget for tail protection (default: 20000) */\n tailTokenBudget?: number;\n\n /** Minimum tail messages to protect (default: 2) */\n minTailMessages?: number;\n}\n\n/**\n * Reference compaction implementation.\n *\n * Implements the full hermes-style compaction algorithm:\n * 1. Protect head messages (first N)\n * 2. Protect tail by token budget (walk backward)\n * 3. 
Align boundaries to tool call groups\n * 4. Summarize middle section with LLM (structured format)\n * 5. Sanitize orphaned tool pairs\n * 6. Iterative summary updates on subsequent compactions\n *\n * @example\n * ```typescript\n * import { createCompactFunction } from \"agents/experimental/memory/utils\";\n *\n * const session = new Session(provider, {\n * compaction: {\n * tokenThreshold: 100000,\n * fn: createCompactFunction({\n * summarize: (prompt) => generateText({ model, prompt }).then(r => r.text)\n * })\n * }\n * });\n * ```\n */\nexport function createCompactFunction(opts: CompactOptions) {\n const protectHead = opts.protectHead ?? 3;\n const tailTokenBudget = opts.tailTokenBudget ?? 20000;\n const minTailMessages = opts.minTailMessages ?? 2;\n\n return async (messages: UIMessage[]): Promise<CompactResult | null> => {\n if (messages.length <= protectHead + minTailMessages) {\n return null;\n }\n\n // 1. Find compression boundaries\n let compressStart = protectHead;\n compressStart = alignBoundaryForward(messages, compressStart);\n\n let compressEnd = findTailCutByTokens(\n messages,\n compressStart,\n tailTokenBudget,\n minTailMessages\n );\n\n if (compressEnd <= compressStart) {\n return null;\n }\n\n // Filter out compaction overlay messages — they have virtual IDs\n // and should not be included in the summary prompt or used as range IDs\n const middleMessages = messages\n .slice(compressStart, compressEnd)\n .filter((m) => !isCompactionMessage(m));\n\n if (middleMessages.length === 0) return null;\n\n // 2. Generate summary — extract previous summary from compaction overlays\n const existingCompaction = messages.find(isCompactionMessage);\n const previousSummary = existingCompaction\n ? 
existingCompaction.parts\n .filter((p) => p.type === \"text\")\n .map((p) => (p as { text: string }).text)\n .join(\"\\n\")\n : null;\n\n const budget = computeSummaryBudget(middleMessages);\n const prompt = buildSummaryPrompt(middleMessages, previousSummary, budget);\n const summary = await opts.summarize(prompt);\n\n if (!summary.trim()) return null;\n\n return {\n fromMessageId: middleMessages[0].id,\n toMessageId: middleMessages[middleMessages.length - 1].id,\n summary\n };\n };\n}\n"],"mappings":";;AA0BA,MAAa,kBAAkB;;AAG/B,MAAa,yBAAyB;;AAGtC,MAAa,qBAAqB;;;;;;;;;;AAWlC,SAAgB,qBAAqB,MAAsB;AACzD,KAAI,CAAC,KAAM,QAAO;CAClB,MAAM,eAAe,KAAK,SAAA;CAC1B,MAAM,eACJ,KAAK,MAAM,MAAM,CAAC,OAAO,QAAQ,CAAC,SAAS;AAC7C,QAAO,KAAK,KAAK,KAAK,IAAI,cAAc,aAAa,CAAC;;;;;;;;;;AAWxD,SAAgB,sBAAsB,UAA+B;CACnE,IAAI,SAAS;AACb,MAAK,MAAM,OAAO,UAAU;AAC1B,YAAA;AACA,OAAK,MAAM,QAAQ,IAAI,MACrB,KAAI,KAAK,SAAS,OAChB,WAAU,qBACP,KAAwC,KAC1C;WAED,KAAK,KAAK,WAAW,QAAQ,IAC7B,KAAK,SAAS,gBACd;GACA,MAAM,WAAW;AACjB,OAAI,SAAS,MACX,WAAU,qBAAqB,KAAK,UAAU,SAAS,MAAM,CAAC;AAEhE,OAAI,SAAS,OACX,WAAU,qBAAqB,KAAK,UAAU,SAAS,OAAO,CAAC;;;AAKvE,QAAO;;;;;ACpET,MAAa,oBAAoB;;AAGjC,SAAgB,oBAAoB,KAAyB;AAC3D,QAAO,IAAI,GAAG,WAAW,kBAAkB;;;;;AAQ7C,SAAS,aAAa,KAAyB;AAC7C,QAAO,IAAI,MAAM,MACd,MAAM,EAAE,KAAK,WAAW,QAAQ,IAAI,EAAE,SAAS,eACjD;;;;;AAMH,SAAS,eAAe,KAA6B;CACnD,MAAM,sBAAM,IAAI,KAAa;AAC7B,MAAK,MAAM,QAAQ,IAAI,MACrB,MACG,KAAK,KAAK,WAAW,QAAQ,IAAI,KAAK,SAAS,mBAChD,gBAAgB,KAEhB,KAAI,IAAK,KAAgC,WAAW;AAGxD,QAAO;;;;;AAMT,SAAS,gBAAgB,KAAgB,SAA+B;AACtE,QAAO,IAAI,MAAM,MACd,OACE,EAAE,KAAK,WAAW,QAAQ,IAAI,EAAE,SAAS,mBAC1C,gBAAgB,KAChB,QAAQ,IAAK,EAA6B,WAAW,CACxD;;;;;;;AAQH,SAAgB,qBACd,UACA,KACQ;AACR,KAAI,OAAO,KAAK,OAAO,SAAS,OAAQ,QAAO;CAG/C,MAAM,OAAO,SAAS,MAAM;AAC5B,KAAI,KAAK,SAAS,eAAe,aAAa,KAAK,EAAE;EACnD,MAAM,UAAU,eAAe,KAAK;AAEpC,SAAO,MAAM,SAAS,UAAU,gBAAgB,SAAS,MAAM,QAAQ,CACrE;;AAIJ,QAAO;;;;;;;AAQT,SAAgB,sBACd,UACA,KACQ;AACR,KAAI,OAAO,KAAK,OAAO,SAAS,OAAQ,QAAO;AAG/C,QAAO,MAAM,GAAG;EACd,MAAM,MAAM,SAAS;AACrB,MAAI,IAAI,SAAS,eAAe,aAAa,IAAI,CAC/C;EAGF,MA
AM,OAAO,SAAS,MAAM;AAC5B,MAAI,KAAK,SAAS,eAAe,aAAa,KAAK;OAE7C,gBAAgB,KADJ,eAAe,KAAK,CACH,EAAE;AACjC;AACA;;;AAGJ;;AAGF,QAAO;;;;;;;;;;;;;AAgBT,SAAgB,oBACd,UACA,SACA,kBAAkB,KAClB,kBAAkB,GACV;CACR,MAAM,IAAI,SAAS;CACnB,IAAI,cAAc;CAClB,IAAI,WAAW;AAEf,MAAK,IAAI,IAAI,IAAI,GAAG,KAAK,SAAS,KAAK;EACrC,MAAM,YAAY,sBAAsB,CAAC,SAAS,GAAG,CAAC;AAEtD,MAAI,cAAc,YAAY,mBAAmB,WAAW,EAE1D;AAEF,iBAAe;AACf,aAAW;;CAIb,MAAM,SAAS,IAAI;AAInB,QAAO,sBAAsB,UAHd,UAAU,UAAU,KAAK,IAAI,UAAU,OAAO,GAAG,SAGlB;;;;;;;;;;;;;;AAiBhD,SAAgB,kBAAkB,UAAoC;CAEpE,MAAM,mCAAmB,IAAI,KAAa;AAC1C,MAAK,MAAM,OAAO,SAChB,KAAI,IAAI,SAAS,YACf,MAAK,MAAM,MAAM,eAAe,IAAI,CAClC,kBAAiB,IAAI,GAAG;CAM9B,MAAM,gCAAgB,IAAI,KAAa;AACvC,MAAK,MAAM,OAAO,SAChB,MAAK,MAAM,QAAQ,IAAI,MACrB,MACG,KAAK,KAAK,WAAW,QAAQ,IAAI,KAAK,SAAS,mBAChD,gBAAgB,QAChB,YAAY,KAEZ,eAAc,IAAK,KAAgC,WAAW;CAMpE,MAAM,kCAAkB,IAAI,KAAa;AACzC,MAAK,MAAM,MAAM,cACf,KAAI,CAAC,iBAAiB,IAAI,GAAG,CAC3B,iBAAgB,IAAI,GAAG;CAI3B,IAAI,SAAS;AACb,KAAI,gBAAgB,OAAO,EACzB,UAAS,OAAO,KAAK,QAAQ;EAC3B,MAAM,gBAAgB,IAAI,MAAM,QAAQ,SAAS;AAC/C,QACG,KAAK,KAAK,WAAW,QAAQ,IAAI,KAAK,SAAS,mBAChD,gBAAgB,QAChB,YAAY,KAEZ,QAAO,CAAC,gBAAgB,IACrB,KAAgC,WAClC;AAEH,UAAO;IACP;AACF,MAAI,cAAc,WAAW,IAAI,MAAM,OACrC,QAAO;GAAE,GAAG;GAAK,OAAO;GAAe;AAEzC,SAAO;GACP;CAIJ,MAAM,iCAAiB,IAAI,KAAa;AACxC,MAAK,MAAM,MAAM,iBACf,KAAI,CAAC,cAAc,IAAI,GAAG,IAAI,CAAC,gBAAgB,IAAI,GAAG,CACpD,gBAAe,IAAI,GAAG;AAI1B,KAAI,eAAe,OAAO,GAAG;EAC3B,MAAM,UAAuB,EAAE;AAC/B,OAAK,MAAM,OAAO,QAAQ;AACxB,WAAQ,KAAK,IAAI;AACjB,OAAI,IAAI,SAAS;SACV,MAAM,MAAM,eAAe,IAAI,CAClC,KAAI,eAAe,IAAI,GAAG,EAAE;KAE1B,MAAM,WAAW,IAAI,MAAM,MACxB,MACC,gBAAgB,KACf,EAA6B,eAAe,GAChD;AAED,aAAQ,KAAK;MACX,IAAI,QAAQ;MACZ,MAAM;MACN,OAAO,CACL;OACE,MAAM;OACN,YAAY;OACZ,UAAU,UAAU,YAAY;OAChC,QACE;OACH,CACF;MACD,2BAAW,IAAI,MAAM;MACtB,CAAc;;;;AAKvB,WAAS;;AAIX,QAAO,OAAO,QAAQ,QAAQ,IAAI,MAAM,SAAS,EAAE;;;;;;AASrD,SAAgB,qBAAqB,UAA+B;CAClE,MAAM,gBAAgB,sBAAsB,SAAS;CAKrD,MAAM,SAAS,KAAK,MAAM,gBAAgB,GAAI;AAC9C,QAAO,KAAK,IAAI,KAAK,OAAO;;;;;;;;;AAY9B,SAAgB,mBACd,UACA,iBACA,QACQ;CACR,MAAM,UAAU,SACb,KAAK,QAAQ;EACZ,M
AAM,YAAY,IAAI,MACnB,QAAQ,MAAM,EAAE,SAAS,OAAO,CAChC,KAAK,MAAO,EAAuB,KAAK,CACxC,KAAK,KAAK;EAEb,MAAM,YAAY,IAAI,MACnB,QAAQ,MAAM,EAAE,KAAK,WAAW,QAAQ,IAAI,EAAE,SAAS,eAAe,CACtE,KAAK,MAAM;GACV,MAAM,KAAK;GAKX,MAAM,QAAQ,CAAC,UAAU,GAAG,YAAY,UAAU,GAAG;AACrD,OAAI,GAAG,MACL,OAAM,KAAK,UAAU,KAAK,UAAU,GAAG,MAAM,CAAC,MAAM,GAAG,IAAI,GAAG;AAChE,OAAI,GAAG,OACL,OAAM,KAAK,WAAW,OAAO,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,GAAG;AAC1D,UAAO,MAAM,KAAK,KAAK;IACvB,CACD,KAAK,KAAK;AAEb,SAAO,IAAI,IAAI,KAAK,KAAK,YAAY,YAAY,OAAO,YAAY;GACpE,CACD,KAAK,cAAc;AAEtB,KAAI,gBACF,QAAO;;;EAGT,gBAAgB;;;EAGhB,QAAQ;;;;;;;;;;;;;;;;UAgBA,OAAO;AAGf,QAAO;;;EAGP,QAAQ;;;;;;;;;;;;;;;;UAgBA,OAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2DjB,SAAgB,sBAAsB,MAAsB;CAC1D,MAAM,cAAc,KAAK,eAAe;CACxC,MAAM,kBAAkB,KAAK,mBAAmB;CAChD,MAAM,kBAAkB,KAAK,mBAAmB;AAEhD,QAAO,OAAO,aAAyD;AACrE,MAAI,SAAS,UAAU,cAAc,gBACnC,QAAO;EAIT,IAAI,gBAAgB;AACpB,kBAAgB,qBAAqB,UAAU,cAAc;EAE7D,IAAI,cAAc,oBAChB,UACA,eACA,iBACA,gBACD;AAED,MAAI,eAAe,cACjB,QAAO;EAKT,MAAM,iBAAiB,SACpB,MAAM,eAAe,YAAY,CACjC,QAAQ,MAAM,CAAC,oBAAoB,EAAE,CAAC;AAEzC,MAAI,eAAe,WAAW,EAAG,QAAO;EAGxC,MAAM,qBAAqB,SAAS,KAAK,oBAAoB;EAS7D,MAAM,SAAS,mBAAmB,gBARV,qBACpB,mBAAmB,MAChB,QAAQ,MAAM,EAAE,SAAS,OAAO,CAChC,KAAK,MAAO,EAAuB,KAAK,CACxC,KAAK,KAAK,GACb,MAEW,qBAAqB,eAAe,CACuB;EAC1E,MAAM,UAAU,MAAM,KAAK,UAAU,OAAO;AAE5C,MAAI,CAAC,QAAQ,MAAM,CAAE,QAAO;AAE5B,SAAO;GACL,eAAe,eAAe,GAAG;GACjC,aAAa,eAAe,eAAe,SAAS,GAAG;GACvD;GACD"}
@@ -0,0 +1,139 @@
1
+ import { UIMessage } from "ai";
2
+
3
+ //#region src/experimental/memory/utils/compaction-helpers.d.ts
4
+ /** Prefix for all compaction messages (overlays and summaries) */
5
+ declare const COMPACTION_PREFIX = "compaction_";
6
+ /** Check if a message is a compaction message */
7
+ declare function isCompactionMessage(msg: UIMessage): boolean;
8
+ /**
9
+ * Align a boundary index forward to avoid splitting tool call/result groups.
10
+ * If the boundary falls between an assistant message with tool calls and its
11
+ * tool results, move it forward past the results.
12
+ */
13
+ declare function alignBoundaryForward(
14
+ messages: UIMessage[],
15
+ idx: number
16
+ ): number;
17
+ /**
18
+ * Align a boundary index backward to avoid splitting tool call/result groups.
19
+ * If the boundary falls in the middle of tool results, move it backward to
20
+ * include the assistant message that made the calls.
21
+ */
22
+ declare function alignBoundaryBackward(
23
+ messages: UIMessage[],
24
+ idx: number
25
+ ): number;
26
+ /**
27
+ * Find the compression end boundary using a token budget for the tail.
28
+ * Walks backward from the end, accumulating tokens until budget is reached.
29
+ * Returns the index where compression should stop (everything from this
30
+ * index onward is protected).
31
+ *
32
+ * @param messages All messages
33
+ * @param headEnd Index where the protected head ends (compression starts here)
34
+ * @param tailTokenBudget Maximum tokens to keep in the tail
35
+ * @param minTailMessages Minimum messages to protect in the tail (fallback)
36
+ */
37
+ declare function findTailCutByTokens(
38
+ messages: UIMessage[],
39
+ headEnd: number,
40
+ tailTokenBudget?: number,
41
+ minTailMessages?: number
42
+ ): number;
43
+ /**
44
+ * Fix orphaned tool call/result pairs after compaction.
45
+ *
46
+ * Two failure modes:
47
+ * 1. Tool result references a call_id whose assistant tool_call was removed
48
+ * → Remove the orphaned result
49
+ * 2. Assistant has tool_calls whose results were dropped
50
+ * → Add stub results so the API doesn't error
51
+ *
52
+ * @param messages Messages after compaction
53
+ * @returns Sanitized messages with no orphaned pairs
54
+ */
55
+ declare function sanitizeToolPairs(messages: UIMessage[]): UIMessage[];
56
+ /**
57
+ * Compute a summary token budget based on the content being compressed.
58
+ * 20% of the compressed content, clamped to 2K-8K tokens.
59
+ */
60
+ declare function computeSummaryBudget(messages: UIMessage[]): number;
61
+ /**
62
+ * Build a prompt for LLM summarization of compressed messages.
63
+ *
64
+ * @param messages Messages to summarize
65
+ * @param previousSummary Previous summary for iterative updates (or null for first compaction)
66
+ * @param budget Target token count for the summary
67
+ */
68
+ declare function buildSummaryPrompt(
69
+ messages: UIMessage[],
70
+ previousSummary: string | null,
71
+ budget: number
72
+ ): string;
73
+ /**
74
+ * Result of a compaction function — describes the overlay to store.
75
+ */
76
+ interface CompactResult {
77
+ /** First message ID in the compacted range */
78
+ fromMessageId: string;
79
+ /** Last message ID in the compacted range */
80
+ toMessageId: string;
81
+ /** Summary text to store as the overlay */
82
+ summary: string;
83
+ }
84
+ interface CompactOptions {
85
+ /**
86
+ * Function to call the LLM for summarization.
87
+ * Takes a user prompt string, returns the LLM's text response.
88
+ */
89
+ summarize: (prompt: string) => Promise<string>;
90
+ /** Number of head messages to protect (default: 2) */
91
+ protectHead?: number;
92
+ /** Token budget for tail protection (default: 20000) */
93
+ tailTokenBudget?: number;
94
+ /** Minimum tail messages to protect (default: 2) */
95
+ minTailMessages?: number;
96
+ }
97
+ /**
98
+ * Reference compaction implementation.
99
+ *
100
+ * Implements the full hermes-style compaction algorithm:
101
+ * 1. Protect head messages (first N)
102
+ * 2. Protect tail by token budget (walk backward)
103
+ * 3. Align boundaries to tool call groups
104
+ * 4. Summarize middle section with LLM (structured format)
105
+ * 5. Sanitize orphaned tool pairs
106
+ * 6. Iterative summary updates on subsequent compactions
107
+ *
108
+ * @example
109
+ * ```typescript
110
+ * import { createCompactFunction } from "agents/experimental/memory/utils";
111
+ *
112
+ * const session = new Session(provider, {
113
+ * compaction: {
114
+ * tokenThreshold: 100000,
115
+ * fn: createCompactFunction({
116
+ * summarize: (prompt) => generateText({ model, prompt }).then(r => r.text)
117
+ * })
118
+ * }
119
+ * });
120
+ * ```
121
+ */
122
+ declare function createCompactFunction(
123
+ opts: CompactOptions
124
+ ): (messages: UIMessage[]) => Promise<CompactResult | null>;
125
+ //#endregion
126
+ export {
127
+ alignBoundaryForward as a,
128
+ createCompactFunction as c,
129
+ sanitizeToolPairs as d,
130
+ alignBoundaryBackward as i,
131
+ findTailCutByTokens as l,
132
+ CompactOptions as n,
133
+ buildSummaryPrompt as o,
134
+ CompactResult as r,
135
+ computeSummaryBudget as s,
136
+ COMPACTION_PREFIX as t,
137
+ isCompactionMessage as u
138
+ };
139
+ //# sourceMappingURL=compaction-helpers-DkJreaDR.d.ts.map
@@ -75,4 +75,4 @@ export {
75
75
  DurableObjectOAuthClientProvider as r,
76
76
  AgentMcpOAuthProvider as t
77
77
  };
78
- //# sourceMappingURL=do-oauth-client-provider-D7F2Pw40.d.ts.map
78
+ //# sourceMappingURL=do-oauth-client-provider-C2jurFjW.d.ts.map
@@ -154,4 +154,4 @@ export {
154
154
  DEFAULT_MAX_AGE_SECONDS as t,
155
155
  createSecureReplyEmailResolver as u
156
156
  };
157
- //# sourceMappingURL=email-YAQhwwXb.d.ts.map
157
+ //# sourceMappingURL=email-DwPlM0bQ.d.ts.map
package/dist/email.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { n as AgentEmail } from "./internal_context-DgcmHqS1.js";
1
+ import { n as AgentEmail } from "./internal_context-DT8RxmAN.js";
2
2
  import {
3
3
  a as SecureReplyResolverOptions,
4
4
  c as createCatchAllEmailResolver,
@@ -12,7 +12,7 @@ import {
12
12
  s as createAddressBasedEmailResolver,
13
13
  t as DEFAULT_MAX_AGE_SECONDS,
14
14
  u as createSecureReplyEmailResolver
15
- } from "./email-YAQhwwXb.js";
15
+ } from "./email-DwPlM0bQ.js";
16
16
  export {
17
17
  AgentEmail,
18
18
  DEFAULT_MAX_AGE_SECONDS,
@@ -1,4 +1,4 @@
1
- import { r as Agent } from "../index-DynYigzs.js";
1
+ import { r as Agent } from "../index-C-6EMK-E.js";
2
2
 
3
3
  //#region src/experimental/forever.d.ts
4
4
  type FiberState = {