langchain 1.0.0-alpha.4 → 1.0.0-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. package/dist/agents/RunnableCallable.cjs +5 -0
  2. package/dist/agents/RunnableCallable.cjs.map +1 -1
  3. package/dist/agents/RunnableCallable.d.cts +2 -0
  4. package/dist/agents/RunnableCallable.d.cts.map +1 -1
  5. package/dist/agents/RunnableCallable.d.ts +2 -0
  6. package/dist/agents/RunnableCallable.d.ts.map +1 -1
  7. package/dist/agents/RunnableCallable.js +5 -0
  8. package/dist/agents/RunnableCallable.js.map +1 -1
  9. package/dist/agents/annotation.d.cts +1 -1
  10. package/dist/agents/createAgent.cjs +10 -0
  11. package/dist/agents/createAgent.cjs.map +1 -0
  12. package/dist/agents/createAgent.js +10 -0
  13. package/dist/agents/createAgent.js.map +1 -0
  14. package/dist/agents/index.cjs +23 -4
  15. package/dist/agents/index.cjs.map +1 -1
  16. package/dist/agents/index.d.cts +83 -20
  17. package/dist/agents/index.d.cts.map +1 -1
  18. package/dist/agents/index.d.ts +83 -20
  19. package/dist/agents/index.d.ts.map +1 -1
  20. package/dist/agents/index.js +23 -4
  21. package/dist/agents/index.js.map +1 -1
  22. package/dist/agents/middlewareAgent/ReactAgent.cjs +255 -0
  23. package/dist/agents/middlewareAgent/ReactAgent.cjs.map +1 -0
  24. package/dist/agents/middlewareAgent/ReactAgent.d.cts +68 -0
  25. package/dist/agents/middlewareAgent/ReactAgent.d.cts.map +1 -0
  26. package/dist/agents/middlewareAgent/ReactAgent.d.ts +68 -0
  27. package/dist/agents/middlewareAgent/ReactAgent.d.ts.map +1 -0
  28. package/dist/agents/middlewareAgent/ReactAgent.js +254 -0
  29. package/dist/agents/middlewareAgent/ReactAgent.js.map +1 -0
  30. package/dist/agents/middlewareAgent/annotation.cjs +39 -0
  31. package/dist/agents/middlewareAgent/annotation.cjs.map +1 -0
  32. package/dist/agents/middlewareAgent/annotation.js +38 -0
  33. package/dist/agents/middlewareAgent/annotation.js.map +1 -0
  34. package/dist/agents/middlewareAgent/index.cjs +11 -0
  35. package/dist/agents/middlewareAgent/index.cjs.map +1 -0
  36. package/dist/agents/middlewareAgent/index.js +11 -0
  37. package/dist/agents/middlewareAgent/index.js.map +1 -0
  38. package/dist/agents/middlewareAgent/middleware.cjs +47 -0
  39. package/dist/agents/middlewareAgent/middleware.cjs.map +1 -0
  40. package/dist/agents/middlewareAgent/middleware.d.cts +46 -0
  41. package/dist/agents/middlewareAgent/middleware.d.cts.map +1 -0
  42. package/dist/agents/middlewareAgent/middleware.d.ts +46 -0
  43. package/dist/agents/middlewareAgent/middleware.d.ts.map +1 -0
  44. package/dist/agents/middlewareAgent/middleware.js +46 -0
  45. package/dist/agents/middlewareAgent/middleware.js.map +1 -0
  46. package/dist/agents/middlewareAgent/middlewares/hitl.cjs +235 -0
  47. package/dist/agents/middlewareAgent/middlewares/hitl.cjs.map +1 -0
  48. package/dist/agents/middlewareAgent/middlewares/hitl.d.cts +199 -0
  49. package/dist/agents/middlewareAgent/middlewares/hitl.d.cts.map +1 -0
  50. package/dist/agents/middlewareAgent/middlewares/hitl.d.ts +199 -0
  51. package/dist/agents/middlewareAgent/middlewares/hitl.d.ts.map +1 -0
  52. package/dist/agents/middlewareAgent/middlewares/hitl.js +234 -0
  53. package/dist/agents/middlewareAgent/middlewares/hitl.js.map +1 -0
  54. package/dist/agents/middlewareAgent/middlewares/index.cjs +8 -0
  55. package/dist/agents/middlewareAgent/middlewares/index.d.cts +4 -0
  56. package/dist/agents/middlewareAgent/middlewares/index.d.ts +4 -0
  57. package/dist/agents/middlewareAgent/middlewares/index.js +5 -0
  58. package/dist/agents/middlewareAgent/middlewares/promptCaching.cjs +153 -0
  59. package/dist/agents/middlewareAgent/middlewares/promptCaching.cjs.map +1 -0
  60. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.cts +152 -0
  61. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.cts.map +1 -0
  62. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.ts +152 -0
  63. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.ts.map +1 -0
  64. package/dist/agents/middlewareAgent/middlewares/promptCaching.js +152 -0
  65. package/dist/agents/middlewareAgent/middlewares/promptCaching.js.map +1 -0
  66. package/dist/agents/middlewareAgent/middlewares/summarization.cjs +262 -0
  67. package/dist/agents/middlewareAgent/middlewares/summarization.cjs.map +1 -0
  68. package/dist/agents/middlewareAgent/middlewares/summarization.d.cts +89 -0
  69. package/dist/agents/middlewareAgent/middlewares/summarization.d.cts.map +1 -0
  70. package/dist/agents/middlewareAgent/middlewares/summarization.d.ts +89 -0
  71. package/dist/agents/middlewareAgent/middlewares/summarization.d.ts.map +1 -0
  72. package/dist/agents/middlewareAgent/middlewares/summarization.js +260 -0
  73. package/dist/agents/middlewareAgent/middlewares/summarization.js.map +1 -0
  74. package/dist/agents/middlewareAgent/nodes/AfterModalNode.cjs +29 -0
  75. package/dist/agents/middlewareAgent/nodes/AfterModalNode.cjs.map +1 -0
  76. package/dist/agents/middlewareAgent/nodes/AfterModalNode.js +29 -0
  77. package/dist/agents/middlewareAgent/nodes/AfterModalNode.js.map +1 -0
  78. package/dist/agents/middlewareAgent/nodes/AgentNode.cjs +332 -0
  79. package/dist/agents/middlewareAgent/nodes/AgentNode.cjs.map +1 -0
  80. package/dist/agents/middlewareAgent/nodes/AgentNode.js +331 -0
  81. package/dist/agents/middlewareAgent/nodes/AgentNode.js.map +1 -0
  82. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.cjs +27 -0
  83. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.cjs.map +1 -0
  84. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.js +27 -0
  85. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.js.map +1 -0
  86. package/dist/agents/middlewareAgent/nodes/middleware.cjs +73 -0
  87. package/dist/agents/middlewareAgent/nodes/middleware.cjs.map +1 -0
  88. package/dist/agents/middlewareAgent/nodes/middleware.js +73 -0
  89. package/dist/agents/middlewareAgent/nodes/middleware.js.map +1 -0
  90. package/dist/agents/middlewareAgent/nodes/utils.cjs +74 -0
  91. package/dist/agents/middlewareAgent/nodes/utils.cjs.map +1 -0
  92. package/dist/agents/middlewareAgent/nodes/utils.js +70 -0
  93. package/dist/agents/middlewareAgent/nodes/utils.js.map +1 -0
  94. package/dist/agents/middlewareAgent/types.d.cts +393 -0
  95. package/dist/agents/middlewareAgent/types.d.cts.map +1 -0
  96. package/dist/agents/middlewareAgent/types.d.ts +393 -0
  97. package/dist/agents/middlewareAgent/types.d.ts.map +1 -0
  98. package/dist/agents/nodes/ToolNode.d.cts +3 -3
  99. package/dist/agents/types.d.cts +4 -4
  100. package/dist/chains/api/api_chain.d.cts +1 -1
  101. package/dist/chains/base.d.cts +1 -1
  102. package/dist/chains/combine_docs_chain.d.cts +1 -1
  103. package/dist/chains/combine_documents/stuff.d.cts +1 -1
  104. package/dist/chains/conversational_retrieval_chain.d.cts +1 -1
  105. package/dist/chains/graph_qa/cypher.d.cts +1 -1
  106. package/dist/chains/history_aware_retriever.d.cts +2 -2
  107. package/dist/chains/llm_chain.d.cts +3 -3
  108. package/dist/chains/openai_functions/base.d.cts +3 -3
  109. package/dist/chains/openai_functions/openapi.d.cts +1 -1
  110. package/dist/chains/openai_functions/structured_output.d.cts +3 -3
  111. package/dist/chains/openai_functions/tagging.d.cts +1 -1
  112. package/dist/chains/query_constructor/index.cjs +1 -1
  113. package/dist/chains/query_constructor/index.d.cts +2 -2
  114. package/dist/chains/query_constructor/index.d.ts +1 -1
  115. package/dist/chains/query_constructor/index.js +1 -1
  116. package/dist/chains/question_answering/load.d.ts +2 -2
  117. package/dist/chains/question_answering/load.d.ts.map +1 -1
  118. package/dist/chains/retrieval.d.cts +1 -1
  119. package/dist/chains/router/llm_router.d.cts +1 -1
  120. package/dist/chains/router/multi_prompt.cjs +1 -1
  121. package/dist/chains/router/multi_prompt.js +1 -1
  122. package/dist/chains/router/multi_retrieval_qa.cjs +1 -1
  123. package/dist/chains/router/multi_retrieval_qa.js +1 -1
  124. package/dist/chains/sql_db/sql_db_chain.d.cts +2 -2
  125. package/dist/chains/summarization/load.d.ts +2 -2
  126. package/dist/chains/summarization/load.d.ts.map +1 -1
  127. package/dist/chat_models/universal.d.cts +3 -3
  128. package/dist/evaluation/agents/trajectory.d.cts +3 -3
  129. package/dist/evaluation/agents/trajectory.d.ts.map +1 -1
  130. package/dist/evaluation/comparison/pairwise.d.cts +1 -1
  131. package/dist/evaluation/comparison/pairwise.d.cts.map +1 -1
  132. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  133. package/dist/evaluation/criteria/criteria.d.cts +1 -1
  134. package/dist/evaluation/criteria/criteria.d.cts.map +1 -1
  135. package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
  136. package/dist/evaluation/qa/eval_chain.d.cts +1 -1
  137. package/dist/index.cjs +3 -0
  138. package/dist/index.cjs.map +1 -1
  139. package/dist/index.d.cts +5 -4
  140. package/dist/index.d.ts +3 -2
  141. package/dist/index.js +3 -1
  142. package/dist/index.js.map +1 -1
  143. package/dist/load/import_map.cjs +1 -1
  144. package/dist/load/import_map.js +1 -1
  145. package/dist/memory/summary.d.cts +1 -1
  146. package/dist/output_parsers/fix.d.cts +1 -1
  147. package/dist/output_parsers/http_response.d.cts +1 -1
  148. package/dist/output_parsers/structured.cjs +1 -1
  149. package/dist/output_parsers/structured.d.cts +2 -2
  150. package/dist/output_parsers/structured.d.ts +1 -1
  151. package/dist/output_parsers/structured.js +1 -1
  152. package/dist/tools/fs.d.cts +1 -1
  153. package/dist/tools/json.d.cts +1 -1
  154. package/dist/tools/retriever.d.cts +1 -1
  155. package/dist/tools/vectorstore.d.cts +1 -1
  156. package/dist/tools/webbrowser.d.cts +1 -1
  157. package/package.json +17 -6
@@ -0,0 +1,262 @@
1
+ const require_rolldown_runtime = require('../../../_virtual/rolldown_runtime.cjs');
2
+ const require_middleware = require('../middleware.cjs');
3
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
4
+ const __langchain_langgraph = require_rolldown_runtime.__toESM(require("@langchain/langgraph"));
5
+ const zod = require_rolldown_runtime.__toESM(require("zod"));
6
+ const uuid = require_rolldown_runtime.__toESM(require("uuid"));
7
+
8
+ //#region src/agents/middlewareAgent/middlewares/summarization.ts
9
+ // Default prompt for the summarization model; the "{messages}" placeholder is
+ // replaced with the serialized conversation history at summarization time.
+ const DEFAULT_SUMMARY_PROMPT = `<role>
10
+ Context Extraction Assistant
11
+ </role>
12
+
13
+ <primary_objective>
14
+ Your sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.
15
+ </primary_objective>
16
+
17
+ <objective_information>
18
+ You're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.
19
+ This context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.
20
+ </objective_information>
21
+
22
+ <instructions>
23
+ The conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.
24
+ You want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.
25
+ </instructions>
26
+
27
+ The user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:
28
+
29
+ With all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.
30
+ Respond ONLY with the extracted context. Do not include any additional information, or text before or after the extracted context.
31
+
32
+ <messages>
33
+ Messages to summarize:
34
+ {messages}
35
+ </messages>`;
36
+ // Marker line that introduces the generated summary inside the system message;
+ // also used to strip any previous summary from the system prompt before re-summarizing.
+ const SUMMARY_PREFIX = "## Previous conversation summary:";
37
+ // Tuning constants: how many recent messages to preserve verbatim, the token
+ // budget when trimming messages for the summary request (4e3 = 4000), the
+ // fallback message count if trimming fails, and how far around a candidate
+ // cutoff to scan for AI/Tool message pairs.
+ const DEFAULT_MESSAGES_TO_KEEP = 20;
38
+ const DEFAULT_TRIM_TOKEN_LIMIT = 4e3;
39
+ const DEFAULT_FALLBACK_MESSAGE_COUNT = 15;
40
+ const SEARCH_RANGE_FOR_TOOL_PAIRS = 5;
41
+ // zod schema validating the middleware options; contextSchema.parse(options)
+ // applies the defaults declared here.
+ const contextSchema = zod.z.object({
42
+ model: zod.z.custom(),
43
+ maxTokensBeforeSummary: zod.z.number().optional(),
44
+ messagesToKeep: zod.z.number().default(DEFAULT_MESSAGES_TO_KEEP),
45
+ tokenCounter: zod.z.function().args(zod.z.array(zod.z.any())).returns(zod.z.union([zod.z.number(), zod.z.promise(zod.z.number())])).optional(),
46
+ summaryPrompt: zod.z.string().default(DEFAULT_SUMMARY_PROMPT),
47
+ summaryPrefix: zod.z.string().default(SUMMARY_PREFIX)
48
+ });
/**
 * Default token counter that approximates token usage from character count,
 * assuming roughly 4 characters per token.
 * @param messages Messages to count tokens for
 * @returns Approximate token count
 */
function countTokensApproximately(messages) {
  const textOf = (msg) => {
    const { content } = msg;
    if (typeof content === "string") return content;
    if (!Array.isArray(content)) return "";
    // Multi-part content: concatenate string parts and "text"-typed parts only.
    let joined = "";
    for (const part of content) {
      if (typeof part === "string") joined += part;
      else if (part.type === "text" && "text" in part) joined += part.text;
    }
    return joined;
  };
  const totalChars = messages.reduce((sum, msg) => sum + textOf(msg).length, 0);
  // Approximate 1 token = 4 characters.
  return Math.ceil(totalChars / 4);
}
/**
 * Summarization middleware that automatically summarizes conversation history when token limits are approached.
 *
 * Before each model call it measures the token footprint of the current
 * messages; once `maxTokensBeforeSummary` is exceeded, older messages are
 * collapsed into a summary stored on the system message while the most recent
 * messages are preserved verbatim (keeping AI/Tool message pairs together).
 *
 * @param options Configuration options for the summarization middleware
 * @returns A middleware instance
 *
 * @example
 * ```ts
 * import { summarizationMiddleware } from "langchain/middleware";
 * import { createAgent } from "langchain";
 *
 * const agent = createAgent({
 *   llm: model,
 *   tools: [getWeather],
 *   middlewares: [
 *     summarizationMiddleware({
 *       model: new ChatOpenAI({ model: "gpt-4o" }),
 *       maxTokensBeforeSummary: 4000,
 *       messagesToKeep: 20,
 *     })
 *   ],
 * });
 * ```
 */
function summarizationMiddleware(options) {
  return require_middleware.createMiddleware({
    name: "SummarizationMiddleware",
    contextSchema,
    beforeModel: async (state, runtime) => {
      // Static options are validated/defaulted once; runtime context overrides them.
      const config = {
        ...contextSchema.parse(options),
        ...runtime.context
      };
      const { messages } = state;
      ensureMessageIds(messages);

      const tokenCounter = config.tokenCounter || countTokensApproximately;
      const totalTokens = await tokenCounter(messages);
      const threshold = config.maxTokensBeforeSummary;
      // No threshold configured, or still under budget: leave the state untouched.
      if (threshold == null || totalTokens < threshold) return;

      const { systemMessage, conversationMessages } = splitSystemMessage(messages);
      const cutoffIndex = findSafeCutoff(conversationMessages, config.messagesToKeep);
      if (cutoffIndex <= 0) return;

      const { messagesToSummarize, preservedMessages } = partitionMessages(
        systemMessage,
        conversationMessages,
        cutoffIndex
      );
      const summary = await createSummary(
        messagesToSummarize,
        config.model,
        config.summaryPrompt,
        tokenCounter
      );
      const updatedSystemMessage = buildUpdatedSystemMessage(
        systemMessage,
        summary,
        config.summaryPrefix
      );

      // Wipe the existing history and replace it with the refreshed system
      // message followed by the preserved recent messages.
      return {
        messages: [
          new __langchain_core_messages.RemoveMessage({ id: __langchain_langgraph.REMOVE_ALL_MESSAGES }),
          updatedSystemMessage,
          ...preservedMessages
        ]
      };
    }
  });
}
/**
 * Ensure every message carries an id, assigning a fresh UUID where missing.
 * Mutates the messages in place.
 */
function ensureMessageIds(messages) {
  messages.forEach((message) => {
    if (!message.id) {
      message.id = (0, uuid.v4)();
    }
  });
}
/**
 * Separate a leading system message (if any) from the conversation messages.
 * Returns the system message (or null) plus the remaining messages.
 */
function splitSystemMessage(messages) {
  const leading = messages.length > 0 ? messages[0] : void 0;
  if (leading && (0, __langchain_core_messages.isSystemMessage)(leading)) {
    return {
      systemMessage: leading,
      conversationMessages: messages.slice(1)
    };
  }
  // No system message: the whole input is conversation history.
  return {
    systemMessage: null,
    conversationMessages: messages
  };
}
/**
 * Partition conversation messages into those to summarize (before the cutoff)
 * and those to preserve verbatim (from the cutoff onward).
 */
function partitionMessages(systemMessage, conversationMessages, cutoffIndex) {
  const head = conversationMessages.slice(0, cutoffIndex);
  const preservedMessages = conversationMessages.slice(cutoffIndex);
  // Prepend the system message so any previous summary it carries is folded
  // into the new summary.
  const messagesToSummarize = systemMessage ? [systemMessage, ...head] : head;
  return {
    messagesToSummarize,
    preservedMessages
  };
}
/**
 * Build the replacement system message: the original system prompt (with any
 * previous summary section removed) followed by the new summary block.
 */
function buildUpdatedSystemMessage(originalSystemMessage, summary, summaryPrefix) {
  // Keep only the portion of the original system content that precedes a
  // prior summary marker; non-string content is ignored.
  let originalContent = "";
  const priorContent = originalSystemMessage?.content;
  if (typeof priorContent === "string") {
    originalContent = priorContent.split(summaryPrefix)[0].trim();
  }
  const summaryBlock = `${summaryPrefix}\n${summary}`;
  const content = originalContent ? `${originalContent}\n${summaryBlock}` : summaryBlock;
  return new __langchain_core_messages.SystemMessage({
    content,
    // Reuse the original id so message-replacement semantics stay stable.
    id: originalSystemMessage?.id || (0, uuid.v4)()
  });
}
/**
 * Find the largest cutoff index <= (length - messagesToKeep) that does not
 * split an AI/Tool message pair. Returns 0 when nothing can be summarized.
 */
function findSafeCutoff(messages, messagesToKeep) {
  if (messages.length <= messagesToKeep) return 0;
  // Walk backwards from the ideal cutoff until a safe point is found.
  let candidate = messages.length - messagesToKeep;
  while (candidate >= 0) {
    if (isSafeCutoffPoint(messages, candidate)) return candidate;
    candidate -= 1;
  }
  return 0;
}
/**
 * Check whether cutting the history at `cutoffIndex` would separate an AI
 * message from the tool messages answering its tool calls.
 */
function isSafeCutoffPoint(messages, cutoffIndex) {
  if (cutoffIndex >= messages.length) return true;
  // Only AI messages near the cutoff can have tool responses on the other side.
  const lo = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);
  const hi = Math.min(messages.length, cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS);
  for (let idx = lo; idx < hi; idx += 1) {
    const candidate = messages[idx];
    if (!hasToolCalls(candidate)) continue;
    const ids = extractToolCallIds(candidate);
    if (cutoffSeparatesToolPair(messages, idx, cutoffIndex, ids)) return false;
  }
  return true;
}
/**
 * Check whether a message is an AI message carrying at least one tool call.
 */
function hasToolCalls(message) {
  if (!(0, __langchain_core_messages.isAIMessage)(message)) return false;
  if (!("tool_calls" in message)) return false;
  const { tool_calls: toolCalls } = message;
  return Array.isArray(toolCalls) && toolCalls.length > 0;
}
/**
 * Collect the ids of all tool calls on an AI message.
 * Entries without an id (or non-object entries) are skipped.
 */
function extractToolCallIds(aiMessage) {
  const ids = new Set();
  const calls = aiMessage.tool_calls;
  if (calls) {
    for (const call of calls) {
      const callId = typeof call === "object" && "id" in call ? call.id : null;
      if (callId) ids.add(callId);
    }
  }
  return ids;
}
/**
 * Check whether the cutoff places an AI message and any of its corresponding
 * tool messages on opposite sides of the boundary.
 */
function cutoffSeparatesToolPair(messages, aiMessageIndex, cutoffIndex, toolCallIds) {
  const aiBeforeCutoff = aiMessageIndex < cutoffIndex;
  // Tool responses always follow the AI message that requested them.
  for (let j = aiMessageIndex + 1; j < messages.length; j++) {
    const candidate = messages[j];
    if (!(0, __langchain_core_messages.isToolMessage)(candidate)) continue;
    if (!toolCallIds.has(candidate.tool_call_id)) continue;
    if ((j < cutoffIndex) !== aiBeforeCutoff) return true;
  }
  return false;
}
/**
 * Generate a summary for the given messages by formatting them into the
 * summary prompt and invoking the model. Returns a human-readable fallback
 * string (never throws) when there is nothing to summarize, the messages
 * cannot be trimmed, or the model call fails.
 */
async function createSummary(messagesToSummarize, model, summaryPrompt, tokenCounter) {
  if (!messagesToSummarize.length) return "No previous conversation history.";
  const trimmedMessages = await trimMessagesForSummary(messagesToSummarize, tokenCounter);
  if (!trimmedMessages.length) return "Previous conversation was too long to summarize.";
  try {
    // Use a replacer function so "$"-sequences in the serialized messages
    // (e.g. "$&", "$'", "$`") are not interpreted by String.prototype.replace
    // as substitution patterns, which would corrupt the prompt.
    const serialized = JSON.stringify(trimmedMessages, null, 2);
    const formattedPrompt = summaryPrompt.replace("{messages}", () => serialized);
    const response = await model.invoke(formattedPrompt);
    const { content } = response;
    return typeof content === "string" ? content.trim() : "Error generating summary: Invalid response format";
  } catch (e) {
    // Summarization is best-effort: surface the error in the summary text
    // rather than aborting the agent run.
    return `Error generating summary: ${e}`;
  }
}
/**
 * Trim messages to fit within the summary-generation token budget.
 * Falls back to the last N messages if trimming itself fails.
 */
async function trimMessagesForSummary(messages, tokenCounter) {
  try {
    const trimOptions = {
      maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,
      // trimMessages expects an async counter; wrap the (possibly sync) one.
      tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),
      strategy: "last",
      allowPartial: true,
      includeSystem: true
    };
    return await (0, __langchain_core_messages.trimMessages)(messages, trimOptions);
  } catch (e) {
    // Deliberate best-effort fallback: keep only the most recent messages.
    return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);
  }
}
258
+
259
+ //#endregion
260
+ // Public API of this compiled module: the middleware factory and the default token counter.
+ exports.countTokensApproximately = countTokensApproximately;
261
+ exports.summarizationMiddleware = summarizationMiddleware;
262
+ //# sourceMappingURL=summarization.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"summarization.cjs","names":["z","messages: BaseMessage[]","textContent: string","options: z.input<typeof contextSchema>","createMiddleware","RemoveMessage","REMOVE_ALL_MESSAGES","systemMessage: SystemMessage | null","conversationMessages: BaseMessage[]","cutoffIndex: number","originalSystemMessage: SystemMessage | null","summary: string","summaryPrefix: string","content","SystemMessage","messagesToKeep: number","message: BaseMessage","aiMessage: AIMessage","aiMessageIndex: number","toolCallIds: Set<string>","messagesToSummarize: BaseMessage[]","model: BaseLanguageModel","summaryPrompt: string","tokenCounter: TokenCounter"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { v4 as uuid } from \"uuid\";\nimport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n isToolMessage,\n RemoveMessage,\n trimMessages,\n isSystemMessage,\n isAIMessage,\n} from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { REMOVE_ALL_MESSAGES } from \"@langchain/langgraph\";\nimport { createMiddleware } from \"../middleware.js\";\n\nconst DEFAULT_SUMMARY_PROMPT = `<role>\nContext Extraction Assistant\n</role>\n\n<primary_objective>\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\n</primary_objective>\n\n<objective_information>\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\n</objective_information>\n\n<instructions>\nThe conversation history below will be replaced with the context you extract in this step. 
Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\n</instructions>\n\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\n\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\nRespond ONLY with the extracted context. Do not include any additional information, or text before or after the extracted context.\n\n<messages>\nMessages to summarize:\n{messages}\n</messages>`;\n\nconst SUMMARY_PREFIX = \"## Previous conversation summary:\";\n\nconst DEFAULT_MESSAGES_TO_KEEP = 20;\nconst DEFAULT_TRIM_TOKEN_LIMIT = 4000;\nconst DEFAULT_FALLBACK_MESSAGE_COUNT = 15;\nconst SEARCH_RANGE_FOR_TOOL_PAIRS = 5;\n\ntype TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\n\nconst contextSchema = z.object({\n model: z.custom<BaseLanguageModel>(),\n maxTokensBeforeSummary: z.number().optional(),\n messagesToKeep: z.number().default(DEFAULT_MESSAGES_TO_KEEP),\n tokenCounter: z\n .function()\n .args(z.array(z.any()))\n .returns(z.union([z.number(), z.promise(z.number())]))\n .optional(),\n summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),\n summaryPrefix: z.string().default(SUMMARY_PREFIX),\n});\n\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport function countTokensApproximately(messages: BaseMessage[]): number 
{\n let totalChars = 0;\n for (const msg of messages) {\n let textContent: string;\n if (typeof msg.content === \"string\") {\n textContent = msg.content;\n } else if (Array.isArray(msg.content)) {\n textContent = msg.content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (item.type === \"text\" && \"text\" in item) return item.text;\n return \"\";\n })\n .join(\"\");\n } else {\n textContent = \"\";\n }\n totalChars += textContent.length;\n }\n // Approximate 1 token = 4 characters\n return Math.ceil(totalChars / 4);\n}\n\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport function summarizationMiddleware(\n options: z.input<typeof contextSchema>\n) {\n return createMiddleware({\n name: \"SummarizationMiddleware\",\n contextSchema,\n beforeModel: async (state, runtime) => {\n const config = { ...contextSchema.parse(options), ...runtime.context };\n const { messages } = state;\n\n // Ensure all messages have IDs\n ensureMessageIds(messages);\n\n const tokenCounter = config.tokenCounter || countTokensApproximately;\n const totalTokens = await tokenCounter(messages);\n\n if (\n config.maxTokensBeforeSummary 
== null ||\n totalTokens < config.maxTokensBeforeSummary\n ) {\n return;\n }\n\n const { systemMessage, conversationMessages } =\n splitSystemMessage(messages);\n const cutoffIndex = findSafeCutoff(\n conversationMessages,\n config.messagesToKeep\n );\n\n if (cutoffIndex <= 0) {\n return;\n }\n\n const { messagesToSummarize, preservedMessages } = partitionMessages(\n systemMessage,\n conversationMessages,\n cutoffIndex\n );\n\n const summary = await createSummary(\n messagesToSummarize,\n config.model,\n config.summaryPrompt,\n tokenCounter\n );\n\n const updatedSystemMessage = buildUpdatedSystemMessage(\n systemMessage,\n summary,\n config.summaryPrefix\n );\n\n return {\n messages: [\n new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),\n updatedSystemMessage,\n ...preservedMessages,\n ],\n };\n },\n });\n}\n\n/**\n * Ensure all messages have unique IDs\n */\nfunction ensureMessageIds(messages: BaseMessage[]): void {\n for (const msg of messages) {\n if (!msg.id) {\n msg.id = uuid();\n }\n }\n}\n\n/**\n * Separate system message from conversation messages\n */\nfunction splitSystemMessage(messages: BaseMessage[]): {\n systemMessage: SystemMessage | null;\n conversationMessages: BaseMessage[];\n} {\n if (messages.length > 0 && isSystemMessage(messages[0])) {\n return {\n systemMessage: messages[0] as SystemMessage,\n conversationMessages: messages.slice(1),\n };\n }\n return {\n systemMessage: null,\n conversationMessages: messages,\n };\n}\n\n/**\n * Partition messages into those to summarize and those to preserve\n */\nfunction partitionMessages(\n systemMessage: SystemMessage | null,\n conversationMessages: BaseMessage[],\n cutoffIndex: number\n): { messagesToSummarize: BaseMessage[]; preservedMessages: BaseMessage[] } {\n const messagesToSummarize = conversationMessages.slice(0, cutoffIndex);\n const preservedMessages = conversationMessages.slice(cutoffIndex);\n\n // Include system message in messages to summarize to capture previous summaries\n if (systemMessage) 
{\n messagesToSummarize.unshift(systemMessage);\n }\n\n return { messagesToSummarize, preservedMessages };\n}\n\n/**\n * Build updated system message incorporating the summary\n */\nfunction buildUpdatedSystemMessage(\n originalSystemMessage: SystemMessage | null,\n summary: string,\n summaryPrefix: string\n): SystemMessage {\n let originalContent = \"\";\n if (originalSystemMessage) {\n const { content } = originalSystemMessage;\n if (typeof content === \"string\") {\n originalContent = content.split(summaryPrefix)[0].trim();\n }\n }\n\n const content = originalContent\n ? `${originalContent}\\n${summaryPrefix}\\n${summary}`\n : `${summaryPrefix}\\n${summary}`;\n\n return new SystemMessage({\n content,\n id: originalSystemMessage?.id || uuid(),\n });\n}\n\n/**\n * Find safe cutoff point that preserves AI/Tool message pairs\n */\nfunction findSafeCutoff(\n messages: BaseMessage[],\n messagesToKeep: number\n): number {\n if (messages.length <= messagesToKeep) {\n return 0;\n }\n\n const targetCutoff = messages.length - messagesToKeep;\n\n for (let i = targetCutoff; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Check if cutting at index would separate AI/Tool message pairs\n */\nfunction isSafeCutoffPoint(\n messages: BaseMessage[],\n cutoffIndex: number\n): boolean {\n if (cutoffIndex >= messages.length) {\n return true;\n }\n\n const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);\n const searchEnd = Math.min(\n messages.length,\n cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS\n );\n\n for (let i = searchStart; i < searchEnd; i++) {\n if (!hasToolCalls(messages[i])) {\n continue;\n }\n\n const toolCallIds = extractToolCallIds(messages[i] as AIMessage);\n if (cutoffSeparatesToolPair(messages, i, cutoffIndex, toolCallIds)) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Check if message is an AI message with tool calls\n */\nfunction hasToolCalls(message: BaseMessage): boolean {\n 
return (\n isAIMessage(message) &&\n \"tool_calls\" in message &&\n Array.isArray(message.tool_calls) &&\n message.tool_calls.length > 0\n );\n}\n\n/**\n * Extract tool call IDs from an AI message\n */\nfunction extractToolCallIds(aiMessage: AIMessage): Set<string> {\n const toolCallIds = new Set<string>();\n if (aiMessage.tool_calls) {\n for (const toolCall of aiMessage.tool_calls) {\n const id =\n typeof toolCall === \"object\" && \"id\" in toolCall ? toolCall.id : null;\n if (id) {\n toolCallIds.add(id);\n }\n }\n }\n return toolCallIds;\n}\n\n/**\n * Check if cutoff separates an AI message from its corresponding tool messages\n */\nfunction cutoffSeparatesToolPair(\n messages: BaseMessage[],\n aiMessageIndex: number,\n cutoffIndex: number,\n toolCallIds: Set<string>\n): boolean {\n for (let j = aiMessageIndex + 1; j < messages.length; j++) {\n const message = messages[j];\n if (isToolMessage(message) && toolCallIds.has(message.tool_call_id)) {\n const aiBeforeCutoff = aiMessageIndex < cutoffIndex;\n const toolBeforeCutoff = j < cutoffIndex;\n if (aiBeforeCutoff !== toolBeforeCutoff) {\n return true;\n }\n }\n }\n return false;\n}\n\n/**\n * Generate summary for the given messages\n */\nasync function createSummary(\n messagesToSummarize: BaseMessage[],\n model: BaseLanguageModel,\n summaryPrompt: string,\n tokenCounter: TokenCounter\n): Promise<string> {\n if (!messagesToSummarize.length) {\n return \"No previous conversation history.\";\n }\n\n const trimmedMessages = await trimMessagesForSummary(\n messagesToSummarize,\n tokenCounter\n );\n\n if (!trimmedMessages.length) {\n return \"Previous conversation was too long to summarize.\";\n }\n\n try {\n const formattedPrompt = summaryPrompt.replace(\n \"{messages}\",\n JSON.stringify(trimmedMessages, null, 2)\n );\n const response = await model.invoke(formattedPrompt);\n const { content } = response;\n return typeof content === \"string\"\n ? 
content.trim()\n : \"Error generating summary: Invalid response format\";\n } catch (e) {\n return `Error generating summary: ${e}`;\n }\n}\n\n/**\n * Trim messages to fit within summary generation limits\n */\nasync function trimMessagesForSummary(\n messages: BaseMessage[],\n tokenCounter: TokenCounter\n): Promise<BaseMessage[]> {\n try {\n return await trimMessages(messages, {\n maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,\n tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),\n strategy: \"last\",\n allowPartial: true,\n includeSystem: true,\n });\n } catch (e) {\n // Fallback to last N messages if trimming fails\n return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);\n }\n}\n"],"mappings":";;;;;;;;AAgBA,MAAM,yBAAyB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;WA0BrB,CAAC;AAEZ,MAAM,iBAAiB;AAEvB,MAAM,2BAA2B;AACjC,MAAM,2BAA2B;AACjC,MAAM,iCAAiC;AACvC,MAAM,8BAA8B;AAIpC,MAAM,gBAAgBA,MAAE,OAAO;CAC7B,OAAOA,MAAE,QAA2B;CACpC,wBAAwBA,MAAE,QAAQ,CAAC,UAAU;CAC7C,gBAAgBA,MAAE,QAAQ,CAAC,QAAQ,yBAAyB;CAC5D,cAAcA,MACX,UAAU,CACV,KAAKA,MAAE,MAAMA,MAAE,KAAK,CAAC,CAAC,CACtB,QAAQA,MAAE,MAAM,CAACA,MAAE,QAAQ,EAAEA,MAAE,QAAQA,MAAE,QAAQ,CAAC,AAAC,EAAC,CAAC,CACrD,UAAU;CACb,eAAeA,MAAE,QAAQ,CAAC,QAAQ,uBAAuB;CACzD,eAAeA,MAAE,QAAQ,CAAC,QAAQ,eAAe;AAClD,EAAC;;;;;;AAOF,SAAgB,yBAAyBC,UAAiC;CACxE,IAAI,aAAa;AACjB,MAAK,MAAM,OAAO,UAAU;EAC1B,IAAIC;AACJ,MAAI,OAAO,IAAI,YAAY,UACzB,cAAc,IAAI;WACT,MAAM,QAAQ,IAAI,QAAQ,EACnC,cAAc,IAAI,QACf,IAAI,CAAC,SAAS;AACb,OAAI,OAAO,SAAS,SAAU,QAAO;AACrC,OAAI,KAAK,SAAS,UAAU,UAAU,KAAM,QAAO,KAAK;AACxD,UAAO;EACR,EAAC,CACD,KAAK,GAAG;OAEX,cAAc;EAEhB,cAAc,YAAY;CAC3B;AAED,QAAO,KAAK,KAAK,aAAa,EAAE;AACjC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+BD,SAAgB,wBACdC,SACA;AACA,QAAOC,oCAAiB;EACtB,MAAM;EACN;EACA,aAAa,OAAO,OAAO,YAAY;GACrC,MAAM,SAAS;IAAE,GAAG,cAAc,MAAM,QAAQ;IAAE,GAAG,QAAQ;GAAS;GACtE,MAAM,EAAE,UAAU,GAAG;GAGrB,iBAAiB,SAAS;GAE1B,MAAM,eAAe,OAAO,gBAAgB;GAC5C,MAAM,cAAc,MAAM,aAAa,SAAS;AAEhD,OACE,OAAO,0BAA0B,QACjC,cAAc,OAAO,uBAErB;GAGF,MAAM,EAAE,eAAe,sBAAsB,GAC3C,mBAAmB,SAAS;GAC9B,MAAM,cAAc,eAClB,sBA
CA,OAAO,eACR;AAED,OAAI,eAAe,EACjB;GAGF,MAAM,EAAE,qBAAqB,mBAAmB,GAAG,kBACjD,eACA,sBACA,YACD;GAED,MAAM,UAAU,MAAM,cACpB,qBACA,OAAO,OACP,OAAO,eACP,aACD;GAED,MAAM,uBAAuB,0BAC3B,eACA,SACA,OAAO,cACR;AAED,UAAO,EACL,UAAU;IACR,IAAIC,wCAAc,EAAE,IAAIC,0CAAqB;IAC7C;IACA,GAAG;GACJ,EACF;EACF;CACF,EAAC;AACH;;;;AAKD,SAAS,iBAAiBL,UAA+B;AACvD,MAAK,MAAM,OAAO,SAChB,KAAI,CAAC,IAAI,IACP,IAAI,mBAAW;AAGpB;;;;AAKD,SAAS,mBAAmBA,UAG1B;AACA,KAAI,SAAS,SAAS,oDAAqB,SAAS,GAAG,CACrD,QAAO;EACL,eAAe,SAAS;EACxB,sBAAsB,SAAS,MAAM,EAAE;CACxC;AAEH,QAAO;EACL,eAAe;EACf,sBAAsB;CACvB;AACF;;;;AAKD,SAAS,kBACPM,eACAC,sBACAC,aAC0E;CAC1E,MAAM,sBAAsB,qBAAqB,MAAM,GAAG,YAAY;CACtE,MAAM,oBAAoB,qBAAqB,MAAM,YAAY;AAGjE,KAAI,eACF,oBAAoB,QAAQ,cAAc;AAG5C,QAAO;EAAE;EAAqB;CAAmB;AAClD;;;;AAKD,SAAS,0BACPC,uBACAC,SACAC,eACe;CACf,IAAI,kBAAkB;AACtB,KAAI,uBAAuB;EACzB,MAAM,EAAE,oBAAS,GAAG;AACpB,MAAI,OAAOC,cAAY,UACrB,kBAAkBA,UAAQ,MAAM,cAAc,CAAC,GAAG,MAAM;CAE3D;CAED,MAAM,UAAU,kBACZ,GAAG,gBAAgB,EAAE,EAAE,cAAc,EAAE,EAAE,SAAS,GAClD,GAAG,cAAc,EAAE,EAAE,SAAS;AAElC,QAAO,IAAIC,wCAAc;EACvB;EACA,IAAI,uBAAuB,oBAAY;CACxC;AACF;;;;AAKD,SAAS,eACPb,UACAc,gBACQ;AACR,KAAI,SAAS,UAAU,eACrB,QAAO;CAGT,MAAM,eAAe,SAAS,SAAS;AAEvC,MAAK,IAAI,IAAI,cAAc,KAAK,GAAG,IACjC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,kBACPd,UACAQ,aACS;AACT,KAAI,eAAe,SAAS,OAC1B,QAAO;CAGT,MAAM,cAAc,KAAK,IAAI,GAAG,cAAc,4BAA4B;CAC1E,MAAM,YAAY,KAAK,IACrB,SAAS,QACT,cAAc,4BACf;AAED,MAAK,IAAI,IAAI,aAAa,IAAI,WAAW,KAAK;AAC5C,MAAI,CAAC,aAAa,SAAS,GAAG,CAC5B;EAGF,MAAM,cAAc,mBAAmB,SAAS,GAAgB;AAChE,MAAI,wBAAwB,UAAU,GAAG,aAAa,YAAY,CAChE,QAAO;CAEV;AAED,QAAO;AACR;;;;AAKD,SAAS,aAAaO,SAA+B;AACnD,mDACc,QAAQ,IACpB,gBAAgB,WAChB,MAAM,QAAQ,QAAQ,WAAW,IACjC,QAAQ,WAAW,SAAS;AAE/B;;;;AAKD,SAAS,mBAAmBC,WAAmC;CAC7D,MAAM,8BAAc,IAAI;AACxB,KAAI,UAAU,WACZ,MAAK,MAAM,YAAY,UAAU,YAAY;EAC3C,MAAM,KACJ,OAAO,aAAa,YAAY,QAAQ,WAAW,SAAS,KAAK;AACnE,MAAI,IACF,YAAY,IAAI,GAAG;CAEtB;AAEH,QAAO;AACR;;;;AAKD,SAAS,wBACPhB,UACAiB,gBACAT,aACAU,aACS;AACT,MAAK,IAAI,IAAI,iBAAiB,GAAG,IAAI,SAAS,QAAQ,KAAK;EACzD,MAAM,UAAU,SAAS;AACzB,
mDAAkB,QAAQ,IAAI,YAAY,IAAI,QAAQ,aAAa,EAAE;GACnE,MAAM,iBAAiB,iBAAiB;GACxC,MAAM,mBAAmB,IAAI;AAC7B,OAAI,mBAAmB,iBACrB,QAAO;EAEV;CACF;AACD,QAAO;AACR;;;;AAKD,eAAe,cACbC,qBACAC,OACAC,eACAC,cACiB;AACjB,KAAI,CAAC,oBAAoB,OACvB,QAAO;CAGT,MAAM,kBAAkB,MAAM,uBAC5B,qBACA,aACD;AAED,KAAI,CAAC,gBAAgB,OACnB,QAAO;AAGT,KAAI;EACF,MAAM,kBAAkB,cAAc,QACpC,cACA,KAAK,UAAU,iBAAiB,MAAM,EAAE,CACzC;EACD,MAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;EACpD,MAAM,EAAE,SAAS,GAAG;AACpB,SAAO,OAAO,YAAY,WACtB,QAAQ,MAAM,GACd;CACL,SAAQ,GAAG;AACV,SAAO,CAAC,0BAA0B,EAAE,GAAG;CACxC;AACF;;;;AAKD,eAAe,uBACbtB,UACAsB,cACwB;AACxB,KAAI;AACF,SAAO,kDAAmB,UAAU;GAClC,WAAW;GACX,cAAc,OAAO,SAAS,QAAQ,QAAQ,aAAa,KAAK,CAAC;GACjE,UAAU;GACV,cAAc;GACd,eAAe;EAChB,EAAC;CACH,SAAQ,GAAG;AAEV,SAAO,SAAS,MAAM,CAAC,+BAA+B;CACvD;AACF"}
@@ -0,0 +1,89 @@
1
+ import { AgentMiddleware } from "../types.cjs";
2
+ import * as _langchain_core_language_models_base1 from "@langchain/core/language_models/base";
3
+ import { BaseLanguageModel } from "@langchain/core/language_models/base";
4
+ import { z } from "zod";
5
+ import { BaseMessage } from "@langchain/core/messages";
6
+
7
+ //#region src/agents/middlewareAgent/middlewares/summarization.d.ts
8
+ declare const contextSchema: z.ZodObject<{
9
+ model: z.ZodType<BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>>;
10
+ maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;
11
+ messagesToKeep: z.ZodDefault<z.ZodNumber>;
12
+ tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, "many">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;
13
+ summaryPrompt: z.ZodDefault<z.ZodString>;
14
+ summaryPrefix: z.ZodDefault<z.ZodString>;
15
+ }, "strip", z.ZodTypeAny, {
16
+ model: BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>;
17
+ maxTokensBeforeSummary?: number | undefined;
18
+ messagesToKeep: number;
19
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
20
+ summaryPrompt: string;
21
+ summaryPrefix: string;
22
+ }, {
23
+ model: BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>;
24
+ maxTokensBeforeSummary?: number | undefined;
25
+ messagesToKeep?: number | undefined;
26
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
27
+ summaryPrompt?: string | undefined;
28
+ summaryPrefix?: string | undefined;
29
+ }>;
30
+ /**
31
+ * Default token counter that approximates based on character count
32
+ * @param messages Messages to count tokens for
33
+ * @returns Approximate token count
34
+ */
35
+ declare function countTokensApproximately(messages: BaseMessage[]): number;
36
+ /**
37
+ * Summarization middleware that automatically summarizes conversation history when token limits are approached.
38
+ *
39
+ * This middleware monitors message token counts and automatically summarizes older
40
+ * messages when a threshold is reached, preserving recent messages and maintaining
41
+ * context continuity by ensuring AI/Tool message pairs remain together.
42
+ *
43
+ * @param options Configuration options for the summarization middleware
44
+ * @returns A middleware instance
45
+ *
46
+ * @example
47
+ * ```ts
48
+ * import { summarizationMiddleware } from "langchain/middleware";
49
+ * import { createAgent } from "langchain";
50
+ *
51
+ * const agent = createAgent({
52
+ * llm: model,
53
+ * tools: [getWeather],
54
+ * middlewares: [
55
+ * summarizationMiddleware({
56
+ * model: new ChatOpenAI({ model: "gpt-4o" }),
57
+ * maxTokensBeforeSummary: 4000,
58
+ * messagesToKeep: 20,
59
+ * })
60
+ * ],
61
+ * });
62
+ *
63
+ * ```
64
+ */
65
+ declare function summarizationMiddleware(options: z.input<typeof contextSchema>): AgentMiddleware<undefined, z.ZodObject<{
66
+ model: z.ZodType<BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>>;
67
+ maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;
68
+ messagesToKeep: z.ZodDefault<z.ZodNumber>;
69
+ tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, "many">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;
70
+ summaryPrompt: z.ZodDefault<z.ZodString>;
71
+ summaryPrefix: z.ZodDefault<z.ZodString>;
72
+ }, "strip", z.ZodTypeAny, {
73
+ model: BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>;
74
+ maxTokensBeforeSummary?: number | undefined;
75
+ messagesToKeep: number;
76
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
77
+ summaryPrompt: string;
78
+ summaryPrefix: string;
79
+ }, {
80
+ model: BaseLanguageModel<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>;
81
+ maxTokensBeforeSummary?: number | undefined;
82
+ messagesToKeep?: number | undefined;
83
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
84
+ summaryPrompt?: string | undefined;
85
+ summaryPrefix?: string | undefined;
86
+ }>, any>;
87
+ //#endregion
88
+ export { countTokensApproximately, summarizationMiddleware };
89
+ //# sourceMappingURL=summarization.d.cts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"summarization.d.cts","names":["___types_js0","z","BaseMessage","BaseLanguageModel","contextSchema","_langchain_core_language_models_base1","BaseLanguageModelCallOptions","ZodTypeDef","ZodType","ZodNumber","ZodOptional","ZodDefault","ZodAny","ZodArray","ZodUnknown","ZodTuple","ZodPromise","ZodUnion","ZodFunction","ZodString","ZodTypeAny","Promise","ZodObject","countTokensApproximately","summarizationMiddleware","input","AgentMiddleware"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.d.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\ndeclare const contextSchema: z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | 
Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>;\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport declare function countTokensApproximately(messages: BaseMessage[]): number;\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport declare function summarizationMiddleware(options: z.input<typeof contextSchema>): import(\"../types.js\").AgentMiddleware<undefined, z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: 
z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>, any>;\nexport {};\n"],"mappings":";;;;;;;cAGcI,eAAeH,CAAAA,CAAEqB;SACpBrB,CAAAA,CAAEO,QAAQL,uBAoBnBE,qCAAAA,CApByFC,4BAAAA,GAA+BL,CAAAA,CAAEM,YAAYJ,uBAAFE,qCAAAA,CAAwEC,4BAAAA;0BAClLL,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;kBACxBR,CAAAA,CAAEU,WAAWV,CAAAA,CAAEQ;EAHrBL,YAAAA,EAIIH,CAAAA,CAAES,WAiBlB,CAjB8BT,CAAAA,CAAEiB,WAiBhC,CAjB4CjB,CAAAA,CAAEc,QAiB9C,CAAA,CAjBwDd,CAAAA,CAAEY,QAiB1D,CAjBmEZ,CAAAA,CAAEW,MAiBrE,EAAA,MAAA,CAAA,CAAA,EAjBuFX,CAAAA,CAAEa,UAiBzF,CAAA,EAjBsGb,CAAAA,CAAEgB,QAiBxG,CAAA,CAjBkHhB,CAAAA,CAAEQ,SAiBpH,EAjB+HR,CAAAA,CAAEe,UAiBjI,CAjB4If,CAAAA,CAAEQ,SAiB9I,CAAA,CAAA,CAAA,CAAA,CAAA;EAAA,aAAA,EAhBiBR,CAAAA,CAAEU,UAgBnB,CAhB8BV,CAAAA,CAAEkB,SAgBhC,CAAA;EAAA,aAAAd,EAfiBJ,CAAAA,CAAEU,UAenBN,CAf8BJ,CAAAA,CAAEkB,SAehCd,CAAAA;CApBqH,EAAA,OAAlGF,EAMTF,CAAAA,CAAEmB,UANOjB,EAAAA;EAAiB,KAAsFI,EAOjHJ,iBAPiHI,CAAAA,GAAAA,EAMpGF,qCAAAA,CACyDC,4BAAAA,CAP2CC;EAAU,sBAAAF,CAAAA,EAAAA,MAAAA,GAAAA,SAAwEC;EAA4B,cAAlGH,EAAAA,MAAAA;EAAiB,YAA5IK,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAUuDa,OAVvDb,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAO,aACwBC,EAAAA,MAAAA;EAAS,aAAvBC,EAAAA,MAAAA;CAAW,EAAA;EACG,KAAtBC,EAYXR,iBAZWQ,CAAAA,GAAAA,EAQqDN,qCAAAA,CAIMC,4BAAAA,CAZ3DK;EAAU,
sBACuCC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAM,cAAjBC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,YAAuBC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAcvBO,OAduBP,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAArDC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,aAA8DN,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAS,CAAA;;;;;;AAC5GR,iBAsBKsB,wBAAAA,CAtBHZ,QAAAA,EAsBsCT,WAtBtCS,EAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;AALmB;AA2BxC;AA8BA;;;;;;;;;;;;;;;;;AAI8CV,iBAJtBuB,uBAAAA,CAIwBT,OAAAA,EAJSd,CAAAA,CAAEwB,KAIXV,CAAAA,OAJwBX,aAIxBW,CAAAA,CAAAA,EAJqG,eAIrGA,CAAAA,SAAAA,EAJ0Fd,CAAAA,CAAEqB,SAI5FP,CAAAA;EAAQ,KAA8DN,EAH3GR,CAAAA,CAAEO,OAGyGC,CAHjGN,iBAGiGM,CAAAA,GAAAA,EAJtDJ,qCAAAA,CAC2BC,4BAAAA,CAG2BG,EAHIR,CAAAA,CAAEM,UAGNE,EAHkBN,iBAGlBM,CAAAA,GAAAA,EAHgBJ,qCAAAA,CAAwEC,4BAAAA,CAGxFG,CAAAA;EAAS,sBAAiBA,EAFpHR,CAAAA,CAAES,WAEkHD,CAFtGR,CAAAA,CAAEQ,SAEoGA,CAAAA;EAAS,cAAtBO,EAD/Gf,CAAAA,CAAEU,UAC6GK,CADlGf,CAAAA,CAAEQ,SACgGO,CAAAA;EAAU,YAAnCC,EAAxFhB,CAAAA,CAAES,WAAsFO,CAA1EhB,CAAAA,CAAEiB,WAAwED,CAA5DhB,CAAAA,CAAEc,QAA0DE,CAAAA,CAAhDhB,CAAAA,CAAEY,QAA8CI,CAArChB,CAAAA,CAAEW,MAAmCK,EAAAA,MAAAA,CAAAA,CAAAA,EAAjBhB,CAAAA,CAAEa,UAAeG,CAAAA,EAAFhB,CAAAA,CAAEgB,QAAAA,CAAAA,CAAUhB,CAAAA,CAAEQ,SAAZQ,EAAuBhB,CAAAA,CAAEe,UAAzBC,CAAoChB,CAAAA,CAAEQ,SAAtCQ,CAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAAQ,aAAhFC,EACfjB,CAAAA,CAAEU,UADaO,CACFjB,CAAAA,CAAEkB,SADAD,CAAAA;EAAW,aAAzBR,EAEDT,CAAAA,CAAEU,UAFDD,CAEYT,CAAAA,CAAEkB,SAFdT,CAAAA;CAAW,EAAA,OACGS,EAEtBlB,CAAAA,CAAEmB,UAFoBD,EAAAA;EAAS,KAAtBR,EAGVR,iBAHUQ,CAAAA,GAAAA,EAEGN,qCAAAA,CACyDC,4BAAAA,CAH5DK;EAAU,sBACGQ,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAS,cAAtBR,EAAAA,MAAAA;EAAU,YACjBS,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAIsDC,OAJtDD,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAAAf,EAAAA,MAAAA;EACqF,aAAlGF,EAAAA,MAAAA;CAAiB,EAAA;EAG+C,KAAAE,EAIhEF,iBAJgEE,CAAAA,GAAAA,EAAAA,qCAAAA,CAIMC,4BAAAA,CAAAA;EAA4B,sBAAlGH,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAiB,cAGwCkB,CAAAA,EAAAA,MAAAA,GAAAA,SA
AAA;EAAO,YAlBiEC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAkBxED,OAlBwEC,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAS,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EAAvB,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA"}
@@ -0,0 +1,89 @@
1
+ import { AgentMiddleware } from "../types.js";
2
+ import { BaseMessage } from "@langchain/core/messages";
3
+ import { z } from "zod";
4
+ import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
5
+ import { BaseLanguageModel } from "@langchain/core/language_models/base";
6
+
7
+ //#region src/agents/middlewareAgent/middlewares/summarization.d.ts
8
+ declare const contextSchema: z.ZodObject<{
9
+ model: z.ZodType<BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>>;
10
+ maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;
11
+ messagesToKeep: z.ZodDefault<z.ZodNumber>;
12
+ tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, "many">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;
13
+ summaryPrompt: z.ZodDefault<z.ZodString>;
14
+ summaryPrefix: z.ZodDefault<z.ZodString>;
15
+ }, "strip", z.ZodTypeAny, {
16
+ model: BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>;
17
+ maxTokensBeforeSummary?: number | undefined;
18
+ messagesToKeep: number;
19
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
20
+ summaryPrompt: string;
21
+ summaryPrefix: string;
22
+ }, {
23
+ model: BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>;
24
+ maxTokensBeforeSummary?: number | undefined;
25
+ messagesToKeep?: number | undefined;
26
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
27
+ summaryPrompt?: string | undefined;
28
+ summaryPrefix?: string | undefined;
29
+ }>;
30
+ /**
31
+ * Default token counter that approximates based on character count
32
+ * @param messages Messages to count tokens for
33
+ * @returns Approximate token count
34
+ */
35
+ declare function countTokensApproximately(messages: BaseMessage[]): number;
36
+ /**
37
+ * Summarization middleware that automatically summarizes conversation history when token limits are approached.
38
+ *
39
+ * This middleware monitors message token counts and automatically summarizes older
40
+ * messages when a threshold is reached, preserving recent messages and maintaining
41
+ * context continuity by ensuring AI/Tool message pairs remain together.
42
+ *
43
+ * @param options Configuration options for the summarization middleware
44
+ * @returns A middleware instance
45
+ *
46
+ * @example
47
+ * ```ts
48
+ * import { summarizationMiddleware } from "langchain/middleware";
49
+ * import { createAgent } from "langchain";
50
+ *
51
+ * const agent = createAgent({
52
+ * llm: model,
53
+ * tools: [getWeather],
54
+ * middlewares: [
55
+ * summarizationMiddleware({
56
+ * model: new ChatOpenAI({ model: "gpt-4o" }),
57
+ * maxTokensBeforeSummary: 4000,
58
+ * messagesToKeep: 20,
59
+ * })
60
+ * ],
61
+ * });
62
+ *
63
+ * ```
64
+ */
65
+ declare function summarizationMiddleware(options: z.input<typeof contextSchema>): AgentMiddleware<undefined, z.ZodObject<{
66
+ model: z.ZodType<BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>>;
67
+ maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;
68
+ messagesToKeep: z.ZodDefault<z.ZodNumber>;
69
+ tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, "many">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;
70
+ summaryPrompt: z.ZodDefault<z.ZodString>;
71
+ summaryPrefix: z.ZodDefault<z.ZodString>;
72
+ }, "strip", z.ZodTypeAny, {
73
+ model: BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>;
74
+ maxTokensBeforeSummary?: number | undefined;
75
+ messagesToKeep: number;
76
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
77
+ summaryPrompt: string;
78
+ summaryPrefix: string;
79
+ }, {
80
+ model: BaseLanguageModel<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>;
81
+ maxTokensBeforeSummary?: number | undefined;
82
+ messagesToKeep?: number | undefined;
83
+ tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;
84
+ summaryPrompt?: string | undefined;
85
+ summaryPrefix?: string | undefined;
86
+ }>, any>;
87
+ //#endregion
88
+ export { countTokensApproximately, summarizationMiddleware };
89
+ //# sourceMappingURL=summarization.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"summarization.d.ts","names":["___types_js0","z","BaseMessage","BaseLanguageModel","contextSchema","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","ZodTypeDef","ZodType","ZodNumber","ZodOptional","ZodDefault","ZodAny","ZodArray","ZodUnknown","ZodTuple","ZodPromise","ZodUnion","ZodFunction","ZodString","ZodTypeAny","Promise","ZodObject","countTokensApproximately","summarizationMiddleware","input","AgentMiddleware"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.d.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\ndeclare const contextSchema: z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | 
Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>;\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport declare function countTokensApproximately(messages: BaseMessage[]): number;\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport declare function summarizationMiddleware(options: z.input<typeof contextSchema>): import(\"../types.js\").AgentMiddleware<undefined, z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: 
z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>, any>;\nexport {};\n"],"mappings":";;;;;;;cAGcI,eAAeH,CAAAA,CAAEqB;SACpBrB,CAAAA,CAAEO,QAAQL,uBAoBnBE,qCAAAA,CApByFC,4BAAAA,GAA+BL,CAAAA,CAAEM,YAAYJ,uBAAFE,qCAAAA,CAAwEC,4BAAAA;0BAClLL,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;kBACxBR,CAAAA,CAAEU,WAAWV,CAAAA,CAAEQ;EAHrBL,YAAAA,EAIIH,CAAAA,CAAES,WAiBlB,CAjB8BT,CAAAA,CAAEiB,WAiBhC,CAjB4CjB,CAAAA,CAAEc,QAiB9C,CAAA,CAjBwDd,CAAAA,CAAEY,QAiB1D,CAjBmEZ,CAAAA,CAAEW,MAiBrE,EAAA,MAAA,CAAA,CAAA,EAjBuFX,CAAAA,CAAEa,UAiBzF,CAAA,EAjBsGb,CAAAA,CAAEgB,QAiBxG,CAAA,CAjBkHhB,CAAAA,CAAEQ,SAiBpH,EAjB+HR,CAAAA,CAAEe,UAiBjI,CAjB4If,CAAAA,CAAEQ,SAiB9I,CAAA,CAAA,CAAA,CAAA,CAAA;EAAA,aAAA,EAhBiBR,CAAAA,CAAEU,UAgBnB,CAhB8BV,CAAAA,CAAEkB,SAgBhC,CAAA;EAAA,aAAAd,EAfiBJ,CAAAA,CAAEU,UAenBN,CAf8BJ,CAAAA,CAAEkB,SAehCd,CAAAA;CApBqH,EAAA,OAAlGF,EAMTF,CAAAA,CAAEmB,UANOjB,EAAAA;EAAiB,KAAsFI,EAOjHJ,iBAPiHI,CAAAA,GAAAA,EAMpGF,qCAAAA,CACyDC,4BAAAA,CAP2CC;EAAU,sBAAAF,CAAAA,EAAAA,MAAAA,GAAAA,SAAwEC;EAA4B,cAAlGH,EAAAA,MAAAA;EAAiB,YAA5IK,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAUuDa,OAVvDb,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAO,aACwBC,EAAAA,MAAAA;EAAS,aAAvBC,EAAAA,MAAAA;CAAW,EAAA;EACG,KAAtBC,EAYXR,iBAZWQ,CAAAA,GAAAA,EAQqDN,qCAAAA,CAIMC,4BAAAA,CAZ3DK;EAAU,
sBACuCC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAM,cAAjBC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,YAAuBC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAcvBO,OAduBP,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAArDC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,aAA8DN,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAS,CAAA;;;;;;AAC5GR,iBAsBKsB,wBAAAA,CAtBHZ,QAAAA,EAsBsCT,WAtBtCS,EAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;AALmB;AA2BxC;AA8BA;;;;;;;;;;;;;;;;;AAI8CV,iBAJtBuB,uBAAAA,CAIwBT,OAAAA,EAJSd,CAAAA,CAAEwB,KAIXV,CAAAA,OAJwBX,aAIxBW,CAAAA,CAAAA,EAJqG,eAIrGA,CAAAA,SAAAA,EAJ0Fd,CAAAA,CAAEqB,SAI5FP,CAAAA;EAAQ,KAA8DN,EAH3GR,CAAAA,CAAEO,OAGyGC,CAHjGN,iBAGiGM,CAAAA,GAAAA,EAJtDJ,qCAAAA,CAC2BC,4BAAAA,CAG2BG,EAHIR,CAAAA,CAAEM,UAGNE,EAHkBN,iBAGlBM,CAAAA,GAAAA,EAHgBJ,qCAAAA,CAAwEC,4BAAAA,CAGxFG,CAAAA;EAAS,sBAAiBA,EAFpHR,CAAAA,CAAES,WAEkHD,CAFtGR,CAAAA,CAAEQ,SAEoGA,CAAAA;EAAS,cAAtBO,EAD/Gf,CAAAA,CAAEU,UAC6GK,CADlGf,CAAAA,CAAEQ,SACgGO,CAAAA;EAAU,YAAnCC,EAAxFhB,CAAAA,CAAES,WAAsFO,CAA1EhB,CAAAA,CAAEiB,WAAwED,CAA5DhB,CAAAA,CAAEc,QAA0DE,CAAAA,CAAhDhB,CAAAA,CAAEY,QAA8CI,CAArChB,CAAAA,CAAEW,MAAmCK,EAAAA,MAAAA,CAAAA,CAAAA,EAAjBhB,CAAAA,CAAEa,UAAeG,CAAAA,EAAFhB,CAAAA,CAAEgB,QAAAA,CAAAA,CAAUhB,CAAAA,CAAEQ,SAAZQ,EAAuBhB,CAAAA,CAAEe,UAAzBC,CAAoChB,CAAAA,CAAEQ,SAAtCQ,CAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAAQ,aAAhFC,EACfjB,CAAAA,CAAEU,UADaO,CACFjB,CAAAA,CAAEkB,SADAD,CAAAA;EAAW,aAAzBR,EAEDT,CAAAA,CAAEU,UAFDD,CAEYT,CAAAA,CAAEkB,SAFdT,CAAAA;CAAW,EAAA,OACGS,EAEtBlB,CAAAA,CAAEmB,UAFoBD,EAAAA;EAAS,KAAtBR,EAGVR,iBAHUQ,CAAAA,GAAAA,EAEGN,qCAAAA,CACyDC,4BAAAA,CAH5DK;EAAU,sBACGQ,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAS,cAAtBR,EAAAA,MAAAA;EAAU,YACjBS,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAIsDC,OAJtDD,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAAAf,EAAAA,MAAAA;EACqF,aAAlGF,EAAAA,MAAAA;CAAiB,EAAA;EAG+C,KAAAE,EAIhEF,iBAJgEE,CAAAA,GAAAA,EAAAA,qCAAAA,CAIMC,4BAAAA,CAAAA;EAA4B,sBAAlGH,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAiB,cAGwCkB,CAAAA,EAAAA,MAAAA,GAAAA,SA
AAA;EAAO,YAlBiEC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAkBxED,OAlBwEC,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAS,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EAAvB,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA"}