langchain 1.2.1 → 1.2.3

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
  # langchain
 
+ ## 1.2.3
+
+ ### Patch Changes
+
+ - [#9707](https://github.com/langchain-ai/langchainjs/pull/9707) [`e5063f9`](https://github.com/langchain-ai/langchainjs/commit/e5063f9c6e9989ea067dfdff39262b9e7b6aba62) Thanks [@hntrl](https://github.com/hntrl)! - add security hardening for `load`
+
+ - Updated dependencies [[`e5063f9`](https://github.com/langchain-ai/langchainjs/commit/e5063f9c6e9989ea067dfdff39262b9e7b6aba62), [`8996647`](https://github.com/langchain-ai/langchainjs/commit/89966470e8c0b112ce4f9a326004af6a4173f9e6)]:
+ - @langchain/core@1.1.8
+
+ ## 1.2.2
+
+ ### Patch Changes
+
+ - [#9675](https://github.com/langchain-ai/langchainjs/pull/9675) [`af664be`](https://github.com/langchain-ai/langchainjs/commit/af664becc0245b2315ea2f784c9a6c1d7622dbb4) Thanks [@jacoblee93](https://github.com/jacoblee93)! - Bump LangSmith dep to 0.4.0
+
+ - [#9673](https://github.com/langchain-ai/langchainjs/pull/9673) [`ffb2402`](https://github.com/langchain-ai/langchainjs/commit/ffb24026cd93e58219519ee24c6e23ea57cb5bde) Thanks [@hntrl](https://github.com/hntrl)! - add `context` utility
+
+ - Updated dependencies [[`df9c42b`](https://github.com/langchain-ai/langchainjs/commit/df9c42b3ab61b85309ab47256e1d93c3188435ee), [`8d2982b`](https://github.com/langchain-ai/langchainjs/commit/8d2982bb94c0f4e4314ace3cc98a1ae87571b1ed), [`af664be`](https://github.com/langchain-ai/langchainjs/commit/af664becc0245b2315ea2f784c9a6c1d7622dbb4), [`ffb2402`](https://github.com/langchain-ai/langchainjs/commit/ffb24026cd93e58219519ee24c6e23ea57cb5bde)]:
+ - @langchain/core@1.1.7
+
  ## 1.2.1
 
  ### Patch Changes
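The 1.2.3 entry above hardens the `load` entrypoint, which revives serialized LangChain objects from JSON. As a rough sketch of the kind of call site this affects (assuming the documented `langchain/load` entrypoint and a previously serialized string; the exact hardening options are not shown in this diff):

```ts
// Sketch only: reviving a serialized LangChain object with `load`.
// The 1.2.3 hardening tightens what this call will deserialize, so
// untrusted input should still be validated before being passed in.
import { load } from "langchain/load";

async function revive(serialized: string): Promise<unknown> {
  // `serialized` is the JSON string produced by an earlier serialization step.
  return load(serialized);
}
```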
@@ -1 +1 @@
- {"version":3,"file":"contextEditing.d.cts","names":["BaseMessage","BaseLanguageModel","ContextSize","KeepSize","TokenCounter","ContextEdit","Promise","ClearToolUsesEditConfig","ClearToolUsesEdit","Set","ContextEditingMiddlewareConfig","contextEditingMiddleware","__types_js10","AgentMiddleware"],"sources":["../../../src/agents/middleware/contextEditing.d.ts"],"sourcesContent":["/**\n * Context editing middleware.\n *\n * This middleware mirrors Anthropic's context editing capabilities by clearing\n * older tool results once the conversation grows beyond a configurable token\n * threshold. The implementation is intentionally model-agnostic so it can be used\n * with any LangChain chat model.\n */\nimport type { BaseMessage } from \"@langchain/core/messages\";\nimport type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { type ContextSize, type KeepSize, type TokenCounter } from \"./summarization.js\";\n/**\n * Protocol describing a context editing strategy.\n *\n * Implement this interface to create custom strategies for managing\n * conversation context size. The `apply` method should modify the\n * messages array in-place and return the updated token count.\n *\n * @example\n * ```ts\n * import { HumanMessage, type ContextEdit, type BaseMessage } from \"langchain\";\n *\n * class RemoveOldHumanMessages implements ContextEdit {\n * constructor(private keepRecent: number = 10) {}\n *\n * async apply({ messages, countTokens }) {\n * // Check current token count\n * const tokens = await countTokens(messages);\n *\n * // Remove old human messages if over limit, keeping the most recent ones\n * if (tokens > 50000) {\n * const humanMessages: number[] = [];\n *\n * // Find all human message indices\n * for (let i = 0; i < messages.length; i++) {\n * if (HumanMessage.isInstance(messages[i])) {\n * humanMessages.push(i);\n * }\n * }\n *\n * // Remove old human messages (keep only the most recent N)\n * const toRemove = humanMessages.slice(0, -this.keepRecent);\n * for (let i = toRemove.length - 1; i >= 0; i--) {\n * messages.splice(toRemove[i]!, 1);\n * }\n * }\n * }\n * }\n * ```\n */\nexport interface ContextEdit {\n /**\n * Apply an edit to the message list, returning the new token count.\n *\n * This method should:\n * 1. Check if editing is needed based on `tokens` parameter\n * 2. Modify the `messages` array in-place (if needed)\n * 3. 
Return the new token count after modifications\n *\n * @param params - Parameters for the editing operation\n * @returns The updated token count after applying edits\n */\n apply(params: {\n /**\n * Array of messages to potentially edit (modify in-place)\n */\n messages: BaseMessage[];\n /**\n * Function to count tokens in a message array\n */\n countTokens: TokenCounter;\n /**\n * Optional model instance for model profile information\n */\n model?: BaseLanguageModel;\n }): void | Promise<void>;\n}\n/**\n * Configuration for clearing tool outputs when token limits are exceeded.\n */\nexport interface ClearToolUsesEditConfig {\n /**\n * Trigger conditions for context editing.\n * Can be a single condition object (all properties must be met) or an array of conditions (any condition must be met).\n *\n * @example\n * ```ts\n * // Single condition: trigger if tokens >= 100000 AND messages >= 50\n * trigger: { tokens: 100000, messages: 50 }\n *\n * // Multiple conditions: trigger if (tokens >= 100000 AND messages >= 50) OR (tokens >= 50000 AND messages >= 100)\n * trigger: [\n * { tokens: 100000, messages: 50 },\n * { tokens: 50000, messages: 100 }\n * ]\n *\n * // Fractional trigger: trigger at 80% of model's max input tokens\n * trigger: { fraction: 0.8 }\n * ```\n */\n trigger?: ContextSize | ContextSize[];\n /**\n * Context retention policy applied after editing.\n * Specify how many tool results to preserve using messages, tokens, or fraction.\n *\n * @example\n * ```ts\n * // Keep 3 most recent tool results\n * keep: { messages: 3 }\n *\n * // Keep tool results that fit within 1000 tokens\n * keep: { tokens: 1000 }\n *\n * // Keep tool results that fit within 30% of model's max input tokens\n * keep: { fraction: 0.3 }\n * ```\n */\n keep?: KeepSize;\n /**\n * Whether to clear the originating tool call parameters on the AI message.\n * @default false\n */\n clearToolInputs?: boolean;\n /**\n * List of tool names to exclude from clearing.\n * @default []\n */\n excludeTools?: string[];\n /**\n * Placeholder text inserted for cleared tool outputs.\n * @default \"[cleared]\"\n */\n placeholder?: string;\n /**\n * @deprecated Use `trigger: { tokens: value }` instead.\n */\n triggerTokens?: number;\n /**\n * @deprecated Use `keep: { messages: value }` instead.\n */\n keepMessages?: number;\n /**\n * @deprecated This property is deprecated and will be removed in a future version.\n * Use `keep: { tokens: value }` or `keep: { messages: value }` instead to control retention.\n */\n clearAtLeast?: number;\n}\n/**\n * Strategy for clearing tool outputs when token limits are exceeded.\n *\n * This strategy mirrors Anthropic's `clear_tool_uses_20250919` behavior by\n * replacing older tool results with a placeholder text when the conversation\n * grows too large. 
It preserves the most recent tool results and can exclude\n * specific tools from being cleared.\n *\n * @example\n * ```ts\n * import { ClearToolUsesEdit } from \"langchain\";\n *\n * const edit = new ClearToolUsesEdit({\n * trigger: { tokens: 100000 }, // Start clearing at 100K tokens\n * keep: { messages: 3 }, // Keep 3 most recent tool results\n * excludeTools: [\"important\"], // Never clear \"important\" tool\n * clearToolInputs: false, // Keep tool call arguments\n * placeholder: \"[cleared]\", // Replacement text\n * });\n *\n * // Multiple trigger conditions\n * const edit2 = new ClearToolUsesEdit({\n * trigger: [\n * { tokens: 100000, messages: 50 },\n * { tokens: 50000, messages: 100 }\n * ],\n * keep: { messages: 3 },\n * });\n *\n * // Fractional trigger with model profile\n * const edit3 = new ClearToolUsesEdit({\n * trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens\n * keep: { fraction: 0.3 }, // Keep 30% of model's max tokens\n * });\n * ```\n */\nexport declare class ClearToolUsesEdit implements ContextEdit {\n #private;\n trigger: ContextSize | ContextSize[];\n keep: KeepSize;\n clearToolInputs: boolean;\n excludeTools: Set<string>;\n placeholder: string;\n model: BaseLanguageModel;\n clearAtLeast: number;\n constructor(config?: ClearToolUsesEditConfig);\n apply(params: {\n messages: BaseMessage[];\n model: BaseLanguageModel;\n countTokens: TokenCounter;\n }): Promise<void>;\n}\n/**\n * Configuration for the Context Editing Middleware.\n */\nexport interface ContextEditingMiddlewareConfig {\n /**\n * Sequence of edit strategies to apply. Defaults to a single\n * ClearToolUsesEdit mirroring Anthropic defaults.\n */\n edits?: ContextEdit[];\n /**\n * Whether to use approximate token counting (faster, less accurate)\n * or exact counting implemented by the chat model (potentially slower, more accurate).\n * Currently only OpenAI models support exact counting.\n * @default \"approx\"\n */\n tokenCountMethod?: \"approx\" | \"model\";\n}\n/**\n * Middleware that automatically prunes tool results to manage context size.\n *\n * This middleware applies a sequence of edits when the total input token count\n * exceeds configured thresholds. 
By default, it uses the `ClearToolUsesEdit` strategy\n * which mirrors Anthropic's `clear_tool_uses_20250919` behaviour by clearing older\n * tool results once the conversation exceeds 100,000 tokens.\n *\n * ## Basic Usage\n *\n * Use the middleware with default settings to automatically manage context:\n *\n * @example Basic usage with defaults\n * ```ts\n * import { contextEditingMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware(),\n * ],\n * });\n * ```\n *\n * The default configuration:\n * - Triggers when context exceeds **100,000 tokens**\n * - Keeps the **3 most recent** tool results\n * - Uses **approximate token counting** (fast)\n * - Does not clear tool call arguments\n *\n * ## Custom Configuration\n *\n * Customize the clearing behavior with `ClearToolUsesEdit`:\n *\n * @example Custom ClearToolUsesEdit configuration\n * ```ts\n * import { contextEditingMiddleware, ClearToolUsesEdit } from \"langchain\";\n *\n * // Single condition: trigger if tokens >= 50000 AND messages >= 20\n * const agent1 = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: { tokens: 50000, messages: 20 },\n * keep: { messages: 5 },\n * excludeTools: [\"search\"],\n * clearToolInputs: true,\n * }),\n * ],\n * tokenCountMethod: \"approx\",\n * }),\n * ],\n * });\n *\n * // Multiple conditions: trigger if (tokens >= 50000 AND messages >= 20) OR (tokens >= 30000 AND messages >= 50)\n * const agent2 = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: [\n * { tokens: 50000, messages: 20 },\n * { tokens: 30000, messages: 50 },\n * ],\n * keep: { messages: 5 },\n * }),\n * ],\n * }),\n * ],\n * });\n *\n * // Fractional trigger with model profile\n * const agent3 = createAgent({\n * model: chatModel,\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens\n * keep: { fraction: 0.3 }, // Keep 30% of model's max tokens\n * model: chatModel,\n * }),\n * ],\n * }),\n * ],\n * });\n * ```\n *\n * ## Custom Editing Strategies\n *\n * Implement your own context editing strategy by creating a class that\n * implements the `ContextEdit` interface:\n *\n * @example Custom editing strategy\n * ```ts\n * import { contextEditingMiddleware, type ContextEdit, type TokenCounter } from \"langchain\";\n * import type { BaseMessage } from \"@langchain/core/messages\";\n *\n * class CustomEdit implements ContextEdit {\n * async apply(params: {\n * tokens: number;\n * messages: BaseMessage[];\n * countTokens: TokenCounter;\n * }): Promise<number> {\n * // Implement your custom editing logic here\n * // and apply it to the messages array, then\n * // return the new token count after edits\n * return countTokens(messages);\n * }\n * }\n * ```\n *\n * @param config - Configuration options for the middleware\n * @returns A middleware instance that can be used with `createAgent`\n */\nexport declare function contextEditingMiddleware(config?: ContextEditingMiddlewareConfig): 
import(\"./types.js\").AgentMiddleware<undefined, undefined, any>;\n//# sourceMappingURL=contextEditing.d.ts.map"],"mappings":";;;;;;;AA2EsB;AAKtB;;;;AAqCmB;AAkEnB;;;;;;;;;;;;AAA6D;AAmB7D;AAyIA;;;;;;;;;;;;;;;;;;;UAjSiBK,WAAAA;;;;;;;;;;;;;;;;cAgBCL;;;;iBAIGI;;;;YAILH;aACDK;;;;;UAKEC,uBAAAA;;;;;;;;;;;;;;;;;;;;YAoBHL,cAAcA;;;;;;;;;;;;;;;;;SAiBjBC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAkEUK,iBAAAA,YAA6BH;;WAErCH,cAAcA;QACjBC;;gBAEQM;;SAEPR;;uBAEcM;;cAEPP;WACHC;iBACMG;MACbE;;;;;UAKSI,8BAAAA;;;;;UAKLL;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAoIYM,wBAAAA,UAAkCD,iCAAAA"}
+ {"version":3,"file":"contextEditing.d.cts","names":["BaseMessage","BaseLanguageModel","ContextSize","KeepSize","TokenCounter","ContextEdit","Promise","ClearToolUsesEditConfig","ClearToolUsesEdit","Set","ContextEditingMiddlewareConfig","contextEditingMiddleware","__types_js8","AgentMiddleware"],"sources":["../../../src/agents/middleware/contextEditing.d.ts"],"sourcesContent":["/**\n * Context editing middleware.\n *\n * This middleware mirrors Anthropic's context editing capabilities by clearing\n * older tool results once the conversation grows beyond a configurable token\n * threshold. The implementation is intentionally model-agnostic so it can be used\n * with any LangChain chat model.\n */\nimport type { BaseMessage } from \"@langchain/core/messages\";\nimport type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { type ContextSize, type KeepSize, type TokenCounter } from \"./summarization.js\";\n/**\n * Protocol describing a context editing strategy.\n *\n * Implement this interface to create custom strategies for managing\n * conversation context size. The `apply` method should modify the\n * messages array in-place and return the updated token count.\n *\n * @example\n * ```ts\n * import { HumanMessage, type ContextEdit, type BaseMessage } from \"langchain\";\n *\n * class RemoveOldHumanMessages implements ContextEdit {\n * constructor(private keepRecent: number = 10) {}\n *\n * async apply({ messages, countTokens }) {\n * // Check current token count\n * const tokens = await countTokens(messages);\n *\n * // Remove old human messages if over limit, keeping the most recent ones\n * if (tokens > 50000) {\n * const humanMessages: number[] = [];\n *\n * // Find all human message indices\n * for (let i = 0; i < messages.length; i++) {\n * if (HumanMessage.isInstance(messages[i])) {\n * humanMessages.push(i);\n * }\n * }\n *\n * // Remove old human messages (keep only the most recent N)\n * const toRemove = humanMessages.slice(0, -this.keepRecent);\n * for (let i = toRemove.length - 1; i >= 0; i--) {\n * messages.splice(toRemove[i]!, 1);\n * }\n * }\n * }\n * }\n * ```\n */\nexport interface ContextEdit {\n /**\n * Apply an edit to the message list, returning the new token count.\n *\n * This method should:\n * 1. Check if editing is needed based on `tokens` parameter\n * 2. Modify the `messages` array in-place (if needed)\n * 3. 
Return the new token count after modifications\n *\n * @param params - Parameters for the editing operation\n * @returns The updated token count after applying edits\n */\n apply(params: {\n /**\n * Array of messages to potentially edit (modify in-place)\n */\n messages: BaseMessage[];\n /**\n * Function to count tokens in a message array\n */\n countTokens: TokenCounter;\n /**\n * Optional model instance for model profile information\n */\n model?: BaseLanguageModel;\n }): void | Promise<void>;\n}\n/**\n * Configuration for clearing tool outputs when token limits are exceeded.\n */\nexport interface ClearToolUsesEditConfig {\n /**\n * Trigger conditions for context editing.\n * Can be a single condition object (all properties must be met) or an array of conditions (any condition must be met).\n *\n * @example\n * ```ts\n * // Single condition: trigger if tokens >= 100000 AND messages >= 50\n * trigger: { tokens: 100000, messages: 50 }\n *\n * // Multiple conditions: trigger if (tokens >= 100000 AND messages >= 50) OR (tokens >= 50000 AND messages >= 100)\n * trigger: [\n * { tokens: 100000, messages: 50 },\n * { tokens: 50000, messages: 100 }\n * ]\n *\n * // Fractional trigger: trigger at 80% of model's max input tokens\n * trigger: { fraction: 0.8 }\n * ```\n */\n trigger?: ContextSize | ContextSize[];\n /**\n * Context retention policy applied after editing.\n * Specify how many tool results to preserve using messages, tokens, or fraction.\n *\n * @example\n * ```ts\n * // Keep 3 most recent tool results\n * keep: { messages: 3 }\n *\n * // Keep tool results that fit within 1000 tokens\n * keep: { tokens: 1000 }\n *\n * // Keep tool results that fit within 30% of model's max input tokens\n * keep: { fraction: 0.3 }\n * ```\n */\n keep?: KeepSize;\n /**\n * Whether to clear the originating tool call parameters on the AI message.\n * @default false\n */\n clearToolInputs?: boolean;\n /**\n * List of tool names to exclude from clearing.\n * @default []\n */\n excludeTools?: string[];\n /**\n * Placeholder text inserted for cleared tool outputs.\n * @default \"[cleared]\"\n */\n placeholder?: string;\n /**\n * @deprecated Use `trigger: { tokens: value }` instead.\n */\n triggerTokens?: number;\n /**\n * @deprecated Use `keep: { messages: value }` instead.\n */\n keepMessages?: number;\n /**\n * @deprecated This property is deprecated and will be removed in a future version.\n * Use `keep: { tokens: value }` or `keep: { messages: value }` instead to control retention.\n */\n clearAtLeast?: number;\n}\n/**\n * Strategy for clearing tool outputs when token limits are exceeded.\n *\n * This strategy mirrors Anthropic's `clear_tool_uses_20250919` behavior by\n * replacing older tool results with a placeholder text when the conversation\n * grows too large. 
It preserves the most recent tool results and can exclude\n * specific tools from being cleared.\n *\n * @example\n * ```ts\n * import { ClearToolUsesEdit } from \"langchain\";\n *\n * const edit = new ClearToolUsesEdit({\n * trigger: { tokens: 100000 }, // Start clearing at 100K tokens\n * keep: { messages: 3 }, // Keep 3 most recent tool results\n * excludeTools: [\"important\"], // Never clear \"important\" tool\n * clearToolInputs: false, // Keep tool call arguments\n * placeholder: \"[cleared]\", // Replacement text\n * });\n *\n * // Multiple trigger conditions\n * const edit2 = new ClearToolUsesEdit({\n * trigger: [\n * { tokens: 100000, messages: 50 },\n * { tokens: 50000, messages: 100 }\n * ],\n * keep: { messages: 3 },\n * });\n *\n * // Fractional trigger with model profile\n * const edit3 = new ClearToolUsesEdit({\n * trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens\n * keep: { fraction: 0.3 }, // Keep 30% of model's max tokens\n * });\n * ```\n */\nexport declare class ClearToolUsesEdit implements ContextEdit {\n #private;\n trigger: ContextSize | ContextSize[];\n keep: KeepSize;\n clearToolInputs: boolean;\n excludeTools: Set<string>;\n placeholder: string;\n model: BaseLanguageModel;\n clearAtLeast: number;\n constructor(config?: ClearToolUsesEditConfig);\n apply(params: {\n messages: BaseMessage[];\n model: BaseLanguageModel;\n countTokens: TokenCounter;\n }): Promise<void>;\n}\n/**\n * Configuration for the Context Editing Middleware.\n */\nexport interface ContextEditingMiddlewareConfig {\n /**\n * Sequence of edit strategies to apply. Defaults to a single\n * ClearToolUsesEdit mirroring Anthropic defaults.\n */\n edits?: ContextEdit[];\n /**\n * Whether to use approximate token counting (faster, less accurate)\n * or exact counting implemented by the chat model (potentially slower, more accurate).\n * Currently only OpenAI models support exact counting.\n * @default \"approx\"\n */\n tokenCountMethod?: \"approx\" | \"model\";\n}\n/**\n * Middleware that automatically prunes tool results to manage context size.\n *\n * This middleware applies a sequence of edits when the total input token count\n * exceeds configured thresholds. 
By default, it uses the `ClearToolUsesEdit` strategy\n * which mirrors Anthropic's `clear_tool_uses_20250919` behaviour by clearing older\n * tool results once the conversation exceeds 100,000 tokens.\n *\n * ## Basic Usage\n *\n * Use the middleware with default settings to automatically manage context:\n *\n * @example Basic usage with defaults\n * ```ts\n * import { contextEditingMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware(),\n * ],\n * });\n * ```\n *\n * The default configuration:\n * - Triggers when context exceeds **100,000 tokens**\n * - Keeps the **3 most recent** tool results\n * - Uses **approximate token counting** (fast)\n * - Does not clear tool call arguments\n *\n * ## Custom Configuration\n *\n * Customize the clearing behavior with `ClearToolUsesEdit`:\n *\n * @example Custom ClearToolUsesEdit configuration\n * ```ts\n * import { contextEditingMiddleware, ClearToolUsesEdit } from \"langchain\";\n *\n * // Single condition: trigger if tokens >= 50000 AND messages >= 20\n * const agent1 = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: { tokens: 50000, messages: 20 },\n * keep: { messages: 5 },\n * excludeTools: [\"search\"],\n * clearToolInputs: true,\n * }),\n * ],\n * tokenCountMethod: \"approx\",\n * }),\n * ],\n * });\n *\n * // Multiple conditions: trigger if (tokens >= 50000 AND messages >= 20) OR (tokens >= 30000 AND messages >= 50)\n * const agent2 = createAgent({\n * model: \"anthropic:claude-sonnet-4-5\",\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: [\n * { tokens: 50000, messages: 20 },\n * { tokens: 30000, messages: 50 },\n * ],\n * keep: { messages: 5 },\n * }),\n * ],\n * }),\n * ],\n * });\n *\n * // Fractional trigger with model profile\n * const agent3 = createAgent({\n * model: chatModel,\n * tools: [searchTool, calculatorTool],\n * middleware: [\n * contextEditingMiddleware({\n * edits: [\n * new ClearToolUsesEdit({\n * trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens\n * keep: { fraction: 0.3 }, // Keep 30% of model's max tokens\n * model: chatModel,\n * }),\n * ],\n * }),\n * ],\n * });\n * ```\n *\n * ## Custom Editing Strategies\n *\n * Implement your own context editing strategy by creating a class that\n * implements the `ContextEdit` interface:\n *\n * @example Custom editing strategy\n * ```ts\n * import { contextEditingMiddleware, type ContextEdit, type TokenCounter } from \"langchain\";\n * import type { BaseMessage } from \"@langchain/core/messages\";\n *\n * class CustomEdit implements ContextEdit {\n * async apply(params: {\n * tokens: number;\n * messages: BaseMessage[];\n * countTokens: TokenCounter;\n * }): Promise<number> {\n * // Implement your custom editing logic here\n * // and apply it to the messages array, then\n * // return the new token count after edits\n * return countTokens(messages);\n * }\n * }\n * ```\n *\n * @param config - Configuration options for the middleware\n * @returns A middleware instance that can be used with `createAgent`\n */\nexport declare function contextEditingMiddleware(config?: ContextEditingMiddlewareConfig): 
import(\"./types.js\").AgentMiddleware<undefined, undefined, any>;\n//# sourceMappingURL=contextEditing.d.ts.map"],"mappings":";;;;;;;AA2EsB;AAKtB;;;;AAqCmB;AAkEnB;;;;;;;;;;;;AAA6D;AAmB7D;AAyIA;;;;;;;;;;;;;;;;;;;UAjSiBK,WAAAA;;;;;;;;;;;;;;;;cAgBCL;;;;iBAIGI;;;;YAILH;aACDK;;;;;UAKEC,uBAAAA;;;;;;;;;;;;;;;;;;;;YAoBHL,cAAcA;;;;;;;;;;;;;;;;;SAiBjBC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAkEUK,iBAAAA,YAA6BH;;WAErCH,cAAcA;QACjBC;;gBAEQM;;SAEPR;;uBAEcM;;cAEPP;WACHC;iBACMG;MACbE;;;;;UAKSI,8BAAAA;;;;;UAKLL;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAoIYM,wBAAAA,UAAkCD,iCAAAA"}
@@ -1 +1 @@
- {"version":3,"file":"dynamicSystemPrompt.d.cts","names":["SystemMessage","Runtime","AgentBuiltInState","DynamicSystemPromptMiddlewareConfig","TContextSchema","Promise","dynamicSystemPromptMiddleware","__types_js9","AgentMiddleware"],"sources":["../../../src/agents/middleware/dynamicSystemPrompt.d.ts"],"sourcesContent":["import { SystemMessage } from \"@langchain/core/messages\";\nimport type { Runtime, AgentBuiltInState } from \"../runtime.js\";\nexport type DynamicSystemPromptMiddlewareConfig<TContextSchema> = (state: AgentBuiltInState, runtime: Runtime<TContextSchema>) => string | SystemMessage | Promise<string | SystemMessage>;\n/**\n * Dynamic System Prompt Middleware\n *\n * Allows setting the system prompt dynamically right before each model invocation.\n * Useful when the prompt depends on the current agent state or per-invocation context.\n *\n * @typeParam TContextSchema - The shape of the runtime context available at invocation time.\n * If your agent defines a `contextSchema`, pass the inferred type here to get full type-safety\n * for `runtime.context`.\n *\n * @param fn - Function that receives the current agent `state` and `runtime`, and\n * returns the system prompt for the next model call as a string.\n *\n * @returns A middleware instance that sets `systemPrompt` for the next model call.\n *\n * @example Basic usage with typed context\n * ```ts\n * import { z } from \"zod\";\n * import { dynamicSystemPrompt } from \"langchain\";\n * import { createAgent, SystemMessage } from \"langchain\";\n *\n * const contextSchema = z.object({ region: z.string().optional() });\n *\n * const middleware = dynamicSystemPrompt<z.infer<typeof contextSchema>>(\n * (_state, runtime) => `You are a helpful assistant. Region: ${runtime.context.region ?? \"n/a\"}`\n * );\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * contextSchema,\n * middleware: [middleware],\n * });\n *\n * await agent.invoke({ messages }, { context: { region: \"EU\" } });\n * ```\n *\n * @public\n */\nexport declare function dynamicSystemPromptMiddleware<TContextSchema = unknown>(fn: DynamicSystemPromptMiddlewareConfig<TContextSchema>): import(\"./types.js\").AgentMiddleware<undefined, undefined, any>;\n//# sourceMappingURL=dynamicSystemPrompt.d.ts.map"],"mappings":";;;;;KAEYG,8DAA8DD,4BAA4BD,QAAQG,6BAA6BJ,gBAAgBK,iBAAiBL;;;AAA5K;;;;;;;AAAkK;AAuClK;;;;AAA8K;;;;;;;;;;;;;;;;;;;;;;;;iBAAtJM,4DAA4DH,oCAAoCC"}
+ {"version":3,"file":"dynamicSystemPrompt.d.cts","names":["SystemMessage","Runtime","AgentBuiltInState","DynamicSystemPromptMiddlewareConfig","TContextSchema","Promise","dynamicSystemPromptMiddleware","__types_js12","AgentMiddleware"],"sources":["../../../src/agents/middleware/dynamicSystemPrompt.d.ts"],"sourcesContent":["import { SystemMessage } from \"@langchain/core/messages\";\nimport type { Runtime, AgentBuiltInState } from \"../runtime.js\";\nexport type DynamicSystemPromptMiddlewareConfig<TContextSchema> = (state: AgentBuiltInState, runtime: Runtime<TContextSchema>) => string | SystemMessage | Promise<string | SystemMessage>;\n/**\n * Dynamic System Prompt Middleware\n *\n * Allows setting the system prompt dynamically right before each model invocation.\n * Useful when the prompt depends on the current agent state or per-invocation context.\n *\n * @typeParam TContextSchema - The shape of the runtime context available at invocation time.\n * If your agent defines a `contextSchema`, pass the inferred type here to get full type-safety\n * for `runtime.context`.\n *\n * @param fn - Function that receives the current agent `state` and `runtime`, and\n * returns the system prompt for the next model call as a string.\n *\n * @returns A middleware instance that sets `systemPrompt` for the next model call.\n *\n * @example Basic usage with typed context\n * ```ts\n * import { z } from \"zod\";\n * import { dynamicSystemPrompt } from \"langchain\";\n * import { createAgent, SystemMessage } from \"langchain\";\n *\n * const contextSchema = z.object({ region: z.string().optional() });\n *\n * const middleware = dynamicSystemPrompt<z.infer<typeof contextSchema>>(\n * (_state, runtime) => `You are a helpful assistant. Region: ${runtime.context.region ?? \"n/a\"}`\n * );\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * contextSchema,\n * middleware: [middleware],\n * });\n *\n * await agent.invoke({ messages }, { context: { region: \"EU\" } });\n * ```\n *\n * @public\n */\nexport declare function dynamicSystemPromptMiddleware<TContextSchema = unknown>(fn: DynamicSystemPromptMiddlewareConfig<TContextSchema>): import(\"./types.js\").AgentMiddleware<undefined, undefined, any>;\n//# sourceMappingURL=dynamicSystemPrompt.d.ts.map"],"mappings":";;;;;KAEYG,8DAA8DD,4BAA4BD,QAAQG,6BAA6BJ,gBAAgBK,iBAAiBL;;;AAA5K;;;;;;;AAAkK;AAuClK;;;;AAA8K;;;;;;;;;;;;;;;;;;;;;;;;iBAAtJM,4DAA4DH,oCAAoCC"}
@@ -1 +1 @@
- {"version":3,"file":"modelCallLimit.d.cts","names":["z","InferInteropZodInput","contextSchema","ZodNumber","ZodOptional","ZodEnum","ZodTypeAny","ZodObject","ModelCallLimitMiddlewareConfig","Partial","modelCallLimitMiddleware","ZodDefault","__types_js12","AgentMiddleware"],"sources":["../../../src/agents/middleware/modelCallLimit.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\ndeclare const contextSchema: z.ZodObject<{\n /**\n * The maximum number of model calls allowed per thread.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The maximum number of model calls allowed per run.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The behavior to take when the limit is exceeded.\n * - \"error\" will throw an error and stop the agent.\n * - \"end\" will end the agent.\n * @default \"end\"\n */\n exitBehavior: z.ZodOptional<z.ZodEnum<[\"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}>;\nexport type ModelCallLimitMiddlewareConfig = Partial<InferInteropZodInput<typeof contextSchema>>;\n/**\n * Creates a middleware to limit the number of model calls at both thread and run levels.\n *\n * This middleware helps prevent excessive model API calls by enforcing limits on how many\n * times the model can be invoked. It supports two types of limits:\n *\n * - **Thread-level limit**: Restricts the total number of model calls across an entire conversation thread\n * - **Run-level limit**: Restricts the number of model calls within a single agent run/invocation\n *\n * ## How It Works\n *\n * The middleware intercepts model requests before they are sent and checks the current call counts\n * against the configured limits. 
If either limit is exceeded, it throws a `ModelCallLimitMiddlewareError`\n * to stop execution and prevent further API calls.\n *\n * ## Use Cases\n *\n * - **Cost Control**: Prevent runaway costs from excessive model calls in production\n * - **Testing**: Ensure agents don't make too many calls during development/testing\n * - **Safety**: Limit potential infinite loops or recursive agent behaviors\n * - **Rate Limiting**: Enforce organizational policies on model usage per conversation\n *\n * @param middlewareOptions - Configuration options for the call limits\n * @param middlewareOptions.threadLimit - Maximum number of model calls allowed per thread (optional)\n * @param middlewareOptions.runLimit - Maximum number of model calls allowed per run (optional)\n *\n * @returns A middleware instance that can be passed to `createAgent`\n *\n * @throws {ModelCallLimitMiddlewareError} When either the thread or run limit is exceeded\n *\n * @example\n * ```typescript\n * import { createAgent, modelCallLimitMiddleware } from \"langchain\";\n *\n * // Limit to 10 calls per thread and 3 calls per run\n * const agent = createAgent({\n * model: \"openai:gpt-4o-mini\",\n * tools: [myTool],\n * middleware: [\n * modelCallLimitMiddleware({\n * threadLimit: 10,\n * runLimit: 3\n * })\n * ]\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Limits can also be configured at runtime via context\n * const result = await agent.invoke(\n * { messages: [\"Hello\"] },\n * {\n * configurable: {\n * threadLimit: 5 // Override the default limit for this run\n * }\n * }\n * );\n * ```\n */\nexport declare function modelCallLimitMiddleware(middlewareOptions?: ModelCallLimitMiddlewareConfig): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n threadModelCallCount: z.ZodDefault<z.ZodNumber>;\n runModelCallCount: z.ZodDefault<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n threadModelCallCount: number;\n runModelCallCount: number;\n}, {\n threadModelCallCount?: number | undefined;\n runModelCallCount?: number | undefined;\n}>, z.ZodObject<{\n /**\n * The maximum number of model calls allowed per thread.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The maximum number of model calls allowed per run.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The behavior to take when the limit is exceeded.\n * - \"error\" will throw an error and stop the agent.\n * - \"end\" will end the agent.\n * @default \"end\"\n */\n exitBehavior: z.ZodOptional<z.ZodEnum<[\"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}>, any>;\nexport {};\n//# 
sourceMappingURL=modelCallLimit.d.ts.map"],"mappings":";;;;;cAEcE,eAAeF,CAAAA,CAAEO;;;AADoC;EAKlCJ,WAAAA,EAAhBH,CAAAA,CAAEI,WAAcD,CAAFH,CAAAA,CAAEG,SAAAA,CAAAA;EAAdC;;;EAWeC,QAAAA,EAPpBL,CAAAA,CAAEI,WAOkBC,CAPNL,CAAAA,CAAEG,SAOIE,CAAAA;EAAdD;;;AAfoB;AAyBxC;;EAAqDH,YAAAA,EAVnCD,CAAAA,CAAEI,WAUiCH,CAVrBD,CAAAA,CAAEK,OAUmBJ,CAAAA,CAAAA,OAAAA,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA;CAARQ,EAAAA,OAAAA,EATjCT,CAAAA,CAAEM,UAS+BG,EAAAA;EAAO,WAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA6D5BC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAwB;EAAqBF,YAAAA,CAAAA,EAAAA,KAAAA,GAAAA,OAAAA,GAAAA,SAAAA;CAC9BR,EAAEG;EAAbQ,WAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACUR,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbQ,YAAAA,CAAAA,EAAAA,KAAAA,GAAAA,OAAAA,GAAAA,SAAAA;CACbX,CAAAA;AAHiIO,KA7DjIC,8BAAAA,GAAiCC,OA6DgGF,CA7DxFN,oBA6DwFM,CAAAA,OA7D5DL,aA6D4DK,CAAAA,CAAAA;;;;;;;;;;AAAH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAlHG,wBAAAA,qBAA6CF,iDAAsER,CAAAA,CAAEO;wBACnHP,CAAAA,CAAEW,WAAWX,CAAAA,CAAEG;qBAClBH,CAAAA,CAAEW,WAAWX,CAAAA,CAAEG;YAC1BH,CAAAA,CAAEM;;;;;;IAMVN,CAAAA,CAAEO;;;;eAIWP,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEG;;;;YAInBH,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEG;;;;;;;gBAOZH,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEK;YACtBL,CAAAA,CAAEM"}
+ {"version":3,"file":"modelCallLimit.d.cts","names":["z","InferInteropZodInput","contextSchema","ZodNumber","ZodOptional","ZodEnum","ZodTypeAny","ZodObject","ModelCallLimitMiddlewareConfig","Partial","modelCallLimitMiddleware","ZodDefault","__types_js9","AgentMiddleware"],"sources":["../../../src/agents/middleware/modelCallLimit.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\ndeclare const contextSchema: z.ZodObject<{\n /**\n * The maximum number of model calls allowed per thread.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The maximum number of model calls allowed per run.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The behavior to take when the limit is exceeded.\n * - \"error\" will throw an error and stop the agent.\n * - \"end\" will end the agent.\n * @default \"end\"\n */\n exitBehavior: z.ZodOptional<z.ZodEnum<[\"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}>;\nexport type ModelCallLimitMiddlewareConfig = Partial<InferInteropZodInput<typeof contextSchema>>;\n/**\n * Creates a middleware to limit the number of model calls at both thread and run levels.\n *\n * This middleware helps prevent excessive model API calls by enforcing limits on how many\n * times the model can be invoked. It supports two types of limits:\n *\n * - **Thread-level limit**: Restricts the total number of model calls across an entire conversation thread\n * - **Run-level limit**: Restricts the number of model calls within a single agent run/invocation\n *\n * ## How It Works\n *\n * The middleware intercepts model requests before they are sent and checks the current call counts\n * against the configured limits. 
If either limit is exceeded, it throws a `ModelCallLimitMiddlewareError`\n * to stop execution and prevent further API calls.\n *\n * ## Use Cases\n *\n * - **Cost Control**: Prevent runaway costs from excessive model calls in production\n * - **Testing**: Ensure agents don't make too many calls during development/testing\n * - **Safety**: Limit potential infinite loops or recursive agent behaviors\n * - **Rate Limiting**: Enforce organizational policies on model usage per conversation\n *\n * @param middlewareOptions - Configuration options for the call limits\n * @param middlewareOptions.threadLimit - Maximum number of model calls allowed per thread (optional)\n * @param middlewareOptions.runLimit - Maximum number of model calls allowed per run (optional)\n *\n * @returns A middleware instance that can be passed to `createAgent`\n *\n * @throws {ModelCallLimitMiddlewareError} When either the thread or run limit is exceeded\n *\n * @example\n * ```typescript\n * import { createAgent, modelCallLimitMiddleware } from \"langchain\";\n *\n * // Limit to 10 calls per thread and 3 calls per run\n * const agent = createAgent({\n * model: \"openai:gpt-4o-mini\",\n * tools: [myTool],\n * middleware: [\n * modelCallLimitMiddleware({\n * threadLimit: 10,\n * runLimit: 3\n * })\n * ]\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Limits can also be configured at runtime via context\n * const result = await agent.invoke(\n * { messages: [\"Hello\"] },\n * {\n * configurable: {\n * threadLimit: 5 // Override the default limit for this run\n * }\n * }\n * );\n * ```\n */\nexport declare function modelCallLimitMiddleware(middlewareOptions?: ModelCallLimitMiddlewareConfig): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n threadModelCallCount: z.ZodDefault<z.ZodNumber>;\n runModelCallCount: z.ZodDefault<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n threadModelCallCount: number;\n runModelCallCount: number;\n}, {\n threadModelCallCount?: number | undefined;\n runModelCallCount?: number | undefined;\n}>, z.ZodObject<{\n /**\n * The maximum number of model calls allowed per thread.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The maximum number of model calls allowed per run.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * The behavior to take when the limit is exceeded.\n * - \"error\" will throw an error and stop the agent.\n * - \"end\" will end the agent.\n * @default \"end\"\n */\n exitBehavior: z.ZodOptional<z.ZodEnum<[\"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}, {\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"end\" | \"error\" | undefined;\n}>, any>;\nexport {};\n//# 
sourceMappingURL=modelCallLimit.d.ts.map"],"mappings":";;;;;cAEcE,eAAeF,CAAAA,CAAEO;;;AADoC;EAKlCJ,WAAAA,EAAhBH,CAAAA,CAAEI,WAAcD,CAAFH,CAAAA,CAAEG,SAAAA,CAAAA;EAAdC;;;EAWeC,QAAAA,EAPpBL,CAAAA,CAAEI,WAOkBC,CAPNL,CAAAA,CAAEG,SAOIE,CAAAA;EAAdD;;;AAfoB;AAyBxC;;EAAqDH,YAAAA,EAVnCD,CAAAA,CAAEI,WAUiCH,CAVrBD,CAAAA,CAAEK,OAUmBJ,CAAAA,CAAAA,OAAAA,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA;CAARQ,EAAAA,OAAAA,EATjCT,CAAAA,CAAEM,UAS+BG,EAAAA;EAAO,WAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA6D5BC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAwB;EAAqBF,YAAAA,CAAAA,EAAAA,KAAAA,GAAAA,OAAAA,GAAAA,SAAAA;CAC9BR,EAAEG;EAAbQ,WAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACUR,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbQ,YAAAA,CAAAA,EAAAA,KAAAA,GAAAA,OAAAA,GAAAA,SAAAA;CACbX,CAAAA;AAHiIO,KA7DjIC,8BAAAA,GAAiCC,OA6DgGF,CA7DxFN,oBA6DwFM,CAAAA,OA7D5DL,aA6D4DK,CAAAA,CAAAA;;;;;;;;;;AAAH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAlHG,wBAAAA,qBAA6CF,iDAAsER,CAAAA,CAAEO;wBACnHP,CAAAA,CAAEW,WAAWX,CAAAA,CAAEG;qBAClBH,CAAAA,CAAEW,WAAWX,CAAAA,CAAEG;YAC1BH,CAAAA,CAAEM;;;;;;IAMVN,CAAAA,CAAEO;;;;eAIWP,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEG;;;;YAInBH,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEG;;;;;;;gBAOZH,CAAAA,CAAEI,YAAYJ,CAAAA,CAAEK;YACtBL,CAAAA,CAAEM"}
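The sourcemap change for `modelCallLimit.d.cts` is likewise an internal renumbering. The documented usage from that declaration file, trimmed to a minimal sketch (no tools are registered here, purely to keep the snippet self-contained):

```ts
import { createAgent, modelCallLimitMiddleware } from "langchain";

const agent = createAgent({
  model: "openai:gpt-4o-mini",
  tools: [],
  middleware: [
    modelCallLimitMiddleware({
      threadLimit: 10,     // max model calls across the whole thread
      runLimit: 3,         // max model calls within a single run
      exitBehavior: "end", // end the agent instead of throwing when exceeded
    }),
  ],
});
```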
@@ -1 +1 @@
- {"version":3,"file":"summarization.d.cts","names":["_langchain_core_messages0","__types_js8","z","BaseMessage","BaseLanguageModel","InferInteropZodInput","DEFAULT_SUMMARY_PROMPT","TokenCounter","Promise","contextSizeSchema","ZodNumber","ZodOptional","ZodTypeAny","ZodObject","ZodEffects","ContextSize","infer","keepSchema","KeepSize","contextSchema","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","ZodTypeDef","ZodType","ZodArray","ZodUnion","MessageStructure","MessageType","ZodUnknown","ZodTuple","ZodPromise","ZodFunction","ZodString","ZodDefault","SummarizationMiddlewareConfig","getProfileLimits","summarizationMiddleware","AgentMiddleware"],"sources":["../../../src/agents/middleware/summarization.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\nexport declare const DEFAULT_SUMMARY_PROMPT = \"<role>\\nContext Extraction Assistant\\n</role>\\n\\n<primary_objective>\\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\\n</primary_objective>\\n\\n<objective_information>\\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\\n</objective_information>\\n\\n<instructions>\\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\\n</instructions>\\n\\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\\n\\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\\n\\n<messages>\\nMessages to summarize:\\n{messages}\\n</messages>\";\nexport type TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\nexport declare const contextSizeSchema: z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>;\nexport type ContextSize = z.infer<typeof contextSizeSchema>;\nexport declare const keepSchema: z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>;\nexport type KeepSize = z.infer<typeof keepSchema>;\ndeclare const contextSchema: z.ZodObject<{\n /**\n * Model to use for summarization\n */\n model: z.ZodType<string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n /**\n * Trigger conditions for summarization.\n * Can be a single condition object (all properties must be met) or an array of conditions (any condition must be met).\n *\n * @example\n * ```ts\n * // Single condition: trigger if tokens >= 5000 AND messages >= 3\n * trigger: { tokens: 5000, messages: 3 }\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 }\n * ]\n * ```\n */\n trigger: z.ZodOptional<z.ZodUnion<[z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n 
messages?: number | undefined;\n }>, z.ZodArray<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, \"many\">]>>;\n /**\n * Keep conditions for summarization\n */\n keep: z.ZodOptional<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>>;\n /**\n * Token counter function to use for summarization\n */\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodType<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>, z.ZodTypeDef, BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>>, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n /**\n * Summary prompt to use for summarization\n * @default {@link DEFAULT_SUMMARY_PROMPT}\n */\n summaryPrompt: z.ZodDefault<z.ZodString>;\n /**\n * Number of tokens to trim to before summarizing\n */\n trimTokensToSummarize: z.ZodOptional<z.ZodNumber>;\n /**\n * Prefix to add to the summary\n */\n summaryPrefix: z.ZodOptional<z.ZodString>;\n /**\n * @deprecated Use `trigger: { tokens: value }` instead.\n */\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n /**\n * @deprecated Use `keep: { messages: value }` instead.\n */\n messagesToKeep: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n model: string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n 
messagesToKeep?: number | undefined;\n}, {\n model: string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n}>;\nexport type SummarizationMiddlewareConfig = InferInteropZodInput<typeof contextSchema>;\n/**\n * Get max input tokens from model profile or fallback to model name lookup\n */\nexport declare function getProfileLimits(input: BaseLanguageModel): number | undefined;\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * // Single condition: trigger if tokens >= 4000 AND messages >= 10\n * const agent1 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: { tokens: 4000, messages: 10 },\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * const agent2 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 },\n * ],\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * ```\n */\nexport declare function summarizationMiddleware(options: SummarizationMiddlewareConfig): import(\"./types.js\").AgentMiddleware<undefined, z.ZodObject<{\n trigger: z.ZodOptional<z.ZodUnion<[z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: 
number | undefined;\n }>, z.ZodArray<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, \"many\">]>>;\n keep: z.ZodOptional<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodType<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>, z.ZodTypeDef, BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>>, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n trimTokensToSummarize: z.ZodOptional<z.ZodNumber>;\n summaryPrefix: z.ZodOptional<z.ZodString>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodOptional<z.ZodNumber>;\n} & {\n model: z.ZodOptional<z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>>;\n}, \"strip\", z.ZodTypeAny, {\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n model?: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions> | undefined;\n}, {\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: 
number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n model?: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions> | undefined;\n}>, any>;\nexport {};\n//# sourceMappingURL=summarization.d.ts.map"],"mappings":";;;;;;;;;KAKYO,YAAAA,cAA0BJ,2BAA2BK;cAC5CC,mBAAmBP,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;AADvD;EACqBJ,QAAAA,EAIPP,CAAAA,CAAES,WAyBd,CAzB0BT,CAAAA,CAAEQ,SAyB5B,CAAA;EAzB4BA;;;EAIhBC,MAAAA,EAAFT,CAAAA,CAAES,WAAAA,CAAYT,CAAAA,CAAEQ,SAAdC,CAAAA;EAIgBD;;;EAZyBG,QAAAA,EAYzCX,CAAAA,CAAES,WAZuCE,CAY3BX,CAAAA,CAAEQ,SAZyBG,CAAAA;CAAfX,EAAEY,OAAAA,EAa9BZ,CAAAA,CAAEU,UAb4BE,EAAAA;EAAU,QAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA8BxCC,MAAAA,CAAAA,EAAAA,MAAW,GAAA,SAAkBN;EACpBQ,QAAAA,CAAAA,EAAAA,MA0BnB,GAAA,SAAA;CAtB0Bf,EAAEQ;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIYD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CACcT,CAAAA,EAAEQ;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAVkCC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAfX,EAAEY;EAAU,QAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA2BjCI,MAAAA,CAAAA,EAAAA,MAAQ,GAAA,SAAkBD;EACxBE,QAAAA,CAAAA,EAAAA,MAmLZ,GAAA,SAAA;CAAAC,CAAAA;AA/K4BhB,KAjClBW,WAAAA,GAAcb,CAAAA,CAAEc,KAiCEZ,CAAAA,OAjCWK,iBAiCXL,CAAAA;AAAuGkB,cAhChHL,UAgCgHK,EAhCpGpB,CAAAA,CAAEY,UAgCkGQ,CAhCvFpB,CAAAA,CAAEW,SAgCqFS,CAAAA;EAAUF;;;EAqB7GV,QAAAA,EAjDpBR,CAAAA,CAAES,WAiDkBD,CAjDNR,CAAAA,CAAEQ,SAiDIA,CAAAA;EAAdC;;;EAQcD,MAAAA,EArDtBR,CAAAA,CAAES,WAqDoBD,CArDRR,CAAAA,CAAEQ,SAqDMA,CAAAA;EAAdC,QAAAA,EApDNT,CAAAA,CAAES,WAoDIA,CApDQT,CAAAA,CAAEQ,SAoDVC,CAAAA;CACJT,EAAEU,OAAAA,EApDNV,CAAAA,CAAEU,UAoDIA,EAAAA;EAboCC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAiCPJ,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,EAAES;EAIYD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIgBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,CAAAA,EAAES;EACFC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAbgBC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAbZ,EAAEsB;EA7BmBC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdd,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAkEmBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,CAAAA;AAIcQ,KA5FpBQ,QAAAA,GAAWhB,CAAAA,CAAEc,KA4FON,CAAAA,OA5FMO,UA4FNP,CAAAA;cA3FlBS,aA2FIR,EA3FWT,CAAAA,CAAEW,SA2FbF,CAAAA;EACgBD;;;EATKG,KAAAA,EA/E5BX,CAAAA,CAAEqB,OA+E0BV,CAAAA,MAAAA,GA/ETT,iBA+ESS,CAAAA,GAAAA,EAgGrCO,qCAAAA,CA/KkGC,4BAAAA,CA+E7DR,EA/E4FX,CAAAA,CAAEoB,UA+E9FT,EAAAA,MAAAA,GA/EmHT,iBA+EnHS,CAAAA,GAAAA,EA/EwGO,qCAAAA,CAAiFC,4BAAAA,CA+EzLR,CAAAA;EAAbC;;;;;;;;;;;;;;;;EA8BmUW,OAAAA,EA5FhVvB,CAAAA,CAAES,WA4F8Uc,CA5FlUvB,CAAAA,CAAEuB,QA4FgUA,CAAAA,CA5FtTvB,CAAAA,CAAEY,UA4FoTW,CA5FzSvB,CAAAA,CAAEW,SA4FuSY,CAAAA;IAA3TM;;;IAKbE,QAAAA,EA7FH/B,CAAAA,CAAES,WA6FCsB,CA7FW/B,CAAAA,CAAEQ,SA6FbuB,CAAAA;IAIsBvB;;;IAItBC,MAAAA,EAjGLT,CAAAA,CAAES,WAiGGA,CAjGST,CAAAA,CAAEQ,SAiGXC,CAAAA;IAIuBD;;;IAItBC,QAAAA,EArGJT,CAAAA,CAAES,WAqGEA,CArGUT,CAAAA,CAAEQ,SAqGZC,CAAAA;EACRC,CAAAA,EAAAA,OAAAA,EArGEV,CAAAA,CAAE
U,UAqGJA,EAAAA;IAAUQ,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACJhB,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAiBJ,QAAAA,CAAAA,EAAAA,MAAAA,GAeuC0B,SAAAA;EAAgB1B,CAAAA,EAAAA;IAA/DG,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAoJK,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAOY,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAOpKhB,CAAAA,CAAAA,EAAAA;IAAiBJ,QAAAA,CAAAA,EAAAA,MAAAA,GAeuC0B,SAAAA;IAAgB1B,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAqC2B;IAApGxB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAoJK,CAAAA,EAAAA;IA7KlJK,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAS,MAAA,CAAA,EAAA,MAAA,GAAA,SAAA;IAoL5BqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAA6B;EAmDjBE,CAAAA,CAAAA,EArLhBlC,CAAAA,CAAEsB,QAqLcY,CArLLlC,CAAAA,CAAEY,UAqL0B,CArLfZ,CAAAA,CAAEW,SAqLa,CAAA;IAAUqB;;;IASzBxB,QAAAA,EA1LdR,CAAAA,CAAES,WA0LYD,CA1LAR,CAAAA,CAAEQ,SA0LFA,CAAAA;IAAdC;;;IAKAC,MAAAA,EA3LFV,CAAAA,CAAES,WA2LAC,CA3LYV,CAAAA,CAAEQ,SA2LdE,CAAAA;IAboCC;;;IAiClCF,QAAAA,EA3MFT,CAAAA,CAAES,WA2MAA,CA3MYT,CAAAA,CAAEQ,SA2MdC,CAAAA;EAIYD,CAAAA,EAAAA,OAAAA,EA9MhBR,CAAAA,CAAEU,UA8McF,EAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAIgBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,CAAAA,EAAAA;IAbgBC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAbC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAXU,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EA7BmBC,CAAAA,CAAAA,EAAAA;IAAdd,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IA+DmBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIYD,CAAAA,EAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACgBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAVqBC;;;EAAhBb,IAAAA,EAhNbE,CAAAA,CAAES,WAgNWX,CAhNCE,CAAAA,CAAEY,UA2OoGY,CA3OzFxB,CAAAA,CAAEW,SA2OuFa,CAAAA;IAAgB1B;;;IAA+DA,QAAAA,EAvO3LE,CAAAA,CAAES,WAuO0Oe,CAvO9NxB,CAAAA,CAAEQ,SAuO4NgB,CAAAA;IAAgB1B;;;IAAlNwB,MAAAA,EAnO5CtB,CAAAA,CAAES,WAmO0Ca,CAnO9BtB,CAAAA,CAAEQ,SAmO4Bc,CAAAA;IAAkRI,QAAAA,EAlO5T1B,CAAAA,CAAES,WAkO0TiB,CAlO9S1B,CAAAA,CAAEQ,SAkO4SkB,CAAAA;EAA9RC,CAAAA,EAAAA,OAAAA,EAjOhC3B,CAAAA,CAAEU,UAiO8BiB,EAAAA;IAAyTnB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAA0BA,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAboB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAzBL,CAAAA,EAAAA;IAA3TM,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdpB,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACcqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,CAAAA,CAAAA,EAAAA;IACsBvB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACMqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdrB,CAAAA,EAAAA;IACuBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACMD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,CAAAA,CAAAA,CAAAA;EAAWS;;;EAEmHA,YAAAA,EApNlIlB,CAAAA,CAAES,WAoNgIS,CApNpHlB,CAAAA,CAAE6B,WAoN0LV,CApN9KnB,CAAAA,CAAE2B,QAoN4KR,CAAAA,CApNlKnB,CAAAA,CAAEsB,QAoNgKH,CApNvJnB,CAAAA,CAAEqB,OAoNqJF,CApN7IlB,WAoN6IkB,CAlPrMrB,yBAAAA,CA8BuG0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,CAoNyCN,EApN3BnB,CAAAA,CAAEoB,UAoNyBD,EApNblB,WAoNakB,CApNfrB,yBAAAA,CAAiD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,CAoNvFN,CAAAA,EAAAA,MAAAA,CAAAA,CAAAA,EApNgHnB,CAAAA,CAAE0B,UAoNlHP,CAAAA,EApN+HnB,CAAAA,CAAEuB,QAoNjIJ,CAAAA,CApN2InB,CAAAA,CAAEQ,SAoN7IW,EApNwJnB,CAAAA,CAAE4B,UAoN1JT,CApNqKnB,CAAAA,CAAEQ,SAoNvKW,CAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAAtEjB;;;;EAC9HJ,aAAAA,EAhNLE,CAAAA,CAAE+B,UA+NuDP,CA/N5CxB,CAAAA,CAAE8B,SA+N0CN,CAAAA;EAAgB1B;;;EAA4FoB,qBAAAA,EA3N7JlB,CAAAA,CAAES,WAiOqDU,CAjOzCnB,CAAAA,CAAEQ,SAiOuCW,CAAAA;EAAtEjB;;;EAgBiBD,aAAAA,EA7OVD,CAAAA,CAAES,WA6OQR,CA7OID,CAAAA,CAAE8B,SA6ON7B,CAAAA;EAAoJK;;;EApItCK,sBAAAA,EArG/GX,CAAAA,CAAES,WAq
G6GE,CArGjGX,CAAAA,CAAEQ,SAqG+FG,CAAAA;;AAAd;;kBAjGzGX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;YACxBR,CAAAA,CAAEU;kBACMR,uBADIgB,qCAAAA,CACkEC,4BAAAA;;;;;;;;;;;;;;;2BAe7DlB,YAfQH,yBAAAA,CAeuC0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;;kBAO7JJ,uBAPoKgB,qCAAAA,CAO9FC,4BAAAA;;;;;;;;;;;;;;;2BAe7DlB,YAfQH,yBAAAA,CAeuC0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;;KAOrK0B,6BAAAA,GAAgC7B,4BAA4Bc;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAmDhDiB,uBAAAA,UAAiCF,2DAAgFhC,CAAAA,CAAEW;WAC9HX,CAAAA,CAAES,YAAYT,CAAAA,CAAEuB,UAAUvB,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIpCX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;cAIdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;MAgBVV,CAAAA,CAAEsB,SAAStB,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIhBX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;cAIdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;;QAiBRV,CAAAA,CAAES,YAAYT,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIrBX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cACdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;;gBAiBAV,CAAAA,CAAES,YAAYT,CAAAA,CAAE6B,YAAY7B,CAAAA,CAAE2B,UAAU3B,CAAAA,CAAEsB,SAAStB,CAAAA,CAAEqB,QAAQpB,YA3BxDH,yBAAAA,CA2BuG0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,GAAczB,CAAAA,CAAEoB,YAAYnB,YAAFH,yBAAAA,CAAiD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,cAAyBzB,CAAAA,CAAE0B,aAAa1B,CAAAA,CAAEuB,UAAUvB,CAAAA,CAAEQ,WAAWR,CAAAA,CAAE4B,WAAW5B,CAAAA,CAAEQ;iBAChXR,CAAAA,CAAE+B,WAAW/B,CAAAA,CAAE8B;yBACP9B,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;iBACxBR,CAAAA,CAAES,YAAYT,CAAAA,CAAE8B;0BACP9B,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;kBACxBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;SAEzBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEqB,QAAQnB,uBAFFgB,qCAAAA,CAEwEC,4BAAAA,GAA+BnB,CAAAA,CAAEoB,YAAYlB,uBAAFgB,qCAAAA,CAAwEC,4BAAAA;YAChNnB,CAAAA,CAAEU;;;;;;;;;;;;;;;2BAeeT,YAfLH,yBAAAA,CAeoD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;UAMrKJ,uBAN4KgB,qCAAAA,CAMtGC,4BAAAA;;;;;;;;;;;;;;;;2BAgBrDlB,YAhBAH,yBAAAA,CAgB+C0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;UAMrKJ,uBAN4KgB,qCAAAA,CAMtGC,4BAAAA"}
1
+ {"version":3,"file":"summarization.d.cts","names":["_langchain_core_messages0","__types_js11","z","BaseMessage","BaseLanguageModel","InferInteropZodInput","DEFAULT_SUMMARY_PROMPT","TokenCounter","Promise","contextSizeSchema","ZodNumber","ZodOptional","ZodTypeAny","ZodObject","ZodEffects","ContextSize","infer","keepSchema","KeepSize","contextSchema","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","ZodTypeDef","ZodType","ZodArray","ZodUnion","MessageStructure","MessageType","ZodUnknown","ZodTuple","ZodPromise","ZodFunction","ZodString","ZodDefault","SummarizationMiddlewareConfig","getProfileLimits","summarizationMiddleware","AgentMiddleware"],"sources":["../../../src/agents/middleware/summarization.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\nexport declare const DEFAULT_SUMMARY_PROMPT = \"<role>\\nContext Extraction Assistant\\n</role>\\n\\n<primary_objective>\\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\\n</primary_objective>\\n\\n<objective_information>\\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\\n</objective_information>\\n\\n<instructions>\\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\\n</instructions>\\n\\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\\n\\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\\n\\n<messages>\\nMessages to summarize:\\n{messages}\\n</messages>\";\nexport type TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\nexport declare const contextSizeSchema: z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>;\nexport type ContextSize = z.infer<typeof contextSizeSchema>;\nexport declare const keepSchema: z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n}>;\nexport type KeepSize = z.infer<typeof keepSchema>;\ndeclare const contextSchema: z.ZodObject<{\n /**\n * Model to use for summarization\n */\n model: z.ZodType<string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n /**\n * Trigger conditions for summarization.\n * Can be a single condition object (all properties must be met) or an array of conditions (any condition must be met).\n *\n * @example\n * ```ts\n * // Single condition: trigger if tokens >= 5000 AND messages >= 3\n * trigger: { tokens: 5000, messages: 3 }\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 }\n * ]\n * ```\n */\n trigger: z.ZodOptional<z.ZodUnion<[z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n 
messages?: number | undefined;\n }>, z.ZodArray<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, \"many\">]>>;\n /**\n * Keep conditions for summarization\n */\n keep: z.ZodOptional<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>>;\n /**\n * Token counter function to use for summarization\n */\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodType<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>, z.ZodTypeDef, BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>>, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n /**\n * Summary prompt to use for summarization\n * @default {@link DEFAULT_SUMMARY_PROMPT}\n */\n summaryPrompt: z.ZodDefault<z.ZodString>;\n /**\n * Number of tokens to trim to before summarizing\n */\n trimTokensToSummarize: z.ZodOptional<z.ZodNumber>;\n /**\n * Prefix to add to the summary\n */\n summaryPrefix: z.ZodOptional<z.ZodString>;\n /**\n * @deprecated Use `trigger: { tokens: value }` instead.\n */\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n /**\n * @deprecated Use `keep: { messages: value }` instead.\n */\n messagesToKeep: z.ZodOptional<z.ZodNumber>;\n}, \"strip\", z.ZodTypeAny, {\n model: string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n 
messagesToKeep?: number | undefined;\n}, {\n model: string | BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n}>;\nexport type SummarizationMiddlewareConfig = InferInteropZodInput<typeof contextSchema>;\n/**\n * Get max input tokens from model profile or fallback to model name lookup\n */\nexport declare function getProfileLimits(input: BaseLanguageModel): number | undefined;\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * // Single condition: trigger if tokens >= 4000 AND messages >= 10\n * const agent1 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: { tokens: 4000, messages: 10 },\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * const agent2 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 },\n * ],\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * ```\n */\nexport declare function summarizationMiddleware(options: SummarizationMiddlewareConfig): import(\"./types.js\").AgentMiddleware<undefined, z.ZodObject<{\n trigger: z.ZodOptional<z.ZodUnion<[z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: 
number | undefined;\n }>, z.ZodArray<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of messages to use as the trigger\n */\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, \"many\">]>>;\n keep: z.ZodOptional<z.ZodEffects<z.ZodObject<{\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z.ZodOptional<z.ZodNumber>;\n /**\n * Number of tokens to keep\n */\n tokens: z.ZodOptional<z.ZodNumber>;\n messages: z.ZodOptional<z.ZodNumber>;\n }, \"strip\", z.ZodTypeAny, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }, {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }>>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodType<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>, z.ZodTypeDef, BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>>, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n trimTokensToSummarize: z.ZodOptional<z.ZodNumber>;\n summaryPrefix: z.ZodOptional<z.ZodString>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodOptional<z.ZodNumber>;\n} & {\n model: z.ZodOptional<z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>>;\n}, \"strip\", z.ZodTypeAny, {\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n model?: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions> | undefined;\n}, {\n trigger?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n }[] | {\n fraction?: number | undefined;\n tokens?: 
number | undefined;\n messages?: number | undefined;\n } | undefined;\n keep?: {\n fraction?: number | undefined;\n tokens?: number | undefined;\n messages?: number | undefined;\n } | undefined;\n tokenCounter?: ((args_0: BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n trimTokensToSummarize?: number | undefined;\n summaryPrefix?: string | undefined;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n model?: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions> | undefined;\n}>, any>;\nexport {};\n//# sourceMappingURL=summarization.d.ts.map"],"mappings":";;;;;;;;;KAKYO,YAAAA,cAA0BJ,2BAA2BK;cAC5CC,mBAAmBP,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;AADvD;EACqBJ,QAAAA,EAIPP,CAAAA,CAAES,WAyBd,CAzB0BT,CAAAA,CAAEQ,SAyB5B,CAAA;EAzB4BA;;;EAIhBC,MAAAA,EAAFT,CAAAA,CAAES,WAAAA,CAAYT,CAAAA,CAAEQ,SAAdC,CAAAA;EAIgBD;;;EAZyBG,QAAAA,EAYzCX,CAAAA,CAAES,WAZuCE,CAY3BX,CAAAA,CAAEQ,SAZyBG,CAAAA;CAAfX,EAAEY,OAAAA,EAa9BZ,CAAAA,CAAEU,UAb4BE,EAAAA;EAAU,QAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA8BxCC,MAAAA,CAAAA,EAAAA,MAAW,GAAA,SAAkBN;EACpBQ,QAAAA,CAAAA,EAAAA,MA0BnB,GAAA,SAAA;CAtB0Bf,EAAEQ;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIYD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CACcT,CAAAA,EAAEQ;EAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAVkCC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAfX,EAAEY;EAAU,QAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EA2BjCI,MAAAA,CAAAA,EAAAA,MAAQ,GAAA,SAAkBD;EACxBE,QAAAA,CAAAA,EAAAA,MAmLZ,GAAA,SAAA;CAAAC,CAAAA;AA/K4BhB,KAjClBW,WAAAA,GAAcb,CAAAA,CAAEc,KAiCEZ,CAAAA,OAjCWK,iBAiCXL,CAAAA;AAAuGkB,cAhChHL,UAgCgHK,EAhCpGpB,CAAAA,CAAEY,UAgCkGQ,CAhCvFpB,CAAAA,CAAEW,SAgCqFS,CAAAA;EAAUF;;;EAqB7GV,QAAAA,EAjDpBR,CAAAA,CAAES,WAiDkBD,CAjDNR,CAAAA,CAAEQ,SAiDIA,CAAAA;EAAdC;;;EAQcD,MAAAA,EArDtBR,CAAAA,CAAES,WAqDoBD,CArDRR,CAAAA,CAAEQ,SAqDMA,CAAAA;EAAdC,QAAAA,EApDNT,CAAAA,CAAES,WAoDIA,CApDQT,CAAAA,CAAEQ,SAoDVC,CAAAA;CACJT,EAAEU,OAAAA,EApDNV,CAAAA,CAAEU,UAoDIA,EAAAA;EAboCC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAiCPJ,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,EAAES;EAIYD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIgBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,CAAAA,EAAES;EACFC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAbgBC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAbZ,EAAEsB;EA7BmBC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdd,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAkEmBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAhBR,CAAAA;AAIcQ,KA5FpBQ,QAAAA,GAAWhB,CAAAA,CAAEc,KA4FON,CAAAA,OA5FMO,UA4FNP,CAAAA;cA3FlBS,aA2FIR,EA3FWT,CAAAA,CAAEW,SA2FbF,CAAAA;EACgBD;;;EATKG,KAAAA,EA/E5BX,CAAAA,CAAEqB,OA+E0BV,CAAAA,MAAAA,GA/ETT,iBA+ESS,CAAAA,GAAAA,EAgGrCO,qCAAAA,CA/KkGC,4BAAAA,CA+E7DR,EA/E4FX,CAAAA,CAAEoB,UA+E9FT,EAAAA,MAAAA,GA/EmHT,iBA+EnHS,CAAAA,GAAAA,EA/EwGO,qCAAAA,CAAiFC,4BAAAA,CA+EzLR,CAAAA;EAAbC;;;;;;;;;;;;;;;;EA8BmUW,OAAAA,EA5FhVvB,CAAAA,CAAES,WA4F8Uc,CA5FlUvB,CAAAA,CAAEuB,QA4FgUA,CAAAA,CA5FtTvB,CAAAA,CAAEY,UA4FoTW,CA5FzSvB,CAAAA,CAAEW,SA4FuSY,CAAAA;IAA3TM;;;IAKbE,QAAAA,EA7FH/B,CAAAA,CAAES,WA6FCsB,CA7FW/B,CAAAA,CAAEQ,SA6FbuB,CAAAA;IAIsBvB;;;IAItBC,MAAAA,EAjGLT,CAAAA,CAAES,WAiGGA,CAjGST,CAAAA,CAAEQ,SAiGXC,CAAAA;IAIuBD;;;IAItBC,QAAAA,EArGJT,CAAAA,CAAES,WAqGEA,CArGUT,CAAAA,CAAEQ,SAqGZC,CAAAA;EACRC,CAAAA,EAAAA,OAAAA,EArGEV,CAAAA,CAAE
U,UAqGJA,EAAAA;IAAUQ,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACJhB,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAiBJ,QAAAA,CAAAA,EAAAA,MAAAA,GAeuC0B,SAAAA;EAAgB1B,CAAAA,EAAAA;IAA/DG,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAoJK,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAOY,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAOpKhB,CAAAA,CAAAA,EAAAA;IAAiBJ,QAAAA,CAAAA,EAAAA,MAAAA,GAeuC0B,SAAAA;IAAgB1B,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAqC2B;IAApGxB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAoJK,CAAAA,EAAAA;IA7KlJK,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAS,MAAA,CAAA,EAAA,MAAA,GAAA,SAAA;IAoL5BqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAA6B;EAmDjBE,CAAAA,CAAAA,EArLhBlC,CAAAA,CAAEsB,QAqLcY,CArLLlC,CAAAA,CAAEY,UAqL0B,CArLfZ,CAAAA,CAAEW,SAqLa,CAAA;IAAUqB;;;IASzBxB,QAAAA,EA1LdR,CAAAA,CAAES,WA0LYD,CA1LAR,CAAAA,CAAEQ,SA0LFA,CAAAA;IAAdC;;;IAKAC,MAAAA,EA3LFV,CAAAA,CAAES,WA2LAC,CA3LYV,CAAAA,CAAEQ,SA2LdE,CAAAA;IAboCC;;;IAiClCF,QAAAA,EA3MFT,CAAAA,CAAES,WA2MAA,CA3MYT,CAAAA,CAAEQ,SA2MdC,CAAAA;EAIYD,CAAAA,EAAAA,OAAAA,EA9MhBR,CAAAA,CAAEU,UA8McF,EAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAIgBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,CAAAA,EAAAA;IAbgBC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAbC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAXU,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EA7BmBC,CAAAA,CAAAA,EAAAA;IAAdd,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IA+DmBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAIYD,CAAAA,EAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACgBD,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACFC,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAVqBC;;;EAAhBb,IAAAA,EAhNbE,CAAAA,CAAES,WAgNWX,CAhNCE,CAAAA,CAAEY,UA2OoGY,CA3OzFxB,CAAAA,CAAEW,SA2OuFa,CAAAA;IAAgB1B;;;IAA+DA,QAAAA,EAvO3LE,CAAAA,CAAES,WAuO0Oe,CAvO9NxB,CAAAA,CAAEQ,SAuO4NgB,CAAAA;IAAgB1B;;;IAAlNwB,MAAAA,EAnO5CtB,CAAAA,CAAES,WAmO0Ca,CAnO9BtB,CAAAA,CAAEQ,SAmO4Bc,CAAAA;IAAkRI,QAAAA,EAlO5T1B,CAAAA,CAAES,WAkO0TiB,CAlO9S1B,CAAAA,CAAEQ,SAkO4SkB,CAAAA;EAA9RC,CAAAA,EAAAA,OAAAA,EAjOhC3B,CAAAA,CAAEU,UAiO8BiB,EAAAA;IAAyTnB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAA0BA,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAboB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAzBL,CAAAA,EAAAA;IAA3TM,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdpB,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACcqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAbC,CAAAA,CAAAA,EAAAA;IACsBvB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACMqB,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdrB,CAAAA,EAAAA;IACuBD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IAAdC,MAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;IACMD,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAdC,CAAAA,CAAAA,CAAAA;EAAWS;;;EAEmHA,YAAAA,EApNlIlB,CAAAA,CAAES,WAoNgIS,CApNpHlB,CAAAA,CAAE6B,WAoN0LV,CApN9KnB,CAAAA,CAAE2B,QAoN4KR,CAAAA,CApNlKnB,CAAAA,CAAEsB,QAoNgKH,CApNvJnB,CAAAA,CAAEqB,OAoNqJF,CApN7IlB,WAoN6IkB,CAlPrMrB,yBAAAA,CA8BuG0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,CAoNyCN,EApN3BnB,CAAAA,CAAEoB,UAoNyBD,EApNblB,WAoNakB,CApNfrB,yBAAAA,CAAiD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,CAoNvFN,CAAAA,EAAAA,MAAAA,CAAAA,CAAAA,EApNgHnB,CAAAA,CAAE0B,UAoNlHP,CAAAA,EApN+HnB,CAAAA,CAAEuB,QAoNjIJ,CAAAA,CApN2InB,CAAAA,CAAEQ,SAoN7IW,EApNwJnB,CAAAA,CAAE4B,UAoN1JT,CApNqKnB,CAAAA,CAAEQ,SAoNvKW,CAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAAtEjB;;;;EAC9HJ,aAAAA,EAhNLE,CAAAA,CAAE+B,UA+NuDP,CA/N5CxB,CAAAA,CAAE8B,SA+N0CN,CAAAA;EAAgB1B;;;EAA4FoB,qBAAAA,EA3N7JlB,CAAAA,CAAES,WAiOqDU,CAjOzCnB,CAAAA,CAAEQ,SAiOuCW,CAAAA;EAAtEjB;;;EAgBiBD,aAAAA,EA7OVD,CAAAA,CAAES,WA6OQR,CA7OID,CAAAA,CAAE8B,SA6ON7B,CAAAA;EAAoJK;;;EApItCK,sBAAAA,EArG/GX,CAAAA,CAAES,WAq
G6GE,CArGjGX,CAAAA,CAAEQ,SAqG+FG,CAAAA;;AAAd;;kBAjGzGX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;YACxBR,CAAAA,CAAEU;kBACMR,uBADIgB,qCAAAA,CACkEC,4BAAAA;;;;;;;;;;;;;;;2BAe7DlB,YAfQH,yBAAAA,CAeuC0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;;kBAO7JJ,uBAPoKgB,qCAAAA,CAO9FC,4BAAAA;;;;;;;;;;;;;;;2BAe7DlB,YAfQH,yBAAAA,CAeuC0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;;KAOrK0B,6BAAAA,GAAgC7B,4BAA4Bc;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAmDhDiB,uBAAAA,UAAiCF,2DAAgFhC,CAAAA,CAAEW;WAC9HX,CAAAA,CAAES,YAAYT,CAAAA,CAAEuB,UAAUvB,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIpCX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;cAIdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;MAgBVV,CAAAA,CAAEsB,SAAStB,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIhBX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;cAIdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;;QAiBRV,CAAAA,CAAES,YAAYT,CAAAA,CAAEY,WAAWZ,CAAAA,CAAEW;;;;cAIrBX,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;;;YAIlBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cACdR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;cAClBR,CAAAA,CAAEU;;;;;;;;;;;;;;;;;gBAiBAV,CAAAA,CAAES,YAAYT,CAAAA,CAAE6B,YAAY7B,CAAAA,CAAE2B,UAAU3B,CAAAA,CAAEsB,SAAStB,CAAAA,CAAEqB,QAAQpB,YA3BxDH,yBAAAA,CA2BuG0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,GAAczB,CAAAA,CAAEoB,YAAYnB,YAAFH,yBAAAA,CAAiD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,cAAyBzB,CAAAA,CAAE0B,aAAa1B,CAAAA,CAAEuB,UAAUvB,CAAAA,CAAEQ,WAAWR,CAAAA,CAAE4B,WAAW5B,CAAAA,CAAEQ;iBAChXR,CAAAA,CAAE+B,WAAW/B,CAAAA,CAAE8B;yBACP9B,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;iBACxBR,CAAAA,CAAES,YAAYT,CAAAA,CAAE8B;0BACP9B,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;kBACxBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;;SAEzBR,CAAAA,CAAES,YAAYT,CAAAA,CAAEqB,QAAQnB,uBAFFgB,qCAAAA,CAEwEC,4BAAAA,GAA+BnB,CAAAA,CAAEoB,YAAYlB,uBAAFgB,qCAAAA,CAAwEC,4BAAAA;YAChNnB,CAAAA,CAAEU;;;;;;;;;;;;;;;2BAeeT,YAfLH,yBAAAA,CAeoD0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;UAMrKJ,uBAN4KgB,qCAAAA,CAMtGC,4BAAAA;;;;;;;;;;;;;;;;2BAgBrDlB,YAhBAH,yBAAAA,CAgB+C0B,gBAAAA,EAAgB1B,yBAAAA,CAAqC2B,WAAAA,qCAAgDnB;;;;;;UAMrKJ,uBAN4KgB,qCAAAA,CAMtGC,4BAAAA"}
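The updated `summarization.d.cts` map above embeds the full declaration surface for `summarizationMiddleware`, including a `tokenCounter` option typed as `(messages: BaseMessage[]) => number | Promise<number>` (the exported `TokenCounter` type) that the embedded examples do not exercise. A minimal sketch of supplying one, assuming only the option shapes visible in these declarations and the `ChatOpenAI` model instance used in the package's own examples (requires `@langchain/openai` to be installed):

```ts
import { summarizationMiddleware, type TokenCounter } from "langchain";
import { ChatOpenAI } from "@langchain/openai";

// Hypothetical counter for illustration only: a rough character-based estimate
// (~4 characters per token). It is not the package's built-in counter.
const roughCounter: TokenCounter = (messages) =>
  Math.ceil(
    messages.reduce((sum, m) => sum + JSON.stringify(m.content).length, 0) / 4
  );

// Trigger/keep values mirror the shapes shown in the declaration above.
const middleware = summarizationMiddleware({
  model: new ChatOpenAI({ model: "gpt-4o" }),
  trigger: { tokens: 4000, messages: 10 },
  keep: { messages: 20 },
  tokenCounter: roughCounter,
});
```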
@@ -1 +1 @@
1
- {"version":3,"file":"toolCallLimit.d.cts","names":["z","InferInteropZodInput","ToolCallLimitExceededError","Error","ToolCallLimitOptionsSchema","ZodString","ZodOptional","ZodNumber","ZodEnum","ZodDefault","ZodTypeAny","ZodObject","ToolCallLimitConfig","toolCallLimitMiddleware","ZodRecord","Record","__types_js11","AgentMiddleware"],"sources":["../../../src/agents/middleware/toolCallLimit.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport type { InferInteropZodInput } from \"@langchain/core/utils/types\";\n/**\n * Exception raised when tool call limits are exceeded.\n *\n * This exception is raised when the configured exit behavior is 'error'\n * and either the thread or run tool call limit has been exceeded.\n */\nexport declare class ToolCallLimitExceededError extends Error {\n /**\n * Current thread tool call count.\n */\n threadCount: number;\n /**\n * Current run tool call count.\n */\n runCount: number;\n /**\n * Thread tool call limit (if set).\n */\n threadLimit: number | undefined;\n /**\n * Run tool call limit (if set).\n */\n runLimit: number | undefined;\n /**\n * Tool name being limited (if specific tool), or undefined for all tools.\n */\n toolName: string | undefined;\n constructor(threadCount: number, runCount: number, threadLimit: number | undefined, runLimit: number | undefined, toolName?: string | undefined);\n}\n/**\n * Options for configuring the Tool Call Limit middleware.\n */\nexport declare const ToolCallLimitOptionsSchema: z.ZodObject<{\n /**\n * Name of the specific tool to limit. If undefined, limits apply to all tools.\n */\n toolName: z.ZodOptional<z.ZodString>;\n /**\n * Maximum number of tool calls allowed per thread.\n * undefined means no limit.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * Maximum number of tool calls allowed per run.\n * undefined means no limit.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * What to do when limits are exceeded.\n * - \"continue\": Block exceeded tools with error messages, let other tools continue (default)\n * - \"error\": Raise a ToolCallLimitExceededError exception\n * - \"end\": Stop execution immediately, injecting a ToolMessage and an AI message\n * for the single tool call that exceeded the limit. Raises NotImplementedError\n * if there are multiple tool calls.\n *\n * @default \"continue\"\n */\n exitBehavior: z.ZodDefault<z.ZodEnum<[\"continue\", \"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n toolName?: string | undefined;\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior: \"continue\" | \"end\" | \"error\";\n}, {\n toolName?: string | undefined;\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"continue\" | \"end\" | \"error\" | undefined;\n}>;\nexport type ToolCallLimitConfig = InferInteropZodInput<typeof ToolCallLimitOptionsSchema>;\n/**\n * Middleware that tracks tool call counts and enforces limits.\n *\n * This middleware monitors the number of tool calls made during agent execution\n * and can terminate the agent when specified limits are reached. 
It supports\n * both thread-level and run-level call counting with configurable exit behaviors.\n *\n * Thread-level: The middleware counts all tool calls in the entire message history\n * and persists this count across multiple runs (invocations) of the agent.\n *\n * Run-level: The middleware counts tool calls made after the last HumanMessage,\n * representing the current run (invocation) of the agent.\n *\n * @param options - Configuration options for the middleware\n * @param options.toolName - Name of the specific tool to limit. If undefined, limits apply to all tools.\n * @param options.threadLimit - Maximum number of tool calls allowed per thread. undefined means no limit.\n * @param options.runLimit - Maximum number of tool calls allowed per run. undefined means no limit.\n * @param options.exitBehavior - What to do when limits are exceeded.\n * - \"continue\": Block exceeded tools with error messages, let other tools continue. Model decides when to end. (default)\n * - \"error\": Raise a ToolCallLimitExceededError exception\n * - \"end\": Stop execution immediately with a ToolMessage + AI message for the single tool call that exceeded the limit. Raises NotImplementedError if there are multiple tool calls.\n *\n * @throws {Error} If both limits are undefined, if exitBehavior is invalid, or if runLimit exceeds threadLimit.\n * @throws {NotImplementedError} If exitBehavior is \"end\" and there are multiple tool calls.\n *\n * @example Continue execution with blocked tools (default)\n * ```ts\n * import { toolCallLimitMiddleware } from \"@langchain/langchain/agents/middleware\";\n * import { createAgent } from \"@langchain/langchain/agents\";\n *\n * // Block exceeded tools but let other tools and model continue\n * const limiter = toolCallLimitMiddleware({\n * threadLimit: 20,\n * runLimit: 10,\n * exitBehavior: \"continue\", // default\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n * ```\n *\n * @example Stop immediately when limit exceeded\n * ```ts\n * // End execution immediately with an AI message\n * const limiter = toolCallLimitMiddleware({\n * runLimit: 5,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n * ```\n *\n * @example Raise exception on limit\n * ```ts\n * // Strict limit with exception handling\n * const limiter = toolCallLimitMiddleware({\n * toolName: \"search\",\n * threadLimit: 5,\n * exitBehavior: \"error\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n *\n * try {\n * const result = await agent.invoke({ messages: [new HumanMessage(\"Task\")] });\n * } catch (error) {\n * if (error instanceof ToolCallLimitExceededError) {\n * console.log(`Search limit exceeded: ${error}`);\n * }\n * }\n * ```\n */\nexport declare function toolCallLimitMiddleware(options: ToolCallLimitConfig): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n threadToolCallCount: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodNumber>>;\n runToolCallCount: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodNumber>>;\n}, \"strip\", z.ZodTypeAny, {\n threadToolCallCount: Record<string, number>;\n runToolCallCount: Record<string, number>;\n}, {\n threadToolCallCount?: Record<string, number> | undefined;\n runToolCallCount?: Record<string, number> | undefined;\n}>, undefined, any>;\n//# 
sourceMappingURL=toolCallLimit.d.ts.map"],"mappings":";;;;;;;;;AAQA;AA0BA;;AAIgBM,cA9BKJ,0BAAAA,SAAmCC,KAAK,CA8B7CG;EAKiBC;;;EAKjBD,WAAAA,EAAAA,MAAAA;EAWiBE;;;EAzBkBG,QAAAA,EAAAA,MAAAA;EAAS;AAqC5D;AAiFA;EAAyDC,WAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACLP;;;EAAzBI,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACsBJ;;;EAAzBI,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACVC,WAAAA,CAAAA,WAAAA,EAAAA,MAAAA,EAAAA,QAAAA,EAAAA,MAAAA,EAAAA,WAAAA,EAAAA,MAAAA,GAAAA,SAAAA,EAAAA,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA,EAAAA,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;;;;;AAHwGC,cAtHjGP,0BAsHiGO,EAtHrEX,CAAAA,CAAEW,SAsHmEA,CAAAA;;AAAH;;YAlHrGX,CAAAA,CAAEM,YAAYN,CAAAA,CAAEK;;;;;eAKbL,CAAAA,CAAEM,YAAYN,CAAAA,CAAEO;;;;;YAKnBP,CAAAA,CAAEM,YAAYN,CAAAA,CAAEO;;;;;;;;;;;gBAWZP,CAAAA,CAAES,WAAWT,CAAAA,CAAEQ;YACrBR,CAAAA,CAAEU;;;;;;;;;;;KAWFE,mBAAAA,GAAsBX,4BAA4BG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAiFtCS,uBAAAA,UAAiCD,sCAA2DZ,CAAAA,CAAEW;uBAC7FX,CAAAA,CAAES,WAAWT,CAAAA,CAAEc,UAAUd,CAAAA,CAAEK,WAAWL,CAAAA,CAAEO;oBAC3CP,CAAAA,CAAES,WAAWT,CAAAA,CAAEc,UAAUd,CAAAA,CAAEK,WAAWL,CAAAA,CAAEO;YAClDP,CAAAA,CAAEU;uBACWK;oBACHA;;wBAEIA;qBACHA"}
1
+ {"version":3,"file":"toolCallLimit.d.cts","names":["z","InferInteropZodInput","ToolCallLimitExceededError","Error","ToolCallLimitOptionsSchema","ZodString","ZodOptional","ZodNumber","ZodEnum","ZodDefault","ZodTypeAny","ZodObject","ToolCallLimitConfig","toolCallLimitMiddleware","ZodRecord","Record","__types_js10","AgentMiddleware"],"sources":["../../../src/agents/middleware/toolCallLimit.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport type { InferInteropZodInput } from \"@langchain/core/utils/types\";\n/**\n * Exception raised when tool call limits are exceeded.\n *\n * This exception is raised when the configured exit behavior is 'error'\n * and either the thread or run tool call limit has been exceeded.\n */\nexport declare class ToolCallLimitExceededError extends Error {\n /**\n * Current thread tool call count.\n */\n threadCount: number;\n /**\n * Current run tool call count.\n */\n runCount: number;\n /**\n * Thread tool call limit (if set).\n */\n threadLimit: number | undefined;\n /**\n * Run tool call limit (if set).\n */\n runLimit: number | undefined;\n /**\n * Tool name being limited (if specific tool), or undefined for all tools.\n */\n toolName: string | undefined;\n constructor(threadCount: number, runCount: number, threadLimit: number | undefined, runLimit: number | undefined, toolName?: string | undefined);\n}\n/**\n * Options for configuring the Tool Call Limit middleware.\n */\nexport declare const ToolCallLimitOptionsSchema: z.ZodObject<{\n /**\n * Name of the specific tool to limit. If undefined, limits apply to all tools.\n */\n toolName: z.ZodOptional<z.ZodString>;\n /**\n * Maximum number of tool calls allowed per thread.\n * undefined means no limit.\n */\n threadLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * Maximum number of tool calls allowed per run.\n * undefined means no limit.\n */\n runLimit: z.ZodOptional<z.ZodNumber>;\n /**\n * What to do when limits are exceeded.\n * - \"continue\": Block exceeded tools with error messages, let other tools continue (default)\n * - \"error\": Raise a ToolCallLimitExceededError exception\n * - \"end\": Stop execution immediately, injecting a ToolMessage and an AI message\n * for the single tool call that exceeded the limit. Raises NotImplementedError\n * if there are multiple tool calls.\n *\n * @default \"continue\"\n */\n exitBehavior: z.ZodDefault<z.ZodEnum<[\"continue\", \"error\", \"end\"]>>;\n}, \"strip\", z.ZodTypeAny, {\n toolName?: string | undefined;\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior: \"continue\" | \"end\" | \"error\";\n}, {\n toolName?: string | undefined;\n threadLimit?: number | undefined;\n runLimit?: number | undefined;\n exitBehavior?: \"continue\" | \"end\" | \"error\" | undefined;\n}>;\nexport type ToolCallLimitConfig = InferInteropZodInput<typeof ToolCallLimitOptionsSchema>;\n/**\n * Middleware that tracks tool call counts and enforces limits.\n *\n * This middleware monitors the number of tool calls made during agent execution\n * and can terminate the agent when specified limits are reached. 
It supports\n * both thread-level and run-level call counting with configurable exit behaviors.\n *\n * Thread-level: The middleware counts all tool calls in the entire message history\n * and persists this count across multiple runs (invocations) of the agent.\n *\n * Run-level: The middleware counts tool calls made after the last HumanMessage,\n * representing the current run (invocation) of the agent.\n *\n * @param options - Configuration options for the middleware\n * @param options.toolName - Name of the specific tool to limit. If undefined, limits apply to all tools.\n * @param options.threadLimit - Maximum number of tool calls allowed per thread. undefined means no limit.\n * @param options.runLimit - Maximum number of tool calls allowed per run. undefined means no limit.\n * @param options.exitBehavior - What to do when limits are exceeded.\n * - \"continue\": Block exceeded tools with error messages, let other tools continue. Model decides when to end. (default)\n * - \"error\": Raise a ToolCallLimitExceededError exception\n * - \"end\": Stop execution immediately with a ToolMessage + AI message for the single tool call that exceeded the limit. Raises NotImplementedError if there are multiple tool calls.\n *\n * @throws {Error} If both limits are undefined, if exitBehavior is invalid, or if runLimit exceeds threadLimit.\n * @throws {NotImplementedError} If exitBehavior is \"end\" and there are multiple tool calls.\n *\n * @example Continue execution with blocked tools (default)\n * ```ts\n * import { toolCallLimitMiddleware } from \"@langchain/langchain/agents/middleware\";\n * import { createAgent } from \"@langchain/langchain/agents\";\n *\n * // Block exceeded tools but let other tools and model continue\n * const limiter = toolCallLimitMiddleware({\n * threadLimit: 20,\n * runLimit: 10,\n * exitBehavior: \"continue\", // default\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n * ```\n *\n * @example Stop immediately when limit exceeded\n * ```ts\n * // End execution immediately with an AI message\n * const limiter = toolCallLimitMiddleware({\n * runLimit: 5,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n * ```\n *\n * @example Raise exception on limit\n * ```ts\n * // Strict limit with exception handling\n * const limiter = toolCallLimitMiddleware({\n * toolName: \"search\",\n * threadLimit: 5,\n * exitBehavior: \"error\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [limiter]\n * });\n *\n * try {\n * const result = await agent.invoke({ messages: [new HumanMessage(\"Task\")] });\n * } catch (error) {\n * if (error instanceof ToolCallLimitExceededError) {\n * console.log(`Search limit exceeded: ${error}`);\n * }\n * }\n * ```\n */\nexport declare function toolCallLimitMiddleware(options: ToolCallLimitConfig): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n threadToolCallCount: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodNumber>>;\n runToolCallCount: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodNumber>>;\n}, \"strip\", z.ZodTypeAny, {\n threadToolCallCount: Record<string, number>;\n runToolCallCount: Record<string, number>;\n}, {\n threadToolCallCount?: Record<string, number> | undefined;\n runToolCallCount?: Record<string, number> | undefined;\n}>, undefined, any>;\n//# 
sourceMappingURL=toolCallLimit.d.ts.map"],"mappings":";;;;;;;;;AAQA;AA0BA;;AAIgBM,cA9BKJ,0BAAAA,SAAmCC,KAAK,CA8B7CG;EAKiBC;;;EAKjBD,WAAAA,EAAAA,MAAAA;EAWiBE;;;EAzBkBG,QAAAA,EAAAA,MAAAA;EAAS;AAqC5D;AAiFA;EAAyDC,WAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACLP;;;EAAzBI,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACsBJ;;;EAAzBI,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EACVC,WAAAA,CAAAA,WAAAA,EAAAA,MAAAA,EAAAA,QAAAA,EAAAA,MAAAA,EAAAA,WAAAA,EAAAA,MAAAA,GAAAA,SAAAA,EAAAA,QAAAA,EAAAA,MAAAA,GAAAA,SAAAA,EAAAA,QAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;;;;;AAHwGC,cAtHjGP,0BAsHiGO,EAtHrEX,CAAAA,CAAEW,SAsHmEA,CAAAA;;AAAH;;YAlHrGX,CAAAA,CAAEM,YAAYN,CAAAA,CAAEK;;;;;eAKbL,CAAAA,CAAEM,YAAYN,CAAAA,CAAEO;;;;;YAKnBP,CAAAA,CAAEM,YAAYN,CAAAA,CAAEO;;;;;;;;;;;gBAWZP,CAAAA,CAAES,WAAWT,CAAAA,CAAEQ;YACrBR,CAAAA,CAAEU;;;;;;;;;;;KAWFE,mBAAAA,GAAsBX,4BAA4BG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAiFtCS,uBAAAA,UAAiCD,sCAA2DZ,CAAAA,CAAEW;uBAC7FX,CAAAA,CAAES,WAAWT,CAAAA,CAAEc,UAAUd,CAAAA,CAAEK,WAAWL,CAAAA,CAAEO;oBAC3CP,CAAAA,CAAES,WAAWT,CAAAA,CAAEc,UAAUd,CAAAA,CAAEK,WAAWL,CAAAA,CAAEO;YAClDP,CAAAA,CAAEU;uBACWK;oBACHA;;wBAEIA;qBACHA"}
package/dist/index.cjs CHANGED
@@ -26,6 +26,7 @@ const require_promptCaching = require('./agents/middleware/provider/anthropic/pr
26
26
  require('./agents/middleware/index.cjs');
27
27
  const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
28
28
  const __langchain_core_tools = require_rolldown_runtime.__toESM(require("@langchain/core/tools"));
29
+ const __langchain_core_utils_context = require_rolldown_runtime.__toESM(require("@langchain/core/utils/context"));
29
30
  const __langchain_core_stores = require_rolldown_runtime.__toESM(require("@langchain/core/stores"));
30
31
  const __langchain_core_documents = require_rolldown_runtime.__toESM(require("@langchain/core/documents"));
31
32
 
@@ -62,6 +63,7 @@ require_rolldown_runtime.__export(src_exports, {
62
63
  ToolStrategy: () => require_responses.ToolStrategy,
63
64
  anthropicPromptCachingMiddleware: () => require_promptCaching.anthropicPromptCachingMiddleware,
64
65
  applyStrategy: () => require_pii.applyStrategy,
66
+ context: () => __langchain_core_utils_context.context,
65
67
  contextEditingMiddleware: () => require_contextEditing.contextEditingMiddleware,
66
68
  countTokensApproximately: () => require_utils.countTokensApproximately,
67
69
  createAgent: () => require_index.createAgent,
@@ -205,6 +207,12 @@ Object.defineProperty(exports, 'ToolMessageChunk', {
205
207
  exports.ToolStrategy = require_responses.ToolStrategy;
206
208
  exports.anthropicPromptCachingMiddleware = require_promptCaching.anthropicPromptCachingMiddleware;
207
209
  exports.applyStrategy = require_pii.applyStrategy;
210
+ Object.defineProperty(exports, 'context', {
211
+ enumerable: true,
212
+ get: function () {
213
+ return __langchain_core_utils_context.context;
214
+ }
215
+ });
208
216
  exports.contextEditingMiddleware = require_contextEditing.contextEditingMiddleware;
209
217
  exports.countTokensApproximately = require_utils.countTokensApproximately;
210
218
  exports.createAgent = require_index.createAgent;
@@ -1 +1 @@
1
- {"version":3,"file":"index.cjs","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport {\n tool,\n Tool,\n type ToolRuntime,\n DynamicTool,\n StructuredTool,\n DynamicStructuredTool,\n} from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport * from \"./agents/index.js\";\n\n/**\n * `createAgent` pre-built middleware\n */\nexport * from \"./agents/middleware/index.js\";\n\n/**\n * LangChain Stores\n */\nexport { InMemoryStore } from \"@langchain/core/stores\";\n\n/**\n * LangChain Documents\n */\nexport { type DocumentInput, Document } from \"@langchain/core/documents\";\n"],"mappings":""}
1
+ {"version":3,"file":"index.cjs","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport {\n tool,\n Tool,\n type ToolRuntime,\n DynamicTool,\n StructuredTool,\n DynamicStructuredTool,\n} from \"@langchain/core/tools\";\n\n/**\n * LangChain utilities\n */\nexport { context } from \"@langchain/core/utils/context\";\n\n/**\n * LangChain Agents\n */\nexport * from \"./agents/index.js\";\n\n/**\n * `createAgent` pre-built middleware\n */\nexport * from \"./agents/middleware/index.js\";\n\n/**\n * LangChain Stores\n */\nexport { InMemoryStore } from \"@langchain/core/stores\";\n\n/**\n * LangChain Documents\n */\nexport { type DocumentInput, Document } from \"@langchain/core/documents\";\n"],"mappings":""}
package/dist/index.d.cts CHANGED
@@ -28,6 +28,7 @@ import { PromptCachingMiddlewareConfig, anthropicPromptCachingMiddleware } from
28
28
  import { countTokensApproximately } from "./agents/middleware/utils.cjs";
29
29
  import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ContentBlock, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
30
30
  import { DynamicStructuredTool, DynamicTool, StructuredTool, Tool, ToolRuntime, tool } from "@langchain/core/tools";
31
+ import { context } from "@langchain/core/utils/context";
31
32
  import { InMemoryStore } from "@langchain/core/stores";
32
33
  import { Document, DocumentInput } from "@langchain/core/documents";
33
- export { AIMessage, AIMessageChunk, Action, ActionRequest, AfterAgentHook, AfterModelHook, AgentMiddleware, AnyAnnotationRoot, ApproveDecision, BaseMessage, BaseMessageChunk, BeforeAgentHook, BeforeModelHook, BuiltInPIIType, BuiltInState, ClearToolUsesEdit, ClearToolUsesEditConfig, type ContentBlock, ContextEdit, ContextEditingMiddlewareConfig, CreateAgentParams, Decision, DecisionType, DescriptionFactory, Document, type DocumentInput, DynamicStructuredTool, DynamicSystemPromptMiddlewareConfig, DynamicTool, EditDecision, ExecutedToolCall, ExtractZodArrayTypes, FakeToolCallingModel, HITLRequest, HITLResponse, HumanInTheLoopMiddlewareConfig, HumanMessage, HumanMessageChunk, InMemoryStore, InferChannelType, InferContextInput, InferMergedInputState, InferMergedState, InferMiddlewareContext, InferMiddlewareContextInput, InferMiddlewareContextInputs, InferMiddlewareContexts, InferMiddlewareInputState, InferMiddlewareInputStates, InferMiddlewareState, InferMiddlewareStates, InferSchemaInput, Interrupt, InterruptOnConfig, JumpTo, JumpToTarget, LLMToolSelectorConfig, MIDDLEWARE_BRAND, MiddlewareResult, ModelCallLimitMiddlewareConfig, ModelRetryMiddlewareConfig, MultipleStructuredOutputsError, MultipleToolsBoundError, N, NormalizedSchemaInput, OpenAIModerationMiddlewareOptions, PIIDetectionError, PIIDetector, PIIMatch, PIIMiddlewareConfig, PIIRedactionMiddlewareConfig, PIIStrategy, PromptCachingMiddlewareConfig, ProviderStrategy, ReactAgent, RedactionRuleConfig, RejectDecision, ResolvedRedactionRule, ResponseFormat, ResponseFormatUndefined, ReviewConfig, Runtime, StructuredOutputParsingError, StructuredTool, SummarizationMiddlewareConfig, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, ToAnnotationRoot, TodoListMiddlewareOptions, TokenCounter, Tool, ToolCall, ToolCallHandler, ToolCallLimitConfig, ToolCallLimitExceededError, ToolCallRequest, ToolEmulatorOptions, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolResult, ToolRetryMiddlewareConfig, type ToolRuntime, ToolStrategy, UserInput, WithStateGraphNodes, WrapModelCallHandler, WrapModelCallHook, WrapToolCallHook, anthropicPromptCachingMiddleware, applyStrategy, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
+ export { AIMessage, AIMessageChunk, Action, ActionRequest, AfterAgentHook, AfterModelHook, AgentMiddleware, AnyAnnotationRoot, ApproveDecision, BaseMessage, BaseMessageChunk, BeforeAgentHook, BeforeModelHook, BuiltInPIIType, BuiltInState, ClearToolUsesEdit, ClearToolUsesEditConfig, type ContentBlock, ContextEdit, ContextEditingMiddlewareConfig, CreateAgentParams, Decision, DecisionType, DescriptionFactory, Document, type DocumentInput, DynamicStructuredTool, DynamicSystemPromptMiddlewareConfig, DynamicTool, EditDecision, ExecutedToolCall, ExtractZodArrayTypes, FakeToolCallingModel, HITLRequest, HITLResponse, HumanInTheLoopMiddlewareConfig, HumanMessage, HumanMessageChunk, InMemoryStore, InferChannelType, InferContextInput, InferMergedInputState, InferMergedState, InferMiddlewareContext, InferMiddlewareContextInput, InferMiddlewareContextInputs, InferMiddlewareContexts, InferMiddlewareInputState, InferMiddlewareInputStates, InferMiddlewareState, InferMiddlewareStates, InferSchemaInput, Interrupt, InterruptOnConfig, JumpTo, JumpToTarget, LLMToolSelectorConfig, MIDDLEWARE_BRAND, MiddlewareResult, ModelCallLimitMiddlewareConfig, ModelRetryMiddlewareConfig, MultipleStructuredOutputsError, MultipleToolsBoundError, N, NormalizedSchemaInput, OpenAIModerationMiddlewareOptions, PIIDetectionError, PIIDetector, PIIMatch, PIIMiddlewareConfig, PIIRedactionMiddlewareConfig, PIIStrategy, PromptCachingMiddlewareConfig, ProviderStrategy, ReactAgent, RedactionRuleConfig, RejectDecision, ResolvedRedactionRule, ResponseFormat, ResponseFormatUndefined, ReviewConfig, Runtime, StructuredOutputParsingError, StructuredTool, SummarizationMiddlewareConfig, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, ToAnnotationRoot, TodoListMiddlewareOptions, TokenCounter, Tool, ToolCall, ToolCallHandler, ToolCallLimitConfig, ToolCallLimitExceededError, ToolCallRequest, ToolEmulatorOptions, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolResult, ToolRetryMiddlewareConfig, type ToolRuntime, ToolStrategy, UserInput, WithStateGraphNodes, WrapModelCallHandler, WrapModelCallHook, WrapToolCallHook, anthropicPromptCachingMiddleware, applyStrategy, context, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
package/dist/index.d.ts CHANGED
@@ -29,6 +29,7 @@ import { countTokensApproximately } from "./agents/middleware/utils.js";
  import "./agents/middleware/index.js";
  import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ContentBlock, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
  import { DynamicStructuredTool, DynamicTool, StructuredTool, Tool, ToolRuntime, tool } from "@langchain/core/tools";
+ import { context } from "@langchain/core/utils/context";
  import { InMemoryStore } from "@langchain/core/stores";
  import { Document, DocumentInput } from "@langchain/core/documents";
- export { AIMessage, AIMessageChunk, Action, ActionRequest, AfterAgentHook, AfterModelHook, AgentMiddleware, AnyAnnotationRoot, ApproveDecision, BaseMessage, BaseMessageChunk, BeforeAgentHook, BeforeModelHook, BuiltInPIIType, BuiltInState, ClearToolUsesEdit, ClearToolUsesEditConfig, type ContentBlock, ContextEdit, ContextEditingMiddlewareConfig, CreateAgentParams, Decision, DecisionType, DescriptionFactory, Document, type DocumentInput, DynamicStructuredTool, DynamicSystemPromptMiddlewareConfig, DynamicTool, EditDecision, ExecutedToolCall, ExtractZodArrayTypes, FakeToolCallingModel, HITLRequest, HITLResponse, HumanInTheLoopMiddlewareConfig, HumanMessage, HumanMessageChunk, InMemoryStore, InferChannelType, InferContextInput, InferMergedInputState, InferMergedState, InferMiddlewareContext, InferMiddlewareContextInput, InferMiddlewareContextInputs, InferMiddlewareContexts, InferMiddlewareInputState, InferMiddlewareInputStates, InferMiddlewareState, InferMiddlewareStates, InferSchemaInput, Interrupt, InterruptOnConfig, JumpTo, JumpToTarget, LLMToolSelectorConfig, MIDDLEWARE_BRAND, MiddlewareResult, ModelCallLimitMiddlewareConfig, ModelRetryMiddlewareConfig, MultipleStructuredOutputsError, MultipleToolsBoundError, N, NormalizedSchemaInput, OpenAIModerationMiddlewareOptions, PIIDetectionError, PIIDetector, PIIMatch, PIIMiddlewareConfig, PIIRedactionMiddlewareConfig, PIIStrategy, PromptCachingMiddlewareConfig, ProviderStrategy, ReactAgent, RedactionRuleConfig, RejectDecision, ResolvedRedactionRule, ResponseFormat, ResponseFormatUndefined, ReviewConfig, Runtime, StructuredOutputParsingError, StructuredTool, SummarizationMiddlewareConfig, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, ToAnnotationRoot, TodoListMiddlewareOptions, TokenCounter, Tool, ToolCall, ToolCallHandler, ToolCallLimitConfig, ToolCallLimitExceededError, ToolCallRequest, ToolEmulatorOptions, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolResult, ToolRetryMiddlewareConfig, type ToolRuntime, ToolStrategy, UserInput, WithStateGraphNodes, WrapModelCallHandler, WrapModelCallHook, WrapToolCallHook, anthropicPromptCachingMiddleware, applyStrategy, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
+ export { AIMessage, AIMessageChunk, Action, ActionRequest, AfterAgentHook, AfterModelHook, AgentMiddleware, AnyAnnotationRoot, ApproveDecision, BaseMessage, BaseMessageChunk, BeforeAgentHook, BeforeModelHook, BuiltInPIIType, BuiltInState, ClearToolUsesEdit, ClearToolUsesEditConfig, type ContentBlock, ContextEdit, ContextEditingMiddlewareConfig, CreateAgentParams, Decision, DecisionType, DescriptionFactory, Document, type DocumentInput, DynamicStructuredTool, DynamicSystemPromptMiddlewareConfig, DynamicTool, EditDecision, ExecutedToolCall, ExtractZodArrayTypes, FakeToolCallingModel, HITLRequest, HITLResponse, HumanInTheLoopMiddlewareConfig, HumanMessage, HumanMessageChunk, InMemoryStore, InferChannelType, InferContextInput, InferMergedInputState, InferMergedState, InferMiddlewareContext, InferMiddlewareContextInput, InferMiddlewareContextInputs, InferMiddlewareContexts, InferMiddlewareInputState, InferMiddlewareInputStates, InferMiddlewareState, InferMiddlewareStates, InferSchemaInput, Interrupt, InterruptOnConfig, JumpTo, JumpToTarget, LLMToolSelectorConfig, MIDDLEWARE_BRAND, MiddlewareResult, ModelCallLimitMiddlewareConfig, ModelRetryMiddlewareConfig, MultipleStructuredOutputsError, MultipleToolsBoundError, N, NormalizedSchemaInput, OpenAIModerationMiddlewareOptions, PIIDetectionError, PIIDetector, PIIMatch, PIIMiddlewareConfig, PIIRedactionMiddlewareConfig, PIIStrategy, PromptCachingMiddlewareConfig, ProviderStrategy, ReactAgent, RedactionRuleConfig, RejectDecision, ResolvedRedactionRule, ResponseFormat, ResponseFormatUndefined, ReviewConfig, Runtime, StructuredOutputParsingError, StructuredTool, SummarizationMiddlewareConfig, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, ToAnnotationRoot, TodoListMiddlewareOptions, TokenCounter, Tool, ToolCall, ToolCallHandler, ToolCallLimitConfig, ToolCallLimitExceededError, ToolCallRequest, ToolEmulatorOptions, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolResult, ToolRetryMiddlewareConfig, type ToolRuntime, ToolStrategy, UserInput, WithStateGraphNodes, WrapModelCallHandler, WrapModelCallHook, WrapToolCallHook, anthropicPromptCachingMiddleware, applyStrategy, context, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
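The two declaration diffs above (dist/index.d.cts for `require` consumers and dist/index.d.ts for `import` consumers) are identical apart from the file they patch: both add `context` to the re-exported type surface. A minimal consumer-side sketch of what that implies, assuming langchain@1.2.3 and a matching @langchain/core are installed; the `in` checks are expectations read off the export lists above, not output captured from a real run:

```ts
// Checks that `context` is reachable from both entry points of langchain@1.2.3.
// dist/index.d.cts types the CommonJS entry; dist/index.d.ts types the ESM entry.
import { createRequire } from "node:module";

const require = createRequire(import.meta.url);

const cjsEntry = require("langchain");       // typed by dist/index.d.cts
console.log("context" in cjsEntry);          // expected: true in 1.2.3, false in 1.2.1

const esmEntry = await import("langchain");  // typed by dist/index.d.ts
console.log("context" in esmEntry);          // expected: true in 1.2.3, false in 1.2.1
```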
package/dist/index.js CHANGED
@@ -26,6 +26,7 @@ import { anthropicPromptCachingMiddleware } from "./agents/middleware/provider/a
  import "./agents/middleware/index.js";
  import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
  import { DynamicStructuredTool, DynamicTool, StructuredTool, Tool, tool } from "@langchain/core/tools";
+ import { context } from "@langchain/core/utils/context";
  import { InMemoryStore } from "@langchain/core/stores";
  import { Document } from "@langchain/core/documents";
 
@@ -62,6 +63,7 @@ __export(src_exports, {
  ToolStrategy: () => ToolStrategy,
  anthropicPromptCachingMiddleware: () => anthropicPromptCachingMiddleware,
  applyStrategy: () => applyStrategy,
+ context: () => context,
  contextEditingMiddleware: () => contextEditingMiddleware,
  countTokensApproximately: () => countTokensApproximately,
  createAgent: () => createAgent,
@@ -95,5 +97,5 @@ __export(src_exports, {
  });
 
  //#endregion
- export { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ClearToolUsesEdit, Document, DynamicStructuredTool, DynamicTool, FakeToolCallingModel, HumanMessage, HumanMessageChunk, InMemoryStore, MIDDLEWARE_BRAND, MultipleStructuredOutputsError, MultipleToolsBoundError, PIIDetectionError, ProviderStrategy, StructuredOutputParsingError, StructuredTool, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, Tool, ToolCallLimitExceededError, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolStrategy, anthropicPromptCachingMiddleware, applyStrategy, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, src_exports, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
+ export { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ClearToolUsesEdit, Document, DynamicStructuredTool, DynamicTool, FakeToolCallingModel, HumanMessage, HumanMessageChunk, InMemoryStore, MIDDLEWARE_BRAND, MultipleStructuredOutputsError, MultipleToolsBoundError, PIIDetectionError, ProviderStrategy, StructuredOutputParsingError, StructuredTool, SystemMessage, SystemMessageChunk, TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT, Tool, ToolCallLimitExceededError, ToolInvocationError, ToolMessage, ToolMessageChunk, ToolStrategy, anthropicPromptCachingMiddleware, applyStrategy, context, contextEditingMiddleware, countTokensApproximately, createAgent, createMiddleware, detectCreditCard, detectEmail, detectIP, detectMacAddress, detectUrl, dynamicSystemPromptMiddleware, filterMessages, humanInTheLoopMiddleware, initChatModel, llmToolSelectorMiddleware, modelCallLimitMiddleware, modelFallbackMiddleware, modelRetryMiddleware, openAIModerationMiddleware, piiMiddleware, piiRedactionMiddleware, providerStrategy, resolveRedactionRule, src_exports, summarizationMiddleware, todoListMiddleware, tool, toolCallLimitMiddleware, toolEmulatorMiddleware, toolRetryMiddleware, toolStrategy, trimMessages };
  //# sourceMappingURL=index.js.map
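In the runtime bundle, the new export is wired through the bundler's `__export` helper, which registers each name as a live getter on the `src_exports` namespace object. The sketch below is a simplified stand-in for that helper (the real emitted helper and surrounding module code are not part of this diff), using placeholder values only to illustrate the `context: () => context` entry added above:

```ts
// Simplified stand-in for the bundler's __export helper: each export name maps
// to a getter thunk so the namespace object always reflects the live binding.
function __export(
  target: Record<string, unknown>,
  all: Record<string, () => unknown>
): void {
  for (const name in all) {
    Object.defineProperty(target, name, { get: all[name], enumerable: true });
  }
}

// Placeholder values standing in for the real langchain exports.
const context = Symbol("context");
const contextEditingMiddleware = () => {};

const src_exports: Record<string, unknown> = {};
__export(src_exports, {
  context: () => context,                             // the entry added in 1.2.3
  contextEditingMiddleware: () => contextEditingMiddleware,
});

console.log(Object.keys(src_exports)); // ["context", "contextEditingMiddleware"]
```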
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.js","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport {\n tool,\n Tool,\n type ToolRuntime,\n DynamicTool,\n StructuredTool,\n DynamicStructuredTool,\n} from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport * from \"./agents/index.js\";\n\n/**\n * `createAgent` pre-built middleware\n */\nexport * from \"./agents/middleware/index.js\";\n\n/**\n * LangChain Stores\n */\nexport { InMemoryStore } from \"@langchain/core/stores\";\n\n/**\n * LangChain Documents\n */\nexport { type DocumentInput, Document } from \"@langchain/core/documents\";\n"],"mappings":""}
+ {"version":3,"file":"index.js","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport {\n tool,\n Tool,\n type ToolRuntime,\n DynamicTool,\n StructuredTool,\n DynamicStructuredTool,\n} from \"@langchain/core/tools\";\n\n/**\n * LangChain utilities\n */\nexport { context } from \"@langchain/core/utils/context\";\n\n/**\n * LangChain Agents\n */\nexport * from \"./agents/index.js\";\n\n/**\n * `createAgent` pre-built middleware\n */\nexport * from \"./agents/middleware/index.js\";\n\n/**\n * LangChain Stores\n */\nexport { InMemoryStore } from \"@langchain/core/stores\";\n\n/**\n * LangChain Documents\n */\nexport { type DocumentInput, Document } from \"@langchain/core/documents\";\n"],"mappings":""}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "1.2.1",
+ "version": "1.2.3",
  "description": "Typescript bindings for langchain",
  "author": "LangChain",
  "license": "MIT",
@@ -53,20 +53,20 @@
  "typescript": "~5.8.3",
  "vitest": "^3.2.4",
  "yaml": "^2.8.1",
- "@langchain/anthropic": "1.3.1",
+ "@langchain/anthropic": "1.3.3",
  "@langchain/cohere": "1.0.1",
- "@langchain/core": "1.1.6",
- "@langchain/eslint": "0.1.1",
+ "@langchain/core": "1.1.8",
  "@langchain/openai": "1.2.0",
+ "@langchain/eslint": "0.1.1",
  "@langchain/tsconfig": "0.0.1"
  },
  "peerDependencies": {
- "@langchain/core": "1.1.6"
+ "@langchain/core": "1.1.8"
  },
  "dependencies": {
  "@langchain/langgraph": "^1.0.0",
  "@langchain/langgraph-checkpoint": "^1.0.0",
- "langsmith": "~0.3.74",
+ "langsmith": ">=0.4.0 <1.0.0",
  "uuid": "^10.0.0",
  "zod": "^3.25.76 || ^4"
  },
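Beyond the version bump and the devDependency/peerDependency alignment on @langchain/core 1.1.8, the most consequential change for installs is the `langsmith` range, which moves from a tilde range on 0.3.x to an explicit window below 1.0.0. A small sketch of what the two ranges accept, using the `semver` package (not a dependency shown in this diff, assumed only for illustration):

```ts
// Illustrates the langsmith range change between langchain 1.2.1 and 1.2.3.
import { satisfies } from "semver";

const oldRange = "~0.3.74";        // langchain 1.2.1: >=0.3.74 <0.4.0
const newRange = ">=0.4.0 <1.0.0"; // langchain 1.2.3

console.log(satisfies("0.3.80", oldRange)); // true  — 0.3.x patches were accepted
console.log(satisfies("0.4.0", oldRange));  // false — 0.4.x was excluded before
console.log(satisfies("0.4.0", newRange));  // true  — now the minimum
console.log(satisfies("0.9.9", newRange));  // true  — any pre-1.0 minor is allowed
console.log(satisfies("1.0.0", newRange));  // false — 1.x still excluded
```

The widened range lets consumers pick up new 0.x minors of LangSmith without waiting for another langchain release, while still excluding a future 1.0 major.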