@illuma-ai/agents 1.0.81
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +485 -0
- package/dist/cjs/agents/AgentContext.cjs +734 -0
- package/dist/cjs/agents/AgentContext.cjs.map +1 -0
- package/dist/cjs/common/enum.cjs +190 -0
- package/dist/cjs/common/enum.cjs.map +1 -0
- package/dist/cjs/events.cjs +172 -0
- package/dist/cjs/events.cjs.map +1 -0
- package/dist/cjs/graphs/Graph.cjs +1615 -0
- package/dist/cjs/graphs/Graph.cjs.map +1 -0
- package/dist/cjs/graphs/MultiAgentGraph.cjs +890 -0
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
- package/dist/cjs/instrumentation.cjs +21 -0
- package/dist/cjs/instrumentation.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/index.cjs +292 -0
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/types.cjs +50 -0
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +630 -0
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
- package/dist/cjs/llm/bedrock/index.cjs +282 -0
- package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
- package/dist/cjs/llm/fake.cjs +97 -0
- package/dist/cjs/llm/fake.cjs.map +1 -0
- package/dist/cjs/llm/google/index.cjs +216 -0
- package/dist/cjs/llm/google/index.cjs.map +1 -0
- package/dist/cjs/llm/google/utils/common.cjs +647 -0
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +1028 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -0
- package/dist/cjs/llm/openai/utils/index.cjs +765 -0
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
- package/dist/cjs/llm/openrouter/index.cjs +212 -0
- package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
- package/dist/cjs/llm/providers.cjs +43 -0
- package/dist/cjs/llm/providers.cjs.map +1 -0
- package/dist/cjs/llm/text.cjs +69 -0
- package/dist/cjs/llm/text.cjs.map +1 -0
- package/dist/cjs/llm/vertexai/index.cjs +329 -0
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
- package/dist/cjs/main.cjs +240 -0
- package/dist/cjs/main.cjs.map +1 -0
- package/dist/cjs/messages/cache.cjs +387 -0
- package/dist/cjs/messages/cache.cjs.map +1 -0
- package/dist/cjs/messages/content.cjs +53 -0
- package/dist/cjs/messages/content.cjs.map +1 -0
- package/dist/cjs/messages/core.cjs +367 -0
- package/dist/cjs/messages/core.cjs.map +1 -0
- package/dist/cjs/messages/format.cjs +761 -0
- package/dist/cjs/messages/format.cjs.map +1 -0
- package/dist/cjs/messages/ids.cjs +23 -0
- package/dist/cjs/messages/ids.cjs.map +1 -0
- package/dist/cjs/messages/prune.cjs +398 -0
- package/dist/cjs/messages/prune.cjs.map +1 -0
- package/dist/cjs/messages/tools.cjs +96 -0
- package/dist/cjs/messages/tools.cjs.map +1 -0
- package/dist/cjs/run.cjs +328 -0
- package/dist/cjs/run.cjs.map +1 -0
- package/dist/cjs/schemas/validate.cjs +324 -0
- package/dist/cjs/schemas/validate.cjs.map +1 -0
- package/dist/cjs/splitStream.cjs +210 -0
- package/dist/cjs/splitStream.cjs.map +1 -0
- package/dist/cjs/stream.cjs +620 -0
- package/dist/cjs/stream.cjs.map +1 -0
- package/dist/cjs/tools/BrowserTools.cjs +248 -0
- package/dist/cjs/tools/BrowserTools.cjs.map +1 -0
- package/dist/cjs/tools/Calculator.cjs +66 -0
- package/dist/cjs/tools/Calculator.cjs.map +1 -0
- package/dist/cjs/tools/CodeExecutor.cjs +234 -0
- package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
- package/dist/cjs/tools/ProgrammaticToolCalling.cjs +636 -0
- package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -0
- package/dist/cjs/tools/ToolNode.cjs +548 -0
- package/dist/cjs/tools/ToolNode.cjs.map +1 -0
- package/dist/cjs/tools/ToolSearch.cjs +909 -0
- package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
- package/dist/cjs/tools/handlers.cjs +255 -0
- package/dist/cjs/tools/handlers.cjs.map +1 -0
- package/dist/cjs/tools/schema.cjs +31 -0
- package/dist/cjs/tools/schema.cjs.map +1 -0
- package/dist/cjs/tools/search/anthropic.cjs +40 -0
- package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
- package/dist/cjs/tools/search/content.cjs +140 -0
- package/dist/cjs/tools/search/content.cjs.map +1 -0
- package/dist/cjs/tools/search/firecrawl.cjs +179 -0
- package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
- package/dist/cjs/tools/search/format.cjs +203 -0
- package/dist/cjs/tools/search/format.cjs.map +1 -0
- package/dist/cjs/tools/search/highlights.cjs +245 -0
- package/dist/cjs/tools/search/highlights.cjs.map +1 -0
- package/dist/cjs/tools/search/rerankers.cjs +174 -0
- package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
- package/dist/cjs/tools/search/schema.cjs +117 -0
- package/dist/cjs/tools/search/schema.cjs.map +1 -0
- package/dist/cjs/tools/search/search.cjs +566 -0
- package/dist/cjs/tools/search/search.cjs.map +1 -0
- package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
- package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
- package/dist/cjs/tools/search/tool.cjs +456 -0
- package/dist/cjs/tools/search/tool.cjs.map +1 -0
- package/dist/cjs/tools/search/utils.cjs +66 -0
- package/dist/cjs/tools/search/utils.cjs.map +1 -0
- package/dist/cjs/types/graph.cjs +29 -0
- package/dist/cjs/types/graph.cjs.map +1 -0
- package/dist/cjs/utils/contextAnalytics.cjs +66 -0
- package/dist/cjs/utils/contextAnalytics.cjs.map +1 -0
- package/dist/cjs/utils/events.cjs +31 -0
- package/dist/cjs/utils/events.cjs.map +1 -0
- package/dist/cjs/utils/graph.cjs +16 -0
- package/dist/cjs/utils/graph.cjs.map +1 -0
- package/dist/cjs/utils/handlers.cjs +70 -0
- package/dist/cjs/utils/handlers.cjs.map +1 -0
- package/dist/cjs/utils/llm.cjs +27 -0
- package/dist/cjs/utils/llm.cjs.map +1 -0
- package/dist/cjs/utils/misc.cjs +56 -0
- package/dist/cjs/utils/misc.cjs.map +1 -0
- package/dist/cjs/utils/run.cjs +73 -0
- package/dist/cjs/utils/run.cjs.map +1 -0
- package/dist/cjs/utils/schema.cjs +27 -0
- package/dist/cjs/utils/schema.cjs.map +1 -0
- package/dist/cjs/utils/title.cjs +125 -0
- package/dist/cjs/utils/title.cjs.map +1 -0
- package/dist/cjs/utils/tokens.cjs +125 -0
- package/dist/cjs/utils/tokens.cjs.map +1 -0
- package/dist/cjs/utils/toonFormat.cjs +388 -0
- package/dist/cjs/utils/toonFormat.cjs.map +1 -0
- package/dist/esm/agents/AgentContext.mjs +732 -0
- package/dist/esm/agents/AgentContext.mjs.map +1 -0
- package/dist/esm/common/enum.mjs +190 -0
- package/dist/esm/common/enum.mjs.map +1 -0
- package/dist/esm/events.mjs +164 -0
- package/dist/esm/events.mjs.map +1 -0
- package/dist/esm/graphs/Graph.mjs +1612 -0
- package/dist/esm/graphs/Graph.mjs.map +1 -0
- package/dist/esm/graphs/MultiAgentGraph.mjs +888 -0
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
- package/dist/esm/instrumentation.mjs +19 -0
- package/dist/esm/instrumentation.mjs.map +1 -0
- package/dist/esm/llm/anthropic/index.mjs +290 -0
- package/dist/esm/llm/anthropic/index.mjs.map +1 -0
- package/dist/esm/llm/anthropic/types.mjs +48 -0
- package/dist/esm/llm/anthropic/types.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +627 -0
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
- package/dist/esm/llm/bedrock/index.mjs +280 -0
- package/dist/esm/llm/bedrock/index.mjs.map +1 -0
- package/dist/esm/llm/fake.mjs +94 -0
- package/dist/esm/llm/fake.mjs.map +1 -0
- package/dist/esm/llm/google/index.mjs +214 -0
- package/dist/esm/llm/google/index.mjs.map +1 -0
- package/dist/esm/llm/google/utils/common.mjs +638 -0
- package/dist/esm/llm/google/utils/common.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +1018 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -0
- package/dist/esm/llm/openai/utils/index.mjs +759 -0
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
- package/dist/esm/llm/openrouter/index.mjs +210 -0
- package/dist/esm/llm/openrouter/index.mjs.map +1 -0
- package/dist/esm/llm/providers.mjs +39 -0
- package/dist/esm/llm/providers.mjs.map +1 -0
- package/dist/esm/llm/text.mjs +67 -0
- package/dist/esm/llm/text.mjs.map +1 -0
- package/dist/esm/llm/vertexai/index.mjs +327 -0
- package/dist/esm/llm/vertexai/index.mjs.map +1 -0
- package/dist/esm/main.mjs +37 -0
- package/dist/esm/main.mjs.map +1 -0
- package/dist/esm/messages/cache.mjs +382 -0
- package/dist/esm/messages/cache.mjs.map +1 -0
- package/dist/esm/messages/content.mjs +51 -0
- package/dist/esm/messages/content.mjs.map +1 -0
- package/dist/esm/messages/core.mjs +359 -0
- package/dist/esm/messages/core.mjs.map +1 -0
- package/dist/esm/messages/format.mjs +752 -0
- package/dist/esm/messages/format.mjs.map +1 -0
- package/dist/esm/messages/ids.mjs +21 -0
- package/dist/esm/messages/ids.mjs.map +1 -0
- package/dist/esm/messages/prune.mjs +393 -0
- package/dist/esm/messages/prune.mjs.map +1 -0
- package/dist/esm/messages/tools.mjs +93 -0
- package/dist/esm/messages/tools.mjs.map +1 -0
- package/dist/esm/run.mjs +325 -0
- package/dist/esm/run.mjs.map +1 -0
- package/dist/esm/schemas/validate.mjs +317 -0
- package/dist/esm/schemas/validate.mjs.map +1 -0
- package/dist/esm/splitStream.mjs +207 -0
- package/dist/esm/splitStream.mjs.map +1 -0
- package/dist/esm/stream.mjs +616 -0
- package/dist/esm/stream.mjs.map +1 -0
- package/dist/esm/tools/BrowserTools.mjs +244 -0
- package/dist/esm/tools/BrowserTools.mjs.map +1 -0
- package/dist/esm/tools/Calculator.mjs +41 -0
- package/dist/esm/tools/Calculator.mjs.map +1 -0
- package/dist/esm/tools/CodeExecutor.mjs +226 -0
- package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
- package/dist/esm/tools/ProgrammaticToolCalling.mjs +622 -0
- package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -0
- package/dist/esm/tools/ToolNode.mjs +545 -0
- package/dist/esm/tools/ToolNode.mjs.map +1 -0
- package/dist/esm/tools/ToolSearch.mjs +870 -0
- package/dist/esm/tools/ToolSearch.mjs.map +1 -0
- package/dist/esm/tools/handlers.mjs +250 -0
- package/dist/esm/tools/handlers.mjs.map +1 -0
- package/dist/esm/tools/schema.mjs +28 -0
- package/dist/esm/tools/schema.mjs.map +1 -0
- package/dist/esm/tools/search/anthropic.mjs +37 -0
- package/dist/esm/tools/search/anthropic.mjs.map +1 -0
- package/dist/esm/tools/search/content.mjs +119 -0
- package/dist/esm/tools/search/content.mjs.map +1 -0
- package/dist/esm/tools/search/firecrawl.mjs +176 -0
- package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
- package/dist/esm/tools/search/format.mjs +201 -0
- package/dist/esm/tools/search/format.mjs.map +1 -0
- package/dist/esm/tools/search/highlights.mjs +243 -0
- package/dist/esm/tools/search/highlights.mjs.map +1 -0
- package/dist/esm/tools/search/rerankers.mjs +168 -0
- package/dist/esm/tools/search/rerankers.mjs.map +1 -0
- package/dist/esm/tools/search/schema.mjs +104 -0
- package/dist/esm/tools/search/schema.mjs.map +1 -0
- package/dist/esm/tools/search/search.mjs +563 -0
- package/dist/esm/tools/search/search.mjs.map +1 -0
- package/dist/esm/tools/search/serper-scraper.mjs +129 -0
- package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
- package/dist/esm/tools/search/tool.mjs +454 -0
- package/dist/esm/tools/search/tool.mjs.map +1 -0
- package/dist/esm/tools/search/utils.mjs +61 -0
- package/dist/esm/tools/search/utils.mjs.map +1 -0
- package/dist/esm/types/graph.mjs +26 -0
- package/dist/esm/types/graph.mjs.map +1 -0
- package/dist/esm/utils/contextAnalytics.mjs +64 -0
- package/dist/esm/utils/contextAnalytics.mjs.map +1 -0
- package/dist/esm/utils/events.mjs +29 -0
- package/dist/esm/utils/events.mjs.map +1 -0
- package/dist/esm/utils/graph.mjs +13 -0
- package/dist/esm/utils/graph.mjs.map +1 -0
- package/dist/esm/utils/handlers.mjs +68 -0
- package/dist/esm/utils/handlers.mjs.map +1 -0
- package/dist/esm/utils/llm.mjs +24 -0
- package/dist/esm/utils/llm.mjs.map +1 -0
- package/dist/esm/utils/misc.mjs +53 -0
- package/dist/esm/utils/misc.mjs.map +1 -0
- package/dist/esm/utils/run.mjs +70 -0
- package/dist/esm/utils/run.mjs.map +1 -0
- package/dist/esm/utils/schema.mjs +24 -0
- package/dist/esm/utils/schema.mjs.map +1 -0
- package/dist/esm/utils/title.mjs +122 -0
- package/dist/esm/utils/title.mjs.map +1 -0
- package/dist/esm/utils/tokens.mjs +121 -0
- package/dist/esm/utils/tokens.mjs.map +1 -0
- package/dist/esm/utils/toonFormat.mjs +381 -0
- package/dist/esm/utils/toonFormat.mjs.map +1 -0
- package/dist/types/agents/AgentContext.d.ts +293 -0
- package/dist/types/common/enum.d.ts +155 -0
- package/dist/types/common/index.d.ts +1 -0
- package/dist/types/events.d.ts +31 -0
- package/dist/types/graphs/Graph.d.ts +216 -0
- package/dist/types/graphs/MultiAgentGraph.d.ts +104 -0
- package/dist/types/graphs/index.d.ts +2 -0
- package/dist/types/index.d.ts +21 -0
- package/dist/types/instrumentation.d.ts +1 -0
- package/dist/types/llm/anthropic/index.d.ts +39 -0
- package/dist/types/llm/anthropic/types.d.ts +37 -0
- package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
- package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
- package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
- package/dist/types/llm/bedrock/index.d.ts +141 -0
- package/dist/types/llm/bedrock/types.d.ts +27 -0
- package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
- package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
- package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
- package/dist/types/llm/fake.d.ts +31 -0
- package/dist/types/llm/google/index.d.ts +24 -0
- package/dist/types/llm/google/types.d.ts +42 -0
- package/dist/types/llm/google/utils/common.d.ts +34 -0
- package/dist/types/llm/google/utils/tools.d.ts +10 -0
- package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
- package/dist/types/llm/openai/index.d.ts +127 -0
- package/dist/types/llm/openai/types.d.ts +10 -0
- package/dist/types/llm/openai/utils/index.d.ts +29 -0
- package/dist/types/llm/openrouter/index.d.ts +15 -0
- package/dist/types/llm/providers.d.ts +5 -0
- package/dist/types/llm/text.d.ts +21 -0
- package/dist/types/llm/vertexai/index.d.ts +293 -0
- package/dist/types/messages/cache.d.ts +54 -0
- package/dist/types/messages/content.d.ts +7 -0
- package/dist/types/messages/core.d.ts +14 -0
- package/dist/types/messages/format.d.ts +137 -0
- package/dist/types/messages/ids.d.ts +3 -0
- package/dist/types/messages/index.d.ts +7 -0
- package/dist/types/messages/prune.d.ts +52 -0
- package/dist/types/messages/reducer.d.ts +9 -0
- package/dist/types/messages/tools.d.ts +17 -0
- package/dist/types/mockStream.d.ts +32 -0
- package/dist/types/prompts/collab.d.ts +1 -0
- package/dist/types/prompts/index.d.ts +2 -0
- package/dist/types/prompts/taskmanager.d.ts +41 -0
- package/dist/types/run.d.ts +41 -0
- package/dist/types/schemas/index.d.ts +1 -0
- package/dist/types/schemas/validate.d.ts +59 -0
- package/dist/types/splitStream.d.ts +37 -0
- package/dist/types/stream.d.ts +15 -0
- package/dist/types/test/mockTools.d.ts +28 -0
- package/dist/types/tools/BrowserTools.d.ts +87 -0
- package/dist/types/tools/Calculator.d.ts +34 -0
- package/dist/types/tools/CodeExecutor.d.ts +57 -0
- package/dist/types/tools/ProgrammaticToolCalling.d.ts +138 -0
- package/dist/types/tools/ToolNode.d.ts +51 -0
- package/dist/types/tools/ToolSearch.d.ts +219 -0
- package/dist/types/tools/handlers.d.ts +22 -0
- package/dist/types/tools/schema.d.ts +12 -0
- package/dist/types/tools/search/anthropic.d.ts +16 -0
- package/dist/types/tools/search/content.d.ts +4 -0
- package/dist/types/tools/search/firecrawl.d.ts +54 -0
- package/dist/types/tools/search/format.d.ts +5 -0
- package/dist/types/tools/search/highlights.d.ts +13 -0
- package/dist/types/tools/search/index.d.ts +3 -0
- package/dist/types/tools/search/rerankers.d.ts +38 -0
- package/dist/types/tools/search/schema.d.ts +103 -0
- package/dist/types/tools/search/search.d.ts +8 -0
- package/dist/types/tools/search/serper-scraper.d.ts +59 -0
- package/dist/types/tools/search/test.d.ts +1 -0
- package/dist/types/tools/search/tool.d.ts +3 -0
- package/dist/types/tools/search/types.d.ts +575 -0
- package/dist/types/tools/search/utils.d.ts +10 -0
- package/dist/types/types/graph.d.ts +399 -0
- package/dist/types/types/index.d.ts +5 -0
- package/dist/types/types/llm.d.ts +105 -0
- package/dist/types/types/messages.d.ts +4 -0
- package/dist/types/types/run.d.ts +112 -0
- package/dist/types/types/stream.d.ts +308 -0
- package/dist/types/types/tools.d.ts +296 -0
- package/dist/types/utils/contextAnalytics.d.ts +37 -0
- package/dist/types/utils/events.d.ts +6 -0
- package/dist/types/utils/graph.d.ts +2 -0
- package/dist/types/utils/handlers.d.ts +34 -0
- package/dist/types/utils/index.d.ts +9 -0
- package/dist/types/utils/llm.d.ts +3 -0
- package/dist/types/utils/llmConfig.d.ts +3 -0
- package/dist/types/utils/logging.d.ts +1 -0
- package/dist/types/utils/misc.d.ts +7 -0
- package/dist/types/utils/run.d.ts +27 -0
- package/dist/types/utils/schema.d.ts +8 -0
- package/dist/types/utils/title.d.ts +4 -0
- package/dist/types/utils/tokens.d.ts +28 -0
- package/dist/types/utils/toonFormat.d.ts +111 -0
- package/package.json +190 -0
- package/src/agents/AgentContext.test.ts +458 -0
- package/src/agents/AgentContext.ts +972 -0
- package/src/agents/__tests__/AgentContext.test.ts +805 -0
- package/src/agents/__tests__/resolveStructuredOutputMode.test.ts +137 -0
- package/src/common/enum.ts +203 -0
- package/src/common/index.ts +2 -0
- package/src/events.ts +223 -0
- package/src/graphs/Graph.ts +2228 -0
- package/src/graphs/MultiAgentGraph.ts +1063 -0
- package/src/graphs/__tests__/structured-output.integration.test.ts +809 -0
- package/src/graphs/__tests__/structured-output.test.ts +183 -0
- package/src/graphs/index.ts +2 -0
- package/src/index.ts +34 -0
- package/src/instrumentation.ts +22 -0
- package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
- package/src/llm/anthropic/index.ts +413 -0
- package/src/llm/anthropic/llm.spec.ts +1442 -0
- package/src/llm/anthropic/types.ts +140 -0
- package/src/llm/anthropic/utils/message_inputs.ts +757 -0
- package/src/llm/anthropic/utils/message_outputs.ts +289 -0
- package/src/llm/anthropic/utils/output_parsers.ts +133 -0
- package/src/llm/anthropic/utils/tools.ts +29 -0
- package/src/llm/bedrock/__tests__/bedrock-caching.test.ts +495 -0
- package/src/llm/bedrock/index.ts +411 -0
- package/src/llm/bedrock/llm.spec.ts +616 -0
- package/src/llm/bedrock/types.ts +51 -0
- package/src/llm/bedrock/utils/index.ts +18 -0
- package/src/llm/bedrock/utils/message_inputs.ts +563 -0
- package/src/llm/bedrock/utils/message_outputs.ts +310 -0
- package/src/llm/fake.ts +133 -0
- package/src/llm/google/data/gettysburg10.wav +0 -0
- package/src/llm/google/data/hotdog.jpg +0 -0
- package/src/llm/google/index.ts +337 -0
- package/src/llm/google/llm.spec.ts +934 -0
- package/src/llm/google/types.ts +56 -0
- package/src/llm/google/utils/common.ts +873 -0
- package/src/llm/google/utils/tools.ts +160 -0
- package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
- package/src/llm/openai/index.ts +1366 -0
- package/src/llm/openai/types.ts +24 -0
- package/src/llm/openai/utils/index.ts +1035 -0
- package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
- package/src/llm/openrouter/index.ts +291 -0
- package/src/llm/providers.ts +52 -0
- package/src/llm/text.ts +94 -0
- package/src/llm/vertexai/index.ts +359 -0
- package/src/messages/__tests__/tools.test.ts +473 -0
- package/src/messages/cache.test.ts +1261 -0
- package/src/messages/cache.ts +518 -0
- package/src/messages/content.test.ts +362 -0
- package/src/messages/content.ts +63 -0
- package/src/messages/core.ts +473 -0
- package/src/messages/ensureThinkingBlock.test.ts +468 -0
- package/src/messages/format.ts +1029 -0
- package/src/messages/formatAgentMessages.test.ts +1513 -0
- package/src/messages/formatAgentMessages.tools.test.ts +419 -0
- package/src/messages/formatMessage.test.ts +693 -0
- package/src/messages/ids.ts +26 -0
- package/src/messages/index.ts +7 -0
- package/src/messages/labelContentByAgent.test.ts +887 -0
- package/src/messages/prune.ts +568 -0
- package/src/messages/reducer.ts +80 -0
- package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
- package/src/messages/tools.ts +108 -0
- package/src/mockStream.ts +99 -0
- package/src/prompts/collab.ts +6 -0
- package/src/prompts/index.ts +2 -0
- package/src/prompts/taskmanager.ts +61 -0
- package/src/run.ts +467 -0
- package/src/schemas/index.ts +2 -0
- package/src/schemas/schema-preparation.test.ts +500 -0
- package/src/schemas/validate.test.ts +358 -0
- package/src/schemas/validate.ts +454 -0
- package/src/scripts/abort.ts +157 -0
- package/src/scripts/ant_web_search.ts +158 -0
- package/src/scripts/ant_web_search_edge_case.ts +162 -0
- package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
- package/src/scripts/args.ts +48 -0
- package/src/scripts/caching.ts +132 -0
- package/src/scripts/cli.ts +172 -0
- package/src/scripts/cli2.ts +133 -0
- package/src/scripts/cli3.ts +184 -0
- package/src/scripts/cli4.ts +191 -0
- package/src/scripts/cli5.ts +191 -0
- package/src/scripts/code_exec.ts +213 -0
- package/src/scripts/code_exec_files.ts +236 -0
- package/src/scripts/code_exec_multi_session.ts +241 -0
- package/src/scripts/code_exec_ptc.ts +334 -0
- package/src/scripts/code_exec_session.ts +282 -0
- package/src/scripts/code_exec_simple.ts +147 -0
- package/src/scripts/content.ts +138 -0
- package/src/scripts/empty_input.ts +137 -0
- package/src/scripts/handoff-test.ts +135 -0
- package/src/scripts/image.ts +178 -0
- package/src/scripts/memory.ts +97 -0
- package/src/scripts/multi-agent-chain.ts +331 -0
- package/src/scripts/multi-agent-conditional.ts +221 -0
- package/src/scripts/multi-agent-document-review-chain.ts +197 -0
- package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
- package/src/scripts/multi-agent-parallel-start.ts +265 -0
- package/src/scripts/multi-agent-parallel.ts +394 -0
- package/src/scripts/multi-agent-sequence.ts +217 -0
- package/src/scripts/multi-agent-supervisor.ts +365 -0
- package/src/scripts/multi-agent-test.ts +186 -0
- package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
- package/src/scripts/parallel-full-metadata-test.ts +240 -0
- package/src/scripts/parallel-tools-test.ts +340 -0
- package/src/scripts/programmatic_exec.ts +396 -0
- package/src/scripts/programmatic_exec_agent.ts +231 -0
- package/src/scripts/search.ts +146 -0
- package/src/scripts/sequential-full-metadata-test.ts +197 -0
- package/src/scripts/simple.ts +225 -0
- package/src/scripts/single-agent-metadata-test.ts +198 -0
- package/src/scripts/stream.ts +140 -0
- package/src/scripts/test-custom-prompt-key.ts +145 -0
- package/src/scripts/test-handoff-input.ts +170 -0
- package/src/scripts/test-handoff-preamble.ts +277 -0
- package/src/scripts/test-multi-agent-list-handoff.ts +417 -0
- package/src/scripts/test-parallel-agent-labeling.ts +325 -0
- package/src/scripts/test-parallel-handoffs.ts +291 -0
- package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
- package/src/scripts/test-thinking-handoff.ts +155 -0
- package/src/scripts/test-tools-before-handoff.ts +226 -0
- package/src/scripts/test_code_api.ts +361 -0
- package/src/scripts/thinking-bedrock.ts +159 -0
- package/src/scripts/thinking.ts +171 -0
- package/src/scripts/tool_search.ts +162 -0
- package/src/scripts/tools.ts +177 -0
- package/src/specs/agent-handoffs.test.ts +888 -0
- package/src/specs/anthropic.simple.test.ts +387 -0
- package/src/specs/azure.simple.test.ts +364 -0
- package/src/specs/cache.simple.test.ts +396 -0
- package/src/specs/deepseek.simple.test.ts +283 -0
- package/src/specs/emergency-prune.test.ts +407 -0
- package/src/specs/moonshot.simple.test.ts +358 -0
- package/src/specs/openai.simple.test.ts +311 -0
- package/src/specs/openrouter.simple.test.ts +107 -0
- package/src/specs/prune.test.ts +901 -0
- package/src/specs/reasoning.test.ts +201 -0
- package/src/specs/spec.utils.ts +3 -0
- package/src/specs/thinking-handoff.test.ts +620 -0
- package/src/specs/thinking-prune.test.ts +703 -0
- package/src/specs/token-distribution-edge-case.test.ts +316 -0
- package/src/specs/token-memoization.test.ts +32 -0
- package/src/specs/tool-error.test.ts +198 -0
- package/src/splitStream.test.ts +691 -0
- package/src/splitStream.ts +234 -0
- package/src/stream.test.ts +94 -0
- package/src/stream.ts +801 -0
- package/src/test/mockTools.ts +386 -0
- package/src/tools/BrowserTools.ts +393 -0
- package/src/tools/Calculator.test.ts +278 -0
- package/src/tools/Calculator.ts +46 -0
- package/src/tools/CodeExecutor.ts +270 -0
- package/src/tools/ProgrammaticToolCalling.ts +785 -0
- package/src/tools/ToolNode.ts +674 -0
- package/src/tools/ToolSearch.ts +1095 -0
- package/src/tools/__tests__/BrowserTools.test.ts +265 -0
- package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.ts +319 -0
- package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +1006 -0
- package/src/tools/__tests__/ToolSearch.integration.test.ts +162 -0
- package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
- package/src/tools/handlers.ts +363 -0
- package/src/tools/schema.ts +37 -0
- package/src/tools/search/anthropic.ts +51 -0
- package/src/tools/search/content.test.ts +173 -0
- package/src/tools/search/content.ts +147 -0
- package/src/tools/search/firecrawl.ts +210 -0
- package/src/tools/search/format.ts +250 -0
- package/src/tools/search/highlights.ts +320 -0
- package/src/tools/search/index.ts +3 -0
- package/src/tools/search/jina-reranker.test.ts +130 -0
- package/src/tools/search/output.md +2775 -0
- package/src/tools/search/rerankers.ts +242 -0
- package/src/tools/search/schema.ts +113 -0
- package/src/tools/search/search.ts +768 -0
- package/src/tools/search/serper-scraper.ts +155 -0
- package/src/tools/search/test.html +884 -0
- package/src/tools/search/test.md +643 -0
- package/src/tools/search/test.ts +159 -0
- package/src/tools/search/tool.ts +657 -0
- package/src/tools/search/types.ts +665 -0
- package/src/tools/search/utils.ts +79 -0
- package/src/types/graph.test.ts +218 -0
- package/src/types/graph.ts +533 -0
- package/src/types/index.ts +6 -0
- package/src/types/llm.ts +140 -0
- package/src/types/messages.ts +4 -0
- package/src/types/run.ts +128 -0
- package/src/types/stream.ts +417 -0
- package/src/types/tools.ts +355 -0
- package/src/utils/contextAnalytics.ts +103 -0
- package/src/utils/events.ts +32 -0
- package/src/utils/graph.ts +11 -0
- package/src/utils/handlers.ts +107 -0
- package/src/utils/index.ts +9 -0
- package/src/utils/llm.ts +26 -0
- package/src/utils/llmConfig.ts +208 -0
- package/src/utils/logging.ts +48 -0
- package/src/utils/misc.ts +57 -0
- package/src/utils/run.ts +106 -0
- package/src/utils/schema.ts +35 -0
- package/src/utils/title.ts +177 -0
- package/src/utils/tokens.ts +142 -0
- package/src/utils/toonFormat.ts +475 -0
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var aws = require('@langchain/aws');
|
|
4
|
+
var messages = require('@langchain/core/messages');
|
|
5
|
+
var outputs = require('@langchain/core/outputs');
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
|
|
9
|
+
* and adds support for:
|
|
10
|
+
*
|
|
11
|
+
* - Prompt caching support for Bedrock Converse API (Illuma feature)
|
|
12
|
+
* - Application Inference Profiles (PR #9129)
|
|
13
|
+
* - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+
|
|
14
|
+
*
|
|
15
|
+
* Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
|
|
16
|
+
* causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
|
|
17
|
+
* errors. This wrapper simply strips contentBlockIndex from response_metadata to avoid
|
|
18
|
+
* the conflict.
|
|
19
|
+
*
|
|
20
|
+
* The contentBlockIndex field is only used internally by Bedrock's streaming protocol
|
|
21
|
+
* and isn't needed by application logic - the index field on tool_call_chunks serves
|
|
22
|
+
* the purpose of tracking tool call ordering.
|
|
23
|
+
*
|
|
24
|
+
* PROMPT CACHING:
|
|
25
|
+
* When promptCache: true is set, this wrapper adds cachePoint markers to the tools array
|
|
26
|
+
* to enable Bedrock prompt caching for tool definitions. This allows tool schemas to be
|
|
27
|
+
* cached and reused across requests, reducing latency and costs.
|
|
28
|
+
*
|
|
29
|
+
* CACHE TOKEN EXTRACTION:
|
|
30
|
+
* LangChain AWS doesn't extract cacheReadInputTokens/cacheWriteInputTokens from Bedrock's
|
|
31
|
+
* response. This wrapper adds input_token_details to usage_metadata with cache information.
|
|
32
|
+
*/
|
|
33
|
+
class CustomChatBedrockConverse extends aws.ChatBedrockConverse {
|
|
34
|
+
/** Enable Bedrock prompt caching for tool definitions */
|
|
35
|
+
promptCache;
|
|
36
|
+
/** Application Inference Profile ARN to use instead of model ID */
|
|
37
|
+
applicationInferenceProfile;
|
|
38
|
+
/** Service tier for model invocation */
|
|
39
|
+
serviceTier;
|
|
40
|
+
constructor(fields) {
|
|
41
|
+
super(fields);
|
|
42
|
+
this.promptCache = fields?.promptCache ?? false;
|
|
43
|
+
this.applicationInferenceProfile = fields?.applicationInferenceProfile;
|
|
44
|
+
this.serviceTier = fields?.serviceTier;
|
|
45
|
+
// Fix: Force supportsToolChoiceValues for Claude models
|
|
46
|
+
// The parent constructor checks `model.includes('claude-3')` but this fails when:
|
|
47
|
+
// 1. Using applicationInferenceProfile ARNs (arn:aws:bedrock:...)
|
|
48
|
+
// 2. Using different naming conventions (claude-4, claude-opus-4, etc.)
|
|
49
|
+
// We need to ensure tool_choice is properly set for withStructuredOutput to work
|
|
50
|
+
const modelName = (fields?.model ?? '').toLowerCase();
|
|
51
|
+
const profileName = (fields?.applicationInferenceProfile ?? '').toLowerCase();
|
|
52
|
+
const isClaudeModel = modelName.includes('claude') ||
|
|
53
|
+
modelName.includes('anthropic') ||
|
|
54
|
+
profileName.includes('claude') ||
|
|
55
|
+
profileName.includes('anthropic');
|
|
56
|
+
if (isClaudeModel && !this.supportsToolChoiceValues?.length) {
|
|
57
|
+
// Claude models support all tool choice values
|
|
58
|
+
this.supportsToolChoiceValues = ['auto', 'any', 'tool'];
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
static lc_name() {
|
|
62
|
+
return 'IllumaBedrockConverse';
|
|
63
|
+
}
|
|
64
|
+
/**
|
|
65
|
+
* Get the model ID to use for API calls.
|
|
66
|
+
* Returns applicationInferenceProfile if set, otherwise returns this.model.
|
|
67
|
+
*/
|
|
68
|
+
getModelId() {
|
|
69
|
+
return this.applicationInferenceProfile ?? this.model;
|
|
70
|
+
}
|
|
71
|
+
/**
|
|
72
|
+
* Override invocationParams to:
|
|
73
|
+
* 1. Add cachePoint to tools when promptCache is enabled
|
|
74
|
+
* 2. Add serviceTier support
|
|
75
|
+
*
|
|
76
|
+
* CACHING STRATEGY: Separate cachePoints for core tools and MCP tools
|
|
77
|
+
* - Core tools (web_search, execute_code, etc.) are stable → cache first
|
|
78
|
+
* - MCP tools (have '_mcp_' in name) are dynamic → cache separately after
|
|
79
|
+
* - This allows core tools to stay cached when MCP selection changes
|
|
80
|
+
*
|
|
81
|
+
* NOTE: Only Claude models support cachePoint - Nova and other models will reject it.
|
|
82
|
+
*/
|
|
83
|
+
invocationParams(options) {
|
|
84
|
+
const params = super.invocationParams(options);
|
|
85
|
+
// Add cachePoint to tools array if promptCache is enabled and tools exist
|
|
86
|
+
// Only Claude models support cachePoint - check model name
|
|
87
|
+
const modelId = this.model.toLowerCase();
|
|
88
|
+
const isClaudeModel = modelId.includes('claude') || modelId.includes('anthropic');
|
|
89
|
+
if (this.promptCache &&
|
|
90
|
+
isClaudeModel &&
|
|
91
|
+
params.toolConfig?.tools &&
|
|
92
|
+
Array.isArray(params.toolConfig.tools) &&
|
|
93
|
+
params.toolConfig.tools.length > 0) {
|
|
94
|
+
// Separate core tools from MCP tools
|
|
95
|
+
// MCP tools have '_mcp_' in their name (e.g., 'search_emails_mcp_Google-Workspace')
|
|
96
|
+
const coreTools = [];
|
|
97
|
+
const mcpTools = [];
|
|
98
|
+
for (const tool of params.toolConfig.tools) {
|
|
99
|
+
// Check if tool has a name property with '_mcp_' pattern
|
|
100
|
+
const toolName = tool.toolSpec?.name ?? '';
|
|
101
|
+
if (toolName.includes('_mcp_')) {
|
|
102
|
+
mcpTools.push(tool);
|
|
103
|
+
}
|
|
104
|
+
else {
|
|
105
|
+
coreTools.push(tool);
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
// Build tools array with strategic cachePoints:
|
|
109
|
+
// [CoreTool1, CoreTool2, cachePoint] + [MCPTool1, MCPTool2, cachePoint]
|
|
110
|
+
const toolsWithCache = [];
|
|
111
|
+
// Add core tools with cachePoint (if any)
|
|
112
|
+
if (coreTools.length > 0) {
|
|
113
|
+
toolsWithCache.push(...coreTools);
|
|
114
|
+
toolsWithCache.push({ cachePoint: { type: 'default' } });
|
|
115
|
+
}
|
|
116
|
+
// Add MCP tools with their own cachePoint (if any)
|
|
117
|
+
if (mcpTools.length > 0) {
|
|
118
|
+
toolsWithCache.push(...mcpTools);
|
|
119
|
+
toolsWithCache.push({ cachePoint: { type: 'default' } });
|
|
120
|
+
}
|
|
121
|
+
// If no tools at all (shouldn't happen but safety check)
|
|
122
|
+
if (toolsWithCache.length === 0) {
|
|
123
|
+
toolsWithCache.push({ cachePoint: { type: 'default' } });
|
|
124
|
+
}
|
|
125
|
+
params.toolConfig.tools = toolsWithCache;
|
|
126
|
+
}
|
|
127
|
+
// Add serviceTier support
|
|
128
|
+
const serviceTierType = options?.serviceTier ?? this.serviceTier;
|
|
129
|
+
return {
|
|
130
|
+
...params,
|
|
131
|
+
serviceTier: serviceTierType ? { type: serviceTierType } : undefined,
|
|
132
|
+
};
|
|
133
|
+
}
|
|
134
|
+
/**
|
|
135
|
+
* Override _generateNonStreaming to use applicationInferenceProfile as modelId.
|
|
136
|
+
* Uses the same model-swapping pattern as streaming for consistency.
|
|
137
|
+
*/
|
|
138
|
+
async _generateNonStreaming(messages, options, runManager) {
|
|
139
|
+
// Temporarily swap model for applicationInferenceProfile support
|
|
140
|
+
const originalModel = this.model;
|
|
141
|
+
if (this.applicationInferenceProfile != null &&
|
|
142
|
+
this.applicationInferenceProfile !== '') {
|
|
143
|
+
this.model = this.applicationInferenceProfile;
|
|
144
|
+
}
|
|
145
|
+
try {
|
|
146
|
+
return await super._generateNonStreaming(messages, options, runManager);
|
|
147
|
+
}
|
|
148
|
+
finally {
|
|
149
|
+
// Restore original model
|
|
150
|
+
this.model = originalModel;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
/**
|
|
154
|
+
* Override _streamResponseChunks to:
|
|
155
|
+
* 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)
|
|
156
|
+
* 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts
|
|
157
|
+
* 3. Extract cacheReadInputTokens/cacheWriteInputTokens and add to usage_metadata
|
|
158
|
+
*
|
|
159
|
+
* Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's
|
|
160
|
+
* internal chunk handling which correctly preserves array content for reasoning blocks.
|
|
161
|
+
*/
|
|
162
|
+
async *_streamResponseChunks(messages, options, runManager) {
|
|
163
|
+
// Temporarily swap model for applicationInferenceProfile support
|
|
164
|
+
const originalModel = this.model;
|
|
165
|
+
if (this.applicationInferenceProfile != null &&
|
|
166
|
+
this.applicationInferenceProfile !== '') {
|
|
167
|
+
this.model = this.applicationInferenceProfile;
|
|
168
|
+
}
|
|
169
|
+
try {
|
|
170
|
+
// Use parent's streaming logic which correctly handles reasoning content
|
|
171
|
+
const baseStream = super._streamResponseChunks(messages, options, runManager);
|
|
172
|
+
for await (const chunk of baseStream) {
|
|
173
|
+
// Clean and enhance chunk
|
|
174
|
+
yield this.processChunk(chunk);
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
finally {
|
|
178
|
+
// Restore original model
|
|
179
|
+
this.model = originalModel;
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
/**
|
|
183
|
+
* Process a chunk by:
|
|
184
|
+
* 1. Removing contentBlockIndex from response_metadata
|
|
185
|
+
* 2. Extracting cache token information from Bedrock's usage data
|
|
186
|
+
*/
|
|
187
|
+
processChunk(chunk) {
|
|
188
|
+
const message = chunk.message;
|
|
189
|
+
if (!(message instanceof messages.AIMessageChunk)) {
|
|
190
|
+
return chunk;
|
|
191
|
+
}
|
|
192
|
+
const responseMetadata = message.response_metadata;
|
|
193
|
+
let needsModification = false;
|
|
194
|
+
let cleanedMetadata = responseMetadata;
|
|
195
|
+
// Check if contentBlockIndex exists anywhere in response_metadata
|
|
196
|
+
const hasContentBlockIndex = this.hasContentBlockIndex(responseMetadata);
|
|
197
|
+
if (hasContentBlockIndex) {
|
|
198
|
+
cleanedMetadata = this.removeContentBlockIndex(responseMetadata);
|
|
199
|
+
needsModification = true;
|
|
200
|
+
}
|
|
201
|
+
// Extract cache tokens from metadata.usage (Bedrock streaming format)
|
|
202
|
+
// The metadata chunk contains usage with cacheReadInputTokens/cacheWriteInputTokens
|
|
203
|
+
const metadata = responseMetadata.metadata;
|
|
204
|
+
const usage = (metadata?.usage ?? responseMetadata.usage);
|
|
205
|
+
let enhancedUsageMetadata = message.usage_metadata;
|
|
206
|
+
if (usage) {
|
|
207
|
+
const cacheRead = usage.cacheReadInputTokens ?? 0;
|
|
208
|
+
const cacheWrite = usage.cacheWriteInputTokens ?? 0;
|
|
209
|
+
const inputTokens = usage.inputTokens ?? 0;
|
|
210
|
+
const outputTokens = usage.outputTokens ?? 0;
|
|
211
|
+
if (cacheRead > 0 || cacheWrite > 0) {
|
|
212
|
+
needsModification = true;
|
|
213
|
+
enhancedUsageMetadata = {
|
|
214
|
+
input_tokens: message.usage_metadata?.input_tokens ?? inputTokens,
|
|
215
|
+
output_tokens: message.usage_metadata?.output_tokens ?? outputTokens,
|
|
216
|
+
total_tokens: message.usage_metadata?.total_tokens ??
|
|
217
|
+
usage.totalTokens ??
|
|
218
|
+
0,
|
|
219
|
+
input_token_details: {
|
|
220
|
+
cache_read: cacheRead,
|
|
221
|
+
cache_creation: cacheWrite,
|
|
222
|
+
},
|
|
223
|
+
};
|
|
224
|
+
}
|
|
225
|
+
}
|
|
226
|
+
if (needsModification) {
|
|
227
|
+
return new outputs.ChatGenerationChunk({
|
|
228
|
+
text: chunk.text,
|
|
229
|
+
message: new messages.AIMessageChunk({
|
|
230
|
+
...message,
|
|
231
|
+
response_metadata: cleanedMetadata,
|
|
232
|
+
usage_metadata: enhancedUsageMetadata,
|
|
233
|
+
}),
|
|
234
|
+
generationInfo: chunk.generationInfo,
|
|
235
|
+
});
|
|
236
|
+
}
|
|
237
|
+
return chunk;
|
|
238
|
+
}
|
|
239
|
+
/**
|
|
240
|
+
* Check if contentBlockIndex exists at any level in the object
|
|
241
|
+
*/
|
|
242
|
+
hasContentBlockIndex(obj) {
|
|
243
|
+
if (obj === null || obj === undefined || typeof obj !== 'object') {
|
|
244
|
+
return false;
|
|
245
|
+
}
|
|
246
|
+
if ('contentBlockIndex' in obj) {
|
|
247
|
+
return true;
|
|
248
|
+
}
|
|
249
|
+
for (const value of Object.values(obj)) {
|
|
250
|
+
if (typeof value === 'object' && value !== null) {
|
|
251
|
+
if (this.hasContentBlockIndex(value)) {
|
|
252
|
+
return true;
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
}
|
|
256
|
+
return false;
|
|
257
|
+
}
|
|
258
|
+
/**
|
|
259
|
+
* Recursively remove contentBlockIndex from all levels of an object
|
|
260
|
+
*/
|
|
261
|
+
removeContentBlockIndex(obj) {
|
|
262
|
+
if (obj === null || obj === undefined) {
|
|
263
|
+
return obj;
|
|
264
|
+
}
|
|
265
|
+
if (Array.isArray(obj)) {
|
|
266
|
+
return obj.map((item) => this.removeContentBlockIndex(item));
|
|
267
|
+
}
|
|
268
|
+
if (typeof obj === 'object') {
|
|
269
|
+
const cleaned = {};
|
|
270
|
+
for (const [key, value] of Object.entries(obj)) {
|
|
271
|
+
if (key !== 'contentBlockIndex') {
|
|
272
|
+
cleaned[key] = this.removeContentBlockIndex(value);
|
|
273
|
+
}
|
|
274
|
+
}
|
|
275
|
+
return cleaned;
|
|
276
|
+
}
|
|
277
|
+
return obj;
|
|
278
|
+
}
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
// Public CommonJS export of this compiled module.
exports.CustomChatBedrockConverse = CustomChatBedrockConverse;
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.cjs","sources":["../../../../src/llm/bedrock/index.ts"],"sourcesContent":["/**\n * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts\n * and adds support for:\n *\n * - Prompt caching support for Bedrock Converse API (Illuma feature)\n * - Application Inference Profiles (PR #9129)\n * - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+\n *\n * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,\n * causing LangChain's merge logic to fail with \"field[contentBlockIndex] already exists\"\n * errors. This wrapper simply strips contentBlockIndex from response_metadata to avoid\n * the conflict.\n *\n * The contentBlockIndex field is only used internally by Bedrock's streaming protocol\n * and isn't needed by application logic - the index field on tool_call_chunks serves\n * the purpose of tracking tool call ordering.\n *\n * PROMPT CACHING:\n * When promptCache: true is set, this wrapper adds cachePoint markers to the tools array\n * to enable Bedrock prompt caching for tool definitions. This allows tool schemas to be\n * cached and reused across requests, reducing latency and costs.\n *\n * CACHE TOKEN EXTRACTION:\n * LangChain AWS doesn't extract cacheReadInputTokens/cacheWriteInputTokens from Bedrock's\n * response. 
This wrapper adds input_token_details to usage_metadata with cache information.\n */\n\nimport { ChatBedrockConverse } from '@langchain/aws';\nimport { AIMessageChunk } from '@langchain/core/messages';\nimport type { BaseMessage, UsageMetadata } from '@langchain/core/messages';\nimport { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';\nimport type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';\nimport type { ChatBedrockConverseInput } from '@langchain/aws';\n\n/**\n * Service tier type for Bedrock invocations.\n * Requires AWS SDK >= 3.966.0 to actually work.\n * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html\n */\nexport type ServiceTierType = 'priority' | 'default' | 'flex' | 'reserved';\n\n/**\n * Extended input interface with additional features:\n * - promptCache: Enable Bedrock prompt caching for tool definitions\n * - applicationInferenceProfile: Use an inference profile ARN instead of model ID\n * - serviceTier: Specify service tier (Priority, Standard, Flex, Reserved)\n */\nexport interface CustomChatBedrockConverseInput\n extends ChatBedrockConverseInput {\n /**\n * Enable Bedrock prompt caching for tool definitions.\n * When true, adds cachePoint markers to tools array.\n */\n promptCache?: boolean;\n\n /**\n * Application Inference Profile ARN to use for the model.\n * For example, \"arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx\"\n * When provided, this ARN will be used for the actual inference calls instead of the model ID.\n * Must still provide `model` as normal modelId to benefit from all the metadata.\n * @see https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-create.html\n */\n applicationInferenceProfile?: string;\n\n /**\n * Service tier for model invocation.\n * Specifies the processing tier type used for serving the request.\n * Supported values are 'priority', 'default', 'flex', and 'reserved'.\n *\n * - 
'priority': Prioritized processing for lower latency\n * - 'default': Standard processing tier\n * - 'flex': Flexible processing tier with lower cost\n * - 'reserved': Reserved capacity for consistent performance\n *\n * If not provided, AWS uses the default tier.\n * Note: Requires AWS SDK >= 3.966.0 to work.\n * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html\n */\n serviceTier?: ServiceTierType;\n}\n\n/**\n * Extended call options with serviceTier override support.\n */\nexport interface CustomChatBedrockConverseCallOptions {\n serviceTier?: ServiceTierType;\n}\n\nexport class CustomChatBedrockConverse extends ChatBedrockConverse {\n /** Enable Bedrock prompt caching for tool definitions */\n promptCache: boolean;\n\n /** Application Inference Profile ARN to use instead of model ID */\n applicationInferenceProfile?: string;\n\n /** Service tier for model invocation */\n serviceTier?: ServiceTierType;\n\n constructor(fields?: CustomChatBedrockConverseInput) {\n super(fields);\n this.promptCache = fields?.promptCache ?? false;\n this.applicationInferenceProfile = fields?.applicationInferenceProfile;\n this.serviceTier = fields?.serviceTier;\n\n // Fix: Force supportsToolChoiceValues for Claude models\n // The parent constructor checks `model.includes('claude-3')` but this fails when:\n // 1. Using applicationInferenceProfile ARNs (arn:aws:bedrock:...)\n // 2. Using different naming conventions (claude-4, claude-opus-4, etc.)\n // We need to ensure tool_choice is properly set for withStructuredOutput to work\n const modelName = (fields?.model ?? '').toLowerCase();\n const profileName = (fields?.applicationInferenceProfile ?? 
'').toLowerCase();\n const isClaudeModel =\n modelName.includes('claude') ||\n modelName.includes('anthropic') ||\n profileName.includes('claude') ||\n profileName.includes('anthropic');\n\n if (isClaudeModel && !this.supportsToolChoiceValues?.length) {\n // Claude models support all tool choice values\n this.supportsToolChoiceValues = ['auto', 'any', 'tool'];\n }\n }\n\n static lc_name(): string {\n return 'IllumaBedrockConverse';\n }\n\n /**\n * Get the model ID to use for API calls.\n * Returns applicationInferenceProfile if set, otherwise returns this.model.\n */\n protected getModelId(): string {\n return this.applicationInferenceProfile ?? this.model;\n }\n\n /**\n * Override invocationParams to:\n * 1. Add cachePoint to tools when promptCache is enabled\n * 2. Add serviceTier support\n *\n * CACHING STRATEGY: Separate cachePoints for core tools and MCP tools\n * - Core tools (web_search, execute_code, etc.) are stable → cache first\n * - MCP tools (have '_mcp_' in name) are dynamic → cache separately after\n * - This allows core tools to stay cached when MCP selection changes\n *\n * NOTE: Only Claude models support cachePoint - Nova and other models will reject it.\n */\n override invocationParams(\n options?: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions\n ): ReturnType<ChatBedrockConverse['invocationParams']> & {\n serviceTier?: { type: ServiceTierType };\n } {\n const params = super.invocationParams(options);\n\n // Add cachePoint to tools array if promptCache is enabled and tools exist\n // Only Claude models support cachePoint - check model name\n const modelId = this.model.toLowerCase();\n const isClaudeModel =\n modelId.includes('claude') || modelId.includes('anthropic');\n\n if (\n this.promptCache &&\n isClaudeModel &&\n params.toolConfig?.tools &&\n Array.isArray(params.toolConfig.tools) &&\n params.toolConfig.tools.length > 0\n ) {\n // Separate core tools from MCP tools\n // MCP tools have '_mcp_' in their name (e.g., 
'search_emails_mcp_Google-Workspace')\n const coreTools: typeof params.toolConfig.tools = [];\n const mcpTools: typeof params.toolConfig.tools = [];\n\n for (const tool of params.toolConfig.tools) {\n // Check if tool has a name property with '_mcp_' pattern\n const toolName =\n (tool as { toolSpec?: { name?: string } }).toolSpec?.name ?? '';\n if (toolName.includes('_mcp_')) {\n mcpTools.push(tool);\n } else {\n coreTools.push(tool);\n }\n }\n\n // Build tools array with strategic cachePoints:\n // [CoreTool1, CoreTool2, cachePoint] + [MCPTool1, MCPTool2, cachePoint]\n const toolsWithCache: typeof params.toolConfig.tools = [];\n\n // Add core tools with cachePoint (if any)\n if (coreTools.length > 0) {\n toolsWithCache.push(...coreTools);\n toolsWithCache.push({ cachePoint: { type: 'default' } });\n }\n\n // Add MCP tools with their own cachePoint (if any)\n if (mcpTools.length > 0) {\n toolsWithCache.push(...mcpTools);\n toolsWithCache.push({ cachePoint: { type: 'default' } });\n }\n\n // If no tools at all (shouldn't happen but safety check)\n if (toolsWithCache.length === 0) {\n toolsWithCache.push({ cachePoint: { type: 'default' } });\n }\n\n params.toolConfig.tools = toolsWithCache;\n }\n\n // Add serviceTier support\n const serviceTierType = options?.serviceTier ?? this.serviceTier;\n\n return {\n ...params,\n serviceTier: serviceTierType ? 
{ type: serviceTierType } : undefined,\n };\n }\n\n /**\n * Override _generateNonStreaming to use applicationInferenceProfile as modelId.\n * Uses the same model-swapping pattern as streaming for consistency.\n */\n override async _generateNonStreaming(\n messages: BaseMessage[],\n options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n // Temporarily swap model for applicationInferenceProfile support\n const originalModel = this.model;\n if (\n this.applicationInferenceProfile != null &&\n this.applicationInferenceProfile !== ''\n ) {\n this.model = this.applicationInferenceProfile;\n }\n\n try {\n return await super._generateNonStreaming(messages, options, runManager);\n } finally {\n // Restore original model\n this.model = originalModel;\n }\n }\n\n /**\n * Override _streamResponseChunks to:\n * 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)\n * 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts\n * 3. 
Extract cacheReadInputTokens/cacheWriteInputTokens and add to usage_metadata\n *\n * Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's\n * internal chunk handling which correctly preserves array content for reasoning blocks.\n */\n override async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n // Temporarily swap model for applicationInferenceProfile support\n const originalModel = this.model;\n if (\n this.applicationInferenceProfile != null &&\n this.applicationInferenceProfile !== ''\n ) {\n this.model = this.applicationInferenceProfile;\n }\n\n try {\n // Use parent's streaming logic which correctly handles reasoning content\n const baseStream = super._streamResponseChunks(\n messages,\n options,\n runManager\n );\n\n for await (const chunk of baseStream) {\n // Clean and enhance chunk\n yield this.processChunk(chunk);\n }\n } finally {\n // Restore original model\n this.model = originalModel;\n }\n }\n\n /**\n * Process a chunk by:\n * 1. Removing contentBlockIndex from response_metadata\n * 2. 
Extracting cache token information from Bedrock's usage data\n */\n private processChunk(chunk: ChatGenerationChunk): ChatGenerationChunk {\n const message = chunk.message;\n if (!(message instanceof AIMessageChunk)) {\n return chunk;\n }\n\n const responseMetadata = message.response_metadata as Record<\n string,\n unknown\n >;\n let needsModification = false;\n let cleanedMetadata = responseMetadata;\n\n // Check if contentBlockIndex exists anywhere in response_metadata\n const hasContentBlockIndex = this.hasContentBlockIndex(responseMetadata);\n if (hasContentBlockIndex) {\n cleanedMetadata = this.removeContentBlockIndex(\n responseMetadata\n ) as Record<string, unknown>;\n needsModification = true;\n }\n\n // Extract cache tokens from metadata.usage (Bedrock streaming format)\n // The metadata chunk contains usage with cacheReadInputTokens/cacheWriteInputTokens\n const metadata = responseMetadata.metadata as\n | Record<string, unknown>\n | undefined;\n const usage = (metadata?.usage ?? responseMetadata.usage) as\n | Record<string, unknown>\n | undefined;\n\n let enhancedUsageMetadata: UsageMetadata | undefined =\n message.usage_metadata;\n\n if (usage) {\n const cacheRead = (usage.cacheReadInputTokens as number | undefined) ?? 0;\n const cacheWrite =\n (usage.cacheWriteInputTokens as number | undefined) ?? 0;\n const inputTokens = (usage.inputTokens as number | undefined) ?? 0;\n const outputTokens = (usage.outputTokens as number | undefined) ?? 0;\n\n if (cacheRead > 0 || cacheWrite > 0) {\n needsModification = true;\n enhancedUsageMetadata = {\n input_tokens: message.usage_metadata?.input_tokens ?? inputTokens,\n output_tokens: message.usage_metadata?.output_tokens ?? 
outputTokens,\n total_tokens:\n message.usage_metadata?.total_tokens ??\n (usage.totalTokens as number | undefined) ??\n 0,\n input_token_details: {\n cache_read: cacheRead,\n cache_creation: cacheWrite,\n },\n };\n }\n }\n\n if (needsModification) {\n return new ChatGenerationChunk({\n text: chunk.text,\n message: new AIMessageChunk({\n ...message,\n response_metadata: cleanedMetadata,\n usage_metadata: enhancedUsageMetadata,\n }),\n generationInfo: chunk.generationInfo,\n });\n }\n\n return chunk;\n }\n\n /**\n * Check if contentBlockIndex exists at any level in the object\n */\n private hasContentBlockIndex(obj: unknown): boolean {\n if (obj === null || obj === undefined || typeof obj !== 'object') {\n return false;\n }\n\n if ('contentBlockIndex' in obj) {\n return true;\n }\n\n for (const value of Object.values(obj)) {\n if (typeof value === 'object' && value !== null) {\n if (this.hasContentBlockIndex(value)) {\n return true;\n }\n }\n }\n\n return false;\n }\n\n /**\n * Recursively remove contentBlockIndex from all levels of an object\n */\n private removeContentBlockIndex(obj: unknown): unknown {\n if (obj === null || obj === undefined) {\n return obj;\n }\n\n if (Array.isArray(obj)) {\n return obj.map((item) => this.removeContentBlockIndex(item));\n }\n\n if (typeof obj === 'object') {\n const cleaned: Record<string, unknown> = {};\n for (const [key, value] of Object.entries(obj)) {\n if (key !== 'contentBlockIndex') {\n cleaned[key] = this.removeContentBlockIndex(value);\n }\n }\n return cleaned;\n }\n\n return obj;\n }\n}\n\nexport type { ChatBedrockConverseInput 
};\n"],"names":["ChatBedrockConverse","AIMessageChunk","ChatGenerationChunk"],"mappings":";;;;;;AAAA;;;;;;;;;;;;;;;;;;;;;;;;;AAyBG;AA+DG,MAAO,yBAA0B,SAAQA,uBAAmB,CAAA;;AAEhE,IAAA,WAAW;;AAGX,IAAA,2BAA2B;;AAG3B,IAAA,WAAW;AAEX,IAAA,WAAA,CAAY,MAAuC,EAAA;QACjD,KAAK,CAAC,MAAM,CAAC;QACb,IAAI,CAAC,WAAW,GAAG,MAAM,EAAE,WAAW,IAAI,KAAK;AAC/C,QAAA,IAAI,CAAC,2BAA2B,GAAG,MAAM,EAAE,2BAA2B;AACtE,QAAA,IAAI,CAAC,WAAW,GAAG,MAAM,EAAE,WAAW;;;;;;AAOtC,QAAA,MAAM,SAAS,GAAG,CAAC,MAAM,EAAE,KAAK,IAAI,EAAE,EAAE,WAAW,EAAE;AACrD,QAAA,MAAM,WAAW,GAAG,CAAC,MAAM,EAAE,2BAA2B,IAAI,EAAE,EAAE,WAAW,EAAE;AAC7E,QAAA,MAAM,aAAa,GACjB,SAAS,CAAC,QAAQ,CAAC,QAAQ,CAAC;AAC5B,YAAA,SAAS,CAAC,QAAQ,CAAC,WAAW,CAAC;AAC/B,YAAA,WAAW,CAAC,QAAQ,CAAC,QAAQ,CAAC;AAC9B,YAAA,WAAW,CAAC,QAAQ,CAAC,WAAW,CAAC;QAEnC,IAAI,aAAa,IAAI,CAAC,IAAI,CAAC,wBAAwB,EAAE,MAAM,EAAE;;YAE3D,IAAI,CAAC,wBAAwB,GAAG,CAAC,MAAM,EAAE,KAAK,EAAE,MAAM,CAAC;;;AAI3D,IAAA,OAAO,OAAO,GAAA;AACZ,QAAA,OAAO,uBAAuB;;AAGhC;;;AAGG;IACO,UAAU,GAAA;AAClB,QAAA,OAAO,IAAI,CAAC,2BAA2B,IAAI,IAAI,CAAC,KAAK;;AAGvD;;;;;;;;;;;AAWG;AACM,IAAA,gBAAgB,CACvB,OAA0E,EAAA;QAI1E,MAAM,MAAM,GAAG,KAAK,CAAC,gBAAgB,CAAC,OAAO,CAAC;;;QAI9C,MAAM,OAAO,GAAG,IAAI,CAAC,KAAK,CAAC,WAAW,EAAE;AACxC,QAAA,MAAM,aAAa,GACjB,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAC,IAAI,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAC;QAE7D,IACE,IAAI,CAAC,WAAW;YAChB,aAAa;YACb,MAAM,CAAC,UAAU,EAAE,KAAK;YACxB,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC;YACtC,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,MAAM,GAAG,CAAC,EAClC;;;YAGA,MAAM,SAAS,GAAmC,EAAE;YACpD,MAAM,QAAQ,GAAmC,EAAE;YAEnD,KAAK,MAAM,IAAI,IAAI,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE;;gBAE1C,MAAM,QAAQ,GACX,IAAyC,CAAC,QAAQ,EAAE,IAAI,IAAI,EAAE;AACjE,gBAAA,IAAI,QAAQ,CAAC,QAAQ,CAAC,OAAO,CAAC,EAAE;AAC9B,oBAAA,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC;;qBACd;AACL,oBAAA,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC;;;;;YAMxB,MAAM,cAAc,GAAmC,EAAE;;AAGzD,YAAA,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE;AACxB,gBAAA,cAAc,CAAC,IAAI,CAAC,GAAG,SAAS,CAAC;AACjC,gBAAA,cAAc,CAAC,IAAI,CAAC,EAAE,UAAU,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE,EAAE,CAAC;;;AAI1D,YAAA,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC
,EAAE;AACvB,gBAAA,cAAc,CAAC,IAAI,CAAC,GAAG,QAAQ,CAAC;AAChC,gBAAA,cAAc,CAAC,IAAI,CAAC,EAAE,UAAU,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE,EAAE,CAAC;;;AAI1D,YAAA,IAAI,cAAc,CAAC,MAAM,KAAK,CAAC,EAAE;AAC/B,gBAAA,cAAc,CAAC,IAAI,CAAC,EAAE,UAAU,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE,EAAE,CAAC;;AAG1D,YAAA,MAAM,CAAC,UAAU,CAAC,KAAK,GAAG,cAAc;;;QAI1C,MAAM,eAAe,GAAG,OAAO,EAAE,WAAW,IAAI,IAAI,CAAC,WAAW;QAEhE,OAAO;AACL,YAAA,GAAG,MAAM;AACT,YAAA,WAAW,EAAE,eAAe,GAAG,EAAE,IAAI,EAAE,eAAe,EAAE,GAAG,SAAS;SACrE;;AAGH;;;AAGG;AACM,IAAA,MAAM,qBAAqB,CAClC,QAAuB,EACvB,OAAyE,EACzE,UAAqC,EAAA;;AAGrC,QAAA,MAAM,aAAa,GAAG,IAAI,CAAC,KAAK;AAChC,QAAA,IACE,IAAI,CAAC,2BAA2B,IAAI,IAAI;AACxC,YAAA,IAAI,CAAC,2BAA2B,KAAK,EAAE,EACvC;AACA,YAAA,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC,2BAA2B;;AAG/C,QAAA,IAAI;YACF,OAAO,MAAM,KAAK,CAAC,qBAAqB,CAAC,QAAQ,EAAE,OAAO,EAAE,UAAU,CAAC;;gBAC/D;;AAER,YAAA,IAAI,CAAC,KAAK,GAAG,aAAa;;;AAI9B;;;;;;;;AAQG;IACM,OAAO,qBAAqB,CACnC,QAAuB,EACvB,OAAyE,EACzE,UAAqC,EAAA;;AAGrC,QAAA,MAAM,aAAa,GAAG,IAAI,CAAC,KAAK;AAChC,QAAA,IACE,IAAI,CAAC,2BAA2B,IAAI,IAAI;AACxC,YAAA,IAAI,CAAC,2BAA2B,KAAK,EAAE,EACvC;AACA,YAAA,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC,2BAA2B;;AAG/C,QAAA,IAAI;;AAEF,YAAA,MAAM,UAAU,GAAG,KAAK,CAAC,qBAAqB,CAC5C,QAAQ,EACR,OAAO,EACP,UAAU,CACX;AAED,YAAA,WAAW,MAAM,KAAK,IAAI,UAAU,EAAE;;AAEpC,gBAAA,MAAM,IAAI,CAAC,YAAY,CAAC,KAAK,CAAC;;;gBAExB;;AAER,YAAA,IAAI,CAAC,KAAK,GAAG,aAAa;;;AAI9B;;;;AAIG;AACK,IAAA,YAAY,CAAC,KAA0B,EAAA;AAC7C,QAAA,MAAM,OAAO,GAAG,KAAK,CAAC,OAAO;AAC7B,QAAA,IAAI,EAAE,OAAO,YAAYC,uBAAc,CAAC,EAAE;AACxC,YAAA,OAAO,KAAK;;AAGd,QAAA,MAAM,gBAAgB,GAAG,OAAO,CAAC,iBAGhC;QACD,IAAI,iBAAiB,GAAG,KAAK;QAC7B,IAAI,eAAe,GAAG,gBAAgB;;QAGtC,MAAM,oBAAoB,GAAG,IAAI,CAAC,oBAAoB,CAAC,gBAAgB,CAAC;QACxE,IAAI,oBAAoB,EAAE;AACxB,YAAA,eAAe,GAAG,IAAI,CAAC,uBAAuB,CAC5C,gBAAgB,CACU;YAC5B,iBAAiB,GAAG,IAAI;;;;AAK1B,QAAA,MAAM,QAAQ,GAAG,gBAAgB,CAAC,QAErB;QACb,MAAM,KAAK,IAAI,QAAQ,EAAE,KAAK,IAAI,gBAAgB,CAAC,KAAK,CAE3C;AAEb,QAAA,IAAI,qBAAqB,GACvB,OAAO,CAAC,cAAc;QAExB,IAAI,KAAK,EAAE;AACT,YAAA,MAAM,SAAS,GAAI,KAAK,CAAC,oBAA2C,IAAI,CAAC;AACzE,YAAA,MAAM,UAAU,G
ACb,KAAK,CAAC,qBAA4C,IAAI,CAAC;AAC1D,YAAA,MAAM,WAAW,GAAI,KAAK,CAAC,WAAkC,IAAI,CAAC;AAClE,YAAA,MAAM,YAAY,GAAI,KAAK,CAAC,YAAmC,IAAI,CAAC;YAEpE,IAAI,SAAS,GAAG,CAAC,IAAI,UAAU,GAAG,CAAC,EAAE;gBACnC,iBAAiB,GAAG,IAAI;AACxB,gBAAA,qBAAqB,GAAG;AACtB,oBAAA,YAAY,EAAE,OAAO,CAAC,cAAc,EAAE,YAAY,IAAI,WAAW;AACjE,oBAAA,aAAa,EAAE,OAAO,CAAC,cAAc,EAAE,aAAa,IAAI,YAAY;AACpE,oBAAA,YAAY,EACV,OAAO,CAAC,cAAc,EAAE,YAAY;AACnC,wBAAA,KAAK,CAAC,WAAkC;wBACzC,CAAC;AACH,oBAAA,mBAAmB,EAAE;AACnB,wBAAA,UAAU,EAAE,SAAS;AACrB,wBAAA,cAAc,EAAE,UAAU;AAC3B,qBAAA;iBACF;;;QAIL,IAAI,iBAAiB,EAAE;YACrB,OAAO,IAAIC,2BAAmB,CAAC;gBAC7B,IAAI,EAAE,KAAK,CAAC,IAAI;gBAChB,OAAO,EAAE,IAAID,uBAAc,CAAC;AAC1B,oBAAA,GAAG,OAAO;AACV,oBAAA,iBAAiB,EAAE,eAAe;AAClC,oBAAA,cAAc,EAAE,qBAAqB;iBACtC,CAAC;gBACF,cAAc,EAAE,KAAK,CAAC,cAAc;AACrC,aAAA,CAAC;;AAGJ,QAAA,OAAO,KAAK;;AAGd;;AAEG;AACK,IAAA,oBAAoB,CAAC,GAAY,EAAA;AACvC,QAAA,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,KAAK,SAAS,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE;AAChE,YAAA,OAAO,KAAK;;AAGd,QAAA,IAAI,mBAAmB,IAAI,GAAG,EAAE;AAC9B,YAAA,OAAO,IAAI;;QAGb,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE;YACtC,IAAI,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,KAAK,IAAI,EAAE;AAC/C,gBAAA,IAAI,IAAI,CAAC,oBAAoB,CAAC,KAAK,CAAC,EAAE;AACpC,oBAAA,OAAO,IAAI;;;;AAKjB,QAAA,OAAO,KAAK;;AAGd;;AAEG;AACK,IAAA,uBAAuB,CAAC,GAAY,EAAA;QAC1C,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,KAAK,SAAS,EAAE;AACrC,YAAA,OAAO,GAAG;;AAGZ,QAAA,IAAI,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE;AACtB,YAAA,OAAO,GAAG,CAAC,GAAG,CAAC,CAAC,IAAI,KAAK,IAAI,CAAC,uBAAuB,CAAC,IAAI,CAAC,CAAC;;AAG9D,QAAA,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE;YAC3B,MAAM,OAAO,GAA4B,EAAE;AAC3C,YAAA,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE;AAC9C,gBAAA,IAAI,GAAG,KAAK,mBAAmB,EAAE;oBAC/B,OAAO,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,uBAAuB,CAAC,KAAK,CAAC;;;AAGtD,YAAA,OAAO,OAAO;;AAGhB,QAAA,OAAO,GAAG;;AAEb;;;;"}
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var outputs = require('@langchain/core/outputs');
|
|
4
|
+
var messages = require('@langchain/core/messages');
|
|
5
|
+
var testing = require('@langchain/core/utils/testing');
|
|
6
|
+
|
|
7
|
+
/**
 * Fake streaming chat model for tests.
 *
 * Extends FakeListChatModel with:
 *   - configurable chunking of each canned response via `splitStrategy`
 *     ({ type: 'regex', value: RegExp } splits on a pattern;
 *      { type: 'fixed', value: number } slices fixed-size pieces);
 *   - optional tool calls (`toolCalls`) emitted exactly once, as a final
 *     chunk, after the first streamed response completes.
 */
class FakeChatModel extends testing.FakeListChatModel {
    // Chunking strategy for streamed responses.
    splitStrategy;
    // Tool calls to append (once) after the streamed text.
    toolCalls = [];
    // Guards against emitting the tool-call chunk more than once.
    addedToolCalls = false;
    constructor({ responses, sleep, emitCustomEvent, splitStrategy = { type: 'regex', value: /(?<=\s+)|(?=\s+)/ }, toolCalls = [] }) {
        super({ responses, sleep, emitCustomEvent });
        this.splitStrategy = splitStrategy;
        this.toolCalls = toolCalls;
    }
    /**
     * Split `text` into stream chunks per the configured strategy.
     * @param {string} text
     * @returns {string[]}
     */
    splitText(text) {
        if (this.splitStrategy.type === 'regex') {
            return text.split(this.splitStrategy.value);
        }
        // 'fixed' strategy: slice into equal-size pieces.
        const chunkSize = this.splitStrategy.value;
        const chunks = [];
        for (let i = 0; i < text.length; i += chunkSize) {
            chunks.push(text.slice(i, i + chunkSize));
        }
        return chunks;
    }
    /**
     * Build a ChatGenerationChunk. When tool_call_chunks are provided they
     * are mirrored into additional_kwargs.tool_calls in OpenAI function
     * format so downstream consumers see both representations.
     */
    _createResponseChunk(text, tool_call_chunks) {
        return new outputs.ChatGenerationChunk({
            text,
            generationInfo: {},
            message: new messages.AIMessageChunk({
                content: text,
                tool_call_chunks,
                additional_kwargs: tool_call_chunks ? {
                    tool_calls: tool_call_chunks.map((toolCall) => ({
                        index: toolCall.index ?? 0,
                        id: toolCall.id ?? '',
                        type: 'function',
                        function: {
                            name: toolCall.name ?? '',
                            arguments: toolCall.args ?? '',
                        },
                    })),
                } : undefined,
            })
        });
    }
    /**
     * Stream the current canned response chunk-by-chunk, optionally
     * followed by a single tool-call chunk (first stream only).
     * Honors `sleep`, `emitCustomEvent`, and the `thrownErrorString`
     * call option (which aborts the stream with an Error).
     */
    async *_streamResponseChunks(_messages, options, runManager) {
        const response = this._currentResponse();
        this._incrementResponse();
        if (this.emitCustomEvent) {
            await runManager?.handleCustomEvent('some_test_event', {
                someval: true,
            });
        }
        // Plain string array — `for...of` is correct here; the original
        // `for await` added nothing but extra microtask ticks.
        for (const chunk of this.splitText(response)) {
            await this._sleepIfRequested();
            // Truthiness alone suffices; the former `!= null &&` guard was
            // redundant with the truthy check for a string option.
            if (options.thrownErrorString) {
                throw new Error(options.thrownErrorString);
            }
            yield super._createResponseChunk(chunk);
            // Fire-and-forget token callback, matching original behavior.
            void runManager?.handleLLMNewToken(chunk);
        }
        await this._sleepIfRequested();
        if (this.toolCalls.length > 0 && !this.addedToolCalls) {
            this.addedToolCalls = true;
            const toolCallChunks = this.toolCalls.map((toolCall) => ({
                name: toolCall.name,
                args: JSON.stringify(toolCall.args),
                id: toolCall.id,
                type: 'tool_call_chunk',
            }));
            yield this._createResponseChunk('', toolCallChunks);
            void runManager?.handleLLMNewToken('');
        }
    }
}
|
|
85
|
+
/**
 * Convenience factory for a FakeChatModel with custom-event emission
 * turned on; all other options are forwarded unchanged.
 */
function createFakeStreamingLLM({ responses, sleep, splitStrategy, toolCalls, }) {
    const config = {
        sleep,
        responses,
        emitCustomEvent: true,
        splitStrategy,
        toolCalls,
    };
    return new FakeChatModel(config);
}
|
|
94
|
+
|
|
95
|
+
// Public CommonJS exports of this compiled module.
exports.FakeChatModel = FakeChatModel;
exports.createFakeStreamingLLM = createFakeStreamingLLM;
//# sourceMappingURL=fake.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"fake.cjs","sources":["../../../src/llm/fake.ts"],"sourcesContent":["import { ChatGenerationChunk } from '@langchain/core/outputs';\nimport { AIMessageChunk } from '@langchain/core/messages';\nimport type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport { FakeListChatModel } from '@langchain/core/utils/testing';\nimport { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';\n\ntype SplitStrategy = {\n type: 'regex' | 'fixed';\n value: RegExp | number;\n};\n\nexport class FakeChatModel extends FakeListChatModel {\n private splitStrategy: SplitStrategy;\n private toolCalls: ToolCall[] = [];\n private addedToolCalls: boolean = false;\n\n constructor({\n responses,\n sleep,\n emitCustomEvent,\n splitStrategy = { type: 'regex', value: /(?<=\\s+)|(?=\\s+)/ },\n toolCalls = []\n }: {\n responses: string[];\n sleep?: number;\n emitCustomEvent?: boolean;\n splitStrategy?: SplitStrategy;\n toolCalls?: ToolCall[];\n }) {\n super({ responses, sleep, emitCustomEvent });\n this.splitStrategy = splitStrategy;\n this.toolCalls = toolCalls;\n }\n\n private splitText(text: string): string[] {\n if (this.splitStrategy.type === 'regex') {\n return text.split(this.splitStrategy.value as RegExp);\n } else {\n const chunkSize = this.splitStrategy.value as number;\n const chunks: string[] = [];\n for (let i = 0; i < text.length; i += chunkSize) {\n chunks.push(text.slice(i, i + chunkSize));\n }\n return chunks;\n }\n }\n _createResponseChunk(text: string, tool_call_chunks?: ToolCallChunk[]): ChatGenerationChunk {\n return new ChatGenerationChunk({\n text,\n generationInfo: {},\n message: new AIMessageChunk({\n content: text,\n tool_call_chunks,\n additional_kwargs: tool_call_chunks ? {\n tool_calls: tool_call_chunks.map((toolCall) => ({\n index: toolCall.index ?? 0,\n id: toolCall.id ?? '',\n type: 'function',\n function: {\n name: toolCall.name ?? 
'',\n arguments: toolCall.args ?? '',\n },\n })),\n } : undefined,\n })});\n }\n\n async *_streamResponseChunks(\n _messages: BaseMessage[],\n options: this['ParsedCallOptions'],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const response = this._currentResponse();\n this._incrementResponse();\n\n if (this.emitCustomEvent) {\n await runManager?.handleCustomEvent('some_test_event', {\n someval: true,\n });\n }\n\n const chunks = this.splitText(response);\n for await (const chunk of chunks) {\n await this._sleepIfRequested();\n\n if (options.thrownErrorString != null && options.thrownErrorString) {\n throw new Error(options.thrownErrorString);\n }\n\n const responseChunk = super._createResponseChunk(chunk);\n yield responseChunk;\n void runManager?.handleLLMNewToken(chunk);\n }\n\n await this._sleepIfRequested();\n if (this.toolCalls.length > 0 && !this.addedToolCalls) {\n this.addedToolCalls = true;\n const toolCallChunks = this.toolCalls.map((toolCall) => {;\n return {\n name: toolCall.name,\n args: JSON.stringify(toolCall.args),\n id: toolCall.id,\n type: 'tool_call_chunk',\n } as ToolCallChunk;\n });\n const responseChunk = this._createResponseChunk('', toolCallChunks);\n yield responseChunk;\n void runManager?.handleLLMNewToken('');\n }\n }\n}\n\nexport function createFakeStreamingLLM({\n responses,\n sleep,\n splitStrategy,\n toolCalls,\n} : {\n responses: string[],\n sleep?: number,\n splitStrategy?: SplitStrategy,\n toolCalls?: ToolCall[]\n}\n): FakeChatModel {\n return new FakeChatModel({\n sleep,\n responses,\n emitCustomEvent: true,\n splitStrategy,\n toolCalls,\n 
});\n}\n"],"names":["FakeListChatModel","ChatGenerationChunk","AIMessageChunk"],"mappings":";;;;;;AAYM,MAAO,aAAc,SAAQA,yBAAiB,CAAA;AAC1C,IAAA,aAAa;IACb,SAAS,GAAe,EAAE;IAC1B,cAAc,GAAY,KAAK;IAEvC,WAAY,CAAA,EACV,SAAS,EACT,KAAK,EACL,eAAe,EACf,aAAa,GAAG,EAAE,IAAI,EAAE,OAAO,EAAE,KAAK,EAAE,kBAAkB,EAAE,EAC5D,SAAS,GAAG,EAAE,EAOf,EAAA;QACC,KAAK,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,eAAe,EAAE,CAAC;AAC5C,QAAA,IAAI,CAAC,aAAa,GAAG,aAAa;AAClC,QAAA,IAAI,CAAC,SAAS,GAAG,SAAS;;AAGpB,IAAA,SAAS,CAAC,IAAY,EAAA;QAC5B,IAAI,IAAI,CAAC,aAAa,CAAC,IAAI,KAAK,OAAO,EAAE;YACvC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,aAAa,CAAC,KAAe,CAAC;;aAChD;AACL,YAAA,MAAM,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,KAAe;YACpD,MAAM,MAAM,GAAa,EAAE;AAC3B,YAAA,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,MAAM,EAAE,CAAC,IAAI,SAAS,EAAE;AAC/C,gBAAA,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,GAAG,SAAS,CAAC,CAAC;;AAE3C,YAAA,OAAO,MAAM;;;IAGjB,oBAAoB,CAAC,IAAY,EAAE,gBAAkC,EAAA;QACnE,OAAO,IAAIC,2BAAmB,CAAC;YAC7B,IAAI;AACJ,YAAA,cAAc,EAAE,EAAE;YAClB,OAAO,EAAE,IAAIC,uBAAc,CAAC;AAC1B,gBAAA,OAAO,EAAE,IAAI;gBACb,gBAAgB;AAChB,gBAAA,iBAAiB,EAAE,gBAAgB,GAAG;oBACpC,UAAU,EAAE,gBAAgB,CAAC,GAAG,CAAC,CAAC,QAAQ,MAAM;AAC9C,wBAAA,KAAK,EAAE,QAAQ,CAAC,KAAK,IAAI,CAAC;AAC1B,wBAAA,EAAE,EAAE,QAAQ,CAAC,EAAE,IAAI,EAAE;AACrB,wBAAA,IAAI,EAAE,UAAU;AAChB,wBAAA,QAAQ,EAAE;AACR,4BAAA,IAAI,EAAE,QAAQ,CAAC,IAAI,IAAI,EAAE;AACzB,4BAAA,SAAS,EAAE,QAAQ,CAAC,IAAI,IAAI,EAAE;AAC/B,yBAAA;AACF,qBAAA,CAAC,CAAC;iBACJ,GAAG,SAAS;aACd;AAAE,SAAA,CAAC;;IAGR,OAAO,qBAAqB,CAC1B,SAAwB,EACxB,OAAkC,EAClC,UAAqC,EAAA;AAErC,QAAA,MAAM,QAAQ,GAAG,IAAI,CAAC,gBAAgB,EAAE;QACxC,IAAI,CAAC,kBAAkB,EAAE;AAEzB,QAAA,IAAI,IAAI,CAAC,eAAe,EAAE;AACxB,YAAA,MAAM,UAAU,EAAE,iBAAiB,CAAC,iBAAiB,EAAE;AACrD,gBAAA,OAAO,EAAE,IAAI;AACd,aAAA,CAAC;;QAGJ,MAAM,MAAM,GAAG,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC;AACvC,QAAA,WAAW,MAAM,KAAK,IAAI,MAAM,EAAE;AAChC,YAAA,MAAM,IAAI,CAAC,iBAAiB,EAAE;YAE9B,IAAI,OAAO,CAAC,iBAAiB,IAAI,IAAI,IAAI,OAAO,CAAC,iBAAiB,EAAE;AAClE,gBAAA,MAAM,IAAI,KAAK,CAAC,OAAO,CAAC,iBAAiB,CAAC;;YAG5C,MAAM,aAAa,GAAG,KAAK,CAAC,
oBAAoB,CAAC,KAAK,CAAC;AACvD,YAAA,MAAM,aAAa;AACnB,YAAA,KAAK,UAAU,EAAE,iBAAiB,CAAC,KAAK,CAAC;;AAG3C,QAAA,MAAM,IAAI,CAAC,iBAAiB,EAAE;AAC9B,QAAA,IAAI,IAAI,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,cAAc,EAAE;AACrD,YAAA,IAAI,CAAC,cAAc,GAAG,IAAI;YAC1B,MAAM,cAAc,GAAG,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,CAAC,QAAQ,KAAI;gBACrD,OAAO;oBACL,IAAI,EAAE,QAAQ,CAAC,IAAI;oBACnB,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,IAAI,CAAC;oBACnC,EAAE,EAAE,QAAQ,CAAC,EAAE;AACf,oBAAA,IAAI,EAAE,iBAAiB;iBACP;AACpB,aAAC,CAAC;YACF,MAAM,aAAa,GAAG,IAAI,CAAC,oBAAoB,CAAC,EAAE,EAAE,cAAc,CAAC;AACnE,YAAA,MAAM,aAAa;AACnB,YAAA,KAAK,UAAU,EAAE,iBAAiB,CAAC,EAAE,CAAC;;;AAG3C;AAEK,SAAU,sBAAsB,CAAC,EACrC,SAAS,EACT,KAAK,EACL,aAAa,EACb,SAAS,GAMV,EAAA;IAEC,OAAO,IAAI,aAAa,CAAC;QACvB,KAAK;QACL,SAAS;AACT,QAAA,eAAe,EAAE,IAAI;QACrB,aAAa;QACb,SAAS;AACV,KAAA,CAAC;AACJ;;;;;"}
|
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var messages = require('@langchain/core/messages');
|
|
4
|
+
var outputs = require('@langchain/core/outputs');
|
|
5
|
+
var googleGenai = require('@langchain/google-genai');
|
|
6
|
+
var env = require('@langchain/core/utils/env');
|
|
7
|
+
var generativeAi = require('@google/generative-ai');
|
|
8
|
+
var common = require('./utils/common.cjs');
|
|
9
|
+
|
|
10
|
+
/* eslint-disable @typescript-eslint/ban-ts-comment */
|
|
11
|
+
/**
 * Extension of LangChain's ChatGoogleGenerativeAI that adds:
 *  - gemini-3 / gemma-3 recognition in `_isMultimodalModel`,
 *  - an optional `thinkingConfig` forwarded to the underlying client,
 *  - usage-metadata conversion including thought tokens, cached tokens, and
 *    over-200k bracket tracking for `gemini-3-pro-preview`.
 *
 * NOTE(review): this class reaches into parent/private state (`this.client`)
 * via @ts-ignore and rebuilds the client in the constructor; behavior depends
 * on @langchain/google-genai internals not visible here.
 */
class CustomChatGoogleGenerativeAI extends googleGenai.ChatGoogleGenerativeAI {
    // Optional thinking configuration merged into the client's
    // generationConfig inside invocationParams(); shape defined by the
    // @google/generative-ai SDK — not validated here.
    thinkingConfig;
    /**
     * Override to add gemini-3 model support for multimodal and function calling thought signatures.
     * Returns true for gemini-1.5, gemini-2*, gemini-3*, and gemma-3-* except
     * the 1b variant.
     */
    get _isMultimodalModel() {
        return (this.model.startsWith('gemini-1.5') ||
            this.model.startsWith('gemini-2') ||
            (this.model.startsWith('gemma-3-') &&
                !this.model.startsWith('gemma-3-1b')) ||
            this.model.startsWith('gemini-3'));
    }
    /**
     * Validates generation parameters, resolves the API key (field or
     * GOOGLE_API_KEY env var), and constructs a fresh GoogleGenerativeAI
     * client, replacing the one the parent constructor created.
     *
     * @param {object} fields - Constructor options; superset of the parent's
     *   fields plus `thinkingConfig` and `json`.
     * @throws {Error} on out-of-range maxOutputTokens / temperature / topP /
     *   topK, missing API key, or duplicate safety-setting categories.
     */
    constructor(fields) {
        super(fields);
        // Normalize "models/<name>" to the bare model name.
        this.model = fields.model.replace(/^models\//, '');
        this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
        if (this.maxOutputTokens != null && this.maxOutputTokens < 0) {
            throw new Error('`maxOutputTokens` must be a positive integer');
        }
        this.temperature = fields.temperature ?? this.temperature;
        if (this.temperature != null &&
            (this.temperature < 0 || this.temperature > 2)) {
            throw new Error('`temperature` must be in the range of [0.0,2.0]');
        }
        this.topP = fields.topP ?? this.topP;
        if (this.topP != null && this.topP < 0) {
            throw new Error('`topP` must be a positive integer');
        }
        if (this.topP != null && this.topP > 1) {
            throw new Error('`topP` must be below 1.');
        }
        this.topK = fields.topK ?? this.topK;
        if (this.topK != null && this.topK < 0) {
            throw new Error('`topK` must be a positive integer');
        }
        this.stopSequences = fields.stopSequences ?? this.stopSequences;
        this.apiKey = fields.apiKey ?? env.getEnvironmentVariable('GOOGLE_API_KEY');
        if (this.apiKey == null || this.apiKey === '') {
            throw new Error('Please set an API key for Google GenerativeAI ' +
                'in the environment variable GOOGLE_API_KEY ' +
                'or in the `apiKey` field of the ' +
                'ChatGoogleGenerativeAI constructor');
        }
        this.safetySettings = fields.safetySettings ?? this.safetySettings;
        if (this.safetySettings && this.safetySettings.length > 0) {
            // Each safety category may appear at most once.
            const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
            if (safetySettingsSet.size !== this.safetySettings.length) {
                throw new Error('The categories in `safetySettings` array must be unique');
            }
        }
        this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
        this.streaming = fields.streaming ?? this.streaming;
        this.json = fields.json;
        // Rebuild the SDK client with our (possibly adjusted) parameters;
        // `json` switches the response MIME type to application/json.
        // @ts-ignore - Accessing private property from parent class
        this.client = new generativeAi.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
            model: this.model,
            safetySettings: this.safetySettings,
            generationConfig: {
                stopSequences: this.stopSequences,
                maxOutputTokens: this.maxOutputTokens,
                temperature: this.temperature,
                topP: this.topP,
                topK: this.topK,
                ...(this.json != null
                    ? { responseMimeType: 'application/json' }
                    : {}),
            },
        }, {
            apiVersion: fields.apiVersion,
            baseUrl: fields.baseUrl,
            customHeaders: fields.customHeaders,
        });
        this.streamUsage = fields.streamUsage ?? this.streamUsage;
    }
    // LangChain serialization name for this class.
    static lc_name() {
        return 'IllumaGoogleGenerativeAI';
    }
    /**
     * Helper function to convert Gemini API usage metadata to LangChain format.
     * Includes support for cached tokens and tier-based tracking for gemini-3-pro-preview.
     *
     * @param {object|undefined} usageMetadata - Raw SDK usage metadata.
     * @param {string} model - Model name; triggers bracket tracking for
     *   'gemini-3-pro-preview'.
     * @returns {object|undefined} LangChain usage metadata, or undefined when
     *   no metadata was provided.
     */
    _convertToUsageMetadata(usageMetadata, model) {
        if (!usageMetadata) {
            return undefined;
        }
        const output = {
            input_tokens: usageMetadata.promptTokenCount ?? 0,
            // Thought tokens are billed as output, so they are folded in here.
            output_tokens: (usageMetadata.candidatesTokenCount ?? 0) +
                (usageMetadata.thoughtsTokenCount ?? 0),
            total_tokens: usageMetadata.totalTokenCount ?? 0,
        };
        if (usageMetadata.cachedContentTokenCount) {
            output.input_token_details ??= {};
            output.input_token_details.cache_read =
                usageMetadata.cachedContentTokenCount;
        }
        // gemini-3-pro-preview has bracket based tracking of tokens per request
        if (model === 'gemini-3-pro-preview') {
            const over200k = Math.max(0, (usageMetadata.promptTokenCount ?? 0) - 200000);
            // NOTE(review): this subtracts 200k from the *cached* count
            // independently of the prompt count — presumably mirroring the
            // pricing bracket for cached reads; confirm against billing docs.
            const cachedOver200k = Math.max(0, (usageMetadata.cachedContentTokenCount ?? 0) - 200000);
            if (over200k) {
                output.input_token_details = {
                    ...output.input_token_details,
                    over_200k: over200k,
                };
            }
            if (cachedOver200k) {
                output.input_token_details = {
                    ...output.input_token_details,
                    cache_read_over_200k: cachedOver200k,
                };
            }
        }
        return output;
    }
    /**
     * Extends the parent's invocation params and — as a side effect —
     * merges `thinkingConfig` into the live client's generationConfig.
     *
     * @param {object} options - Parsed call options forwarded to the parent.
     * @returns {object} the parent's invocation params, unchanged.
     */
    invocationParams(options) {
        const params = super.invocationParams(options);
        if (this.thinkingConfig) {
            // Mutates the shared client config rather than the returned params.
            /** @ts-ignore */
            this.client.generationConfig = {
                /** @ts-ignore */
                ...this.client.generationConfig,
                /** @ts-ignore */
                thinkingConfig: this.thinkingConfig,
            };
        }
        return params;
    }
    /**
     * Non-streaming generation: converts messages, hoists a leading system
     * message into the client's systemInstruction, calls generateContent,
     * and maps the result (with usage metadata) to a ChatResult.
     *
     * @param {BaseMessage[]} messages - Conversation history.
     * @param {object} options - Parsed call options (signal is honored).
     * @param {CallbackManagerForLLMRun} [runManager]
     * @returns {Promise<object>} the mapped generation result.
     */
    async _generate(messages, options, runManager) {
        const prompt = common.convertBaseMessagesToContent(messages, this._isMultimodalModel, this.useSystemInstruction, this.model);
        let actualPrompt = prompt;
        // NOTE(review): `prompt?.[0].role` throws on an empty array — only a
        // nullish prompt is guarded. Confirm convertBaseMessagesToContent
        // never returns [].
        if (prompt?.[0].role === 'system') {
            const [systemInstruction] = prompt;
            // Side effect: stored on the client for this and later calls.
            /** @ts-ignore */
            this.client.systemInstruction = systemInstruction;
            actualPrompt = prompt.slice(1);
        }
        const parameters = this.invocationParams(options);
        const request = {
            ...parameters,
            contents: actualPrompt,
        };
        const res = await this.caller.callWithOptions({ signal: options.signal }, async () =>
        /** @ts-ignore */
        this.client.generateContent(request));
        const response = res.response;
        const usageMetadata = this._convertToUsageMetadata(
        /** @ts-ignore */
        response.usageMetadata, this.model);
        /** @ts-ignore */
        const generationResult = common.mapGenerateContentResultToChatResult(response, {
            usageMetadata,
        });
        await runManager?.handleLLMNewToken(generationResult.generations[0].text || '', undefined, undefined, undefined, undefined, undefined);
        return generationResult;
    }
    /**
     * Streaming generation: mirrors _generate's prompt handling, then yields
     * one ChatGenerationChunk per SDK stream response. Usage metadata is
     * withheld from per-token chunks and emitted once in a trailing
     * empty-content chunk (when streamUsage is enabled on both the model and
     * the call options).
     *
     * @param {BaseMessage[]} messages$1 - Conversation history.
     * @param {object} options - Parsed call options (signal, streamUsage).
     * @param {CallbackManagerForLLMRun} [runManager]
     * @yields {ChatGenerationChunk}
     */
    async *_streamResponseChunks(messages$1, options, runManager) {
        const prompt = common.convertBaseMessagesToContent(messages$1, this._isMultimodalModel, this.useSystemInstruction, this.model);
        let actualPrompt = prompt;
        // NOTE(review): same empty-array hazard as in _generate above.
        if (prompt?.[0].role === 'system') {
            const [systemInstruction] = prompt;
            /** @ts-ignore */
            this.client.systemInstruction = systemInstruction;
            actualPrompt = prompt.slice(1);
        }
        const parameters = this.invocationParams(options);
        const request = {
            ...parameters,
            contents: actualPrompt,
        };
        const stream = await this.caller.callWithOptions({ signal: options.signal }, async () => {
            /** @ts-ignore */
            const { stream } = await this.client.generateContentStream(request);
            return stream;
        });
        let lastUsageMetadata;
        for await (const response of stream) {
            if ('usageMetadata' in response &&
                this.streamUsage !== false &&
                options.streamUsage !== false) {
                // Keep only the most recent metadata; emitted after the loop.
                lastUsageMetadata = this._convertToUsageMetadata(response.usageMetadata, this.model);
            }
            const chunk = common.convertResponseContentToChatGenerationChunk(response, {
                usageMetadata: undefined});
            if (!chunk) {
                continue;
            }
            yield chunk;
            await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
        }
        if (lastUsageMetadata) {
            // Trailing zero-content chunk carrying the aggregate usage.
            const finalChunk = new outputs.ChatGenerationChunk({
                text: '',
                message: new messages.AIMessageChunk({
                    content: '',
                    usage_metadata: lastUsageMetadata,
                }),
            });
            yield finalChunk;
            await runManager?.handleLLMNewToken(finalChunk.text || '', undefined, undefined, undefined, undefined, { chunk: finalChunk });
        }
    }
}
|
|
214
|
+
|
|
215
|
+
exports.CustomChatGoogleGenerativeAI = CustomChatGoogleGenerativeAI;
|
|
216
|
+
//# sourceMappingURL=index.cjs.map
|