@librechat/agents 3.1.52 → 3.1.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +16 -5
  2. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
  3. package/dist/cjs/llm/google/index.cjs.map +1 -1
  4. package/dist/cjs/llm/openrouter/index.cjs +59 -5
  5. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  6. package/dist/cjs/llm/vertexai/index.cjs +16 -2
  7. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  8. package/dist/cjs/main.cjs +2 -0
  9. package/dist/cjs/main.cjs.map +1 -1
  10. package/dist/esm/llm/bedrock/utils/message_outputs.mjs +16 -5
  11. package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
  12. package/dist/esm/llm/google/index.mjs.map +1 -1
  13. package/dist/esm/llm/openrouter/index.mjs +59 -5
  14. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  15. package/dist/esm/llm/vertexai/index.mjs +16 -2
  16. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  17. package/dist/esm/main.mjs +1 -0
  18. package/dist/esm/main.mjs.map +1 -1
  19. package/dist/types/index.d.ts +2 -0
  20. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +1 -1
  21. package/dist/types/llm/google/index.d.ts +2 -3
  22. package/dist/types/llm/openrouter/index.d.ts +21 -1
  23. package/dist/types/llm/vertexai/index.d.ts +2 -1
  24. package/dist/types/types/llm.d.ts +7 -2
  25. package/package.json +1 -1
  26. package/src/index.ts +6 -0
  27. package/src/llm/bedrock/llm.spec.ts +233 -4
  28. package/src/llm/bedrock/utils/message_outputs.ts +51 -11
  29. package/src/llm/google/index.ts +2 -3
  30. package/src/llm/openrouter/index.ts +117 -6
  31. package/src/llm/openrouter/reasoning.test.ts +207 -0
  32. package/src/llm/vertexai/index.ts +20 -3
  33. package/src/scripts/bedrock-cache-debug.ts +250 -0
  34. package/src/specs/openrouter.simple.test.ts +163 -2
  35. package/src/types/llm.ts +7 -2
  36. package/src/utils/llmConfig.ts +3 -4
@@ -1 +1 @@
(package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map: single-line minified sourcemap regenerated. The embedded sourcesContent shows the underlying change to message_outputs.ts: a new `UsageMetadata` type import, and cache-token tracking in `convertConverseMessageToLangChainMessage` and `handleConverseStreamMetadata`, which now read `cacheReadInputTokens`/`cacheWriteInputTokens` from the Converse usage object and report them as `input_token_details.cache_read`/`cache_creation`.)
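The cache accounting added here is simple arithmetic. Below is a minimal sketch of the new usage conversion, following the sourcesContent embedded in this map; `ConverseUsage` is a hypothetical stand-in for the SDK's usage shape, not a type from the package.

import type { UsageMetadata } from '@langchain/core/messages';

// Hypothetical stand-in for the Bedrock Converse usage object seen above.
interface ConverseUsage {
  inputTokens?: number;
  outputTokens?: number;
  totalTokens?: number;
  cacheReadInputTokens?: number;
  cacheWriteInputTokens?: number;
}

function toUsageMetadata(usage: ConverseUsage): UsageMetadata {
  const input_tokens = usage.inputTokens ?? 0;
  const output_tokens = usage.outputTokens ?? 0;
  const result: UsageMetadata = {
    input_tokens,
    output_tokens,
    total_tokens: usage.totalTokens ?? input_tokens + output_tokens,
  };
  // Cache details are attached only when the response actually reports them,
  // mirroring the guard in the shipped code.
  if (usage.cacheReadInputTokens != null || usage.cacheWriteInputTokens != null) {
    result.input_token_details = {
      cache_read: usage.cacheReadInputTokens ?? 0,
      cache_creation: usage.cacheWriteInputTokens ?? 0,
    };
  }
  return result;
}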
@@ -1 +1 @@
(package/dist/esm/llm/google/index.mjs.map: single-line minified sourcemap regenerated. The embedded sourcesContent shows the underlying change to google/index.ts: `thinkingConfig` on `CustomChatGoogleGenerativeAI` is now typed as `GoogleThinkingConfig` imported from `@/types`, replacing `GeminiGenerationConfig['thinkingConfig']` and dropping the `@langchain/google-common` type import.)
@@ -4,19 +4,73 @@ import { AIMessageChunk } from '@langchain/core/messages';
 import { _convertMessagesToOpenAIParams } from '../openai/utils/index.mjs';
 
 class ChatOpenRouter extends ChatOpenAI {
+    openRouterReasoning;
+    /** @deprecated Use `reasoning` object instead */
+    includeReasoning;
     constructor(_fields) {
-        const { include_reasoning, modelKwargs = {}, ...fields } = _fields;
+        const { include_reasoning, reasoning: openRouterReasoning, modelKwargs = {}, ...fields } = _fields;
+        // Extract reasoning from modelKwargs if provided there (e.g., from LLMConfig)
+        const { reasoning: mkReasoning, ...restModelKwargs } = modelKwargs;
         super({
             ...fields,
-            modelKwargs: {
-                ...modelKwargs,
-                include_reasoning,
-            },
+            modelKwargs: restModelKwargs,
         });
+        // Merge reasoning config: modelKwargs.reasoning < constructor reasoning
+        if (mkReasoning != null || openRouterReasoning != null) {
+            this.openRouterReasoning = {
+                ...mkReasoning,
+                ...openRouterReasoning,
+            };
+        }
+        this.includeReasoning = include_reasoning;
     }
     static lc_name() {
         return 'LibreChatOpenRouter';
     }
+    // @ts-expect-error - OpenRouter reasoning extends OpenAI Reasoning with additional
+    // effort levels ('xhigh' | 'none' | 'minimal') not in ReasoningEffort.
+    // The parent's generic conditional return type cannot be widened in an override.
+    invocationParams(options, extra) {
+        const params = super.invocationParams(options, extra);
+        // Remove the OpenAI-native reasoning_effort that the parent sets;
+        // OpenRouter uses a `reasoning` object instead
+        delete params.reasoning_effort;
+        // Build the OpenRouter reasoning config
+        const reasoning = this.buildOpenRouterReasoning(options);
+        if (reasoning != null) {
+            params.reasoning = reasoning;
+        }
+        else {
+            delete params.reasoning;
+        }
+        return params;
+    }
+    buildOpenRouterReasoning(options) {
+        let reasoning;
+        // 1. Instance-level reasoning config (from constructor)
+        if (this.openRouterReasoning != null) {
+            reasoning = { ...this.openRouterReasoning };
+        }
+        // 2. LangChain-style reasoning params (from parent's `this.reasoning`)
+        const lcReasoning = this.getReasoningParams(options);
+        if (lcReasoning?.effort != null) {
+            reasoning = {
+                ...reasoning,
+                effort: lcReasoning.effort,
+            };
+        }
+        // 3. Call-level reasoning override
+        const callReasoning = options
+            ?.reasoning;
+        if (callReasoning != null) {
+            reasoning = { ...reasoning, ...callReasoning };
+        }
+        // 4. Legacy include_reasoning backward compatibility
+        if (reasoning == null && this.includeReasoning === true) {
+            reasoning = { enabled: true };
+        }
+        return reasoning;
+    }
     _convertOpenAIDeltaToBaseMessageChunk(
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     delta, rawResponse, defaultRole) {
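Taken together, the new constructor and the `invocationParams` override replace the OpenAI-native `reasoning_effort` field with OpenRouter's `reasoning` request object, merged with a four-step precedence: `modelKwargs.reasoning` < constructor `reasoning` < LangChain's `getReasoningParams` effort < call-level `reasoning`, with the deprecated `include_reasoning` flag as a last-resort fallback. A minimal standalone sketch of that merge order (the `OpenRouterReasoning` shape follows the diffed source; the sample values are hypothetical):

```typescript
// Standalone sketch of the merge order in buildOpenRouterReasoning (values are hypothetical).
type OpenRouterReasoningEffort = 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';

interface OpenRouterReasoning {
  effort?: OpenRouterReasoningEffort;
  max_tokens?: number;
  exclude?: boolean;
  enabled?: boolean;
}

function mergeReasoning(
  instance?: OpenRouterReasoning,       // constructor config (modelKwargs.reasoning < `reasoning`)
  lcEffort?: OpenRouterReasoningEffort, // from the parent's getReasoningParams(options)
  callLevel?: OpenRouterReasoning,      // per-call override in call options
  includeReasoning?: boolean            // deprecated include_reasoning flag
): OpenRouterReasoning | undefined {
  let reasoning: OpenRouterReasoning | undefined =
    instance != null ? { ...instance } : undefined;
  if (lcEffort != null) {
    reasoning = { ...reasoning, effort: lcEffort };
  }
  if (callLevel != null) {
    reasoning = { ...reasoning, ...callLevel };
  }
  if (reasoning == null && includeReasoning === true) {
    reasoning = { enabled: true }; // legacy fallback only when nothing else is set
  }
  return reasoning;
}

// Call-level options win over the constructor config:
console.log(mergeReasoning({ effort: 'medium' }, undefined, { effort: 'high', max_tokens: 2048 }));
// -> { effort: 'high', max_tokens: 2048 }

// With nothing else configured, the deprecated flag still enables reasoning:
console.log(mergeReasoning(undefined, undefined, undefined, true));
// -> { enabled: true }
```

Spread order is what enforces the precedence here: later sources overwrite earlier keys, and the legacy flag only applies when no other source produced a config.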
@@ -1 +1 @@
- [previous source map for openrouter/index.mjs: mappings and embedded sourcesContent for the old build]
+ [regenerated source map: sourcesContent now declares the OpenRouterReasoningEffort and OpenRouterReasoning types, an OpenRouterInvocationParams return type, and the invocationParams/buildOpenRouterReasoning overrides; _streamResponseChunks2 accumulates reasoning.text deltas by index and reasoning.encrypted entries by id, attaching the combined reasoning_details only to the final chunk]
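The regenerated source also shows how `_streamResponseChunks2` handles OpenRouter's two reasoning-delta styles: `reasoning.text` entries are concatenated per `index`, `reasoning.encrypted` entries (thought signatures) are kept as separate entries keyed by `id`, and the assembled `reasoning_details` array is attached only to the final chunk (the one carrying `finish_reason`) so LangChain's chunk concatenation cannot corrupt it. A minimal sketch of that accumulation scheme, with hypothetical deltas:

```typescript
// Minimal sketch of the reasoning_details accumulation in _streamResponseChunks2
// (detail shapes follow the diffed source; the sample deltas are hypothetical).
interface ReasoningDetail {
  type: 'reasoning.text' | 'reasoning.encrypted';
  text?: string;
  id?: string;
  data?: string;
  format?: string;
  index?: number;
}

const textByIndex = new Map<number, ReasoningDetail>();
const encryptedById = new Map<string, ReasoningDetail>();

function accumulate(details: ReasoningDetail[]): void {
  for (const detail of details) {
    if (detail.type === 'reasoning.encrypted' && detail.id) {
      // Thought signatures must stay separate entries, keyed by id.
      encryptedById.set(detail.id, detail);
    } else if (detail.type === 'reasoning.text') {
      // Text deltas for the same index are concatenated into one entry.
      const idx = detail.index ?? 0;
      const existing = textByIndex.get(idx);
      if (existing) {
        existing.text = (existing.text ?? '') + (detail.text ?? '');
      } else {
        textByIndex.set(idx, { ...detail, text: detail.text ?? '', index: idx });
      }
    }
  }
}

accumulate([{ type: 'reasoning.text', text: 'Let me think', index: 0 }]);
accumulate([{ type: 'reasoning.text', text: ' step by step.', index: 0 }]);
accumulate([{ type: 'reasoning.encrypted', id: 'sig_1', data: 'opaque', format: 'google' }]);

// Only the final chunk (finish_reason != null) carries the assembled array:
const finalReasoningDetails = [...textByIndex.values(), ...encryptedById.values()];
console.log(finalReasoningDetails);
// -> one merged text entry plus the separate encrypted signature entry
```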
@@ -2,6 +2,7 @@ import { ChatGoogle } from '@langchain/google-gauth';
 import { ChatConnection } from '@langchain/google-common';
 
 class CustomChatConnection extends ChatConnection {
+    thinkingConfig;
     async formatData(input, parameters) {
         const formattedData = (await super.formatData(input, parameters));
         if (formattedData.generationConfig?.thinkingConfig?.thinkingBudget === -1) {
@@ -11,6 +12,13 @@ class CustomChatConnection extends ChatConnection {
             }
             delete formattedData.generationConfig.thinkingConfig.thinkingBudget;
         }
+        if (this.thinkingConfig?.thinkingLevel) {
+            formattedData.generationConfig ??= {};
+            formattedData.generationConfig.thinkingConfig = {
+                ...formattedData.generationConfig.thinkingConfig,
+                thinkingLevel: this.thinkingConfig.thinkingLevel,
+            };
+        }
         return formattedData;
     }
 }
@@ -299,6 +307,7 @@ class CustomChatConnection extends ChatConnection {
 class ChatVertexAI extends ChatGoogle {
     lc_namespace = ['langchain', 'chat_models', 'vertexai'];
     dynamicThinkingBudget = false;
+    thinkingConfig;
     static lc_name() {
         return 'LibreChatVertexAI';
     }
@@ -309,6 +318,7 @@ class ChatVertexAI extends ChatGoogle {
             platformType: 'gcp',
         });
         this.dynamicThinkingBudget = dynamicThinkingBudget;
+        this.thinkingConfig = fields?.thinkingConfig;
     }
     invocationParams(options) {
         const params = super.invocationParams(options);
@@ -318,8 +328,12 @@ class ChatVertexAI extends ChatGoogle {
         return params;
     }
     buildConnection(fields, client) {
-        this.connection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, false);
-        this.streamedConnection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, true);
+        const connection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, false);
+        connection.thinkingConfig = this.thinkingConfig;
+        this.connection = connection;
+        const streamedConnection = new CustomChatConnection({ ...fields, ...this }, this.caller, client, true);
+        streamedConnection.thinkingConfig = this.thinkingConfig;
+        this.streamedConnection = streamedConnection;
     }
 }
 
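The net effect is that a `thinkingConfig` passed to the `ChatVertexAI` constructor now survives into both the regular and streamed connections, where `formatData` merges its `thinkingLevel` into the outgoing `generationConfig`. A hypothetical usage sketch; it assumes `ChatVertexAI` is exported from the package root (the export surface is not shown in this hunk), and the model name and level value are illustrative:

```typescript
// Hypothetical usage sketch; the import path and option values are assumptions.
import { ChatVertexAI } from '@librechat/agents';

const llm = new ChatVertexAI({
  model: 'gemini-2.5-pro',                   // illustrative model name
  thinkingBudget: -1,                        // -1 = "let the model decide"; stripped before the API call
  thinkingConfig: { thinkingLevel: 'high' }, // forwarded to both connections via buildConnection
});
```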
 
@@ -1 +1 @@
- [previous source map for vertexai/index.mjs: mappings and embedded sourcesContent for the old build]
+ [regenerated source map: sourcesContent now declares thinkingConfig?: GoogleThinkingConfig on both CustomChatConnection and ChatVertexAI, reads it from the constructor fields, assigns it to each connection in buildConnection, and merges thinkingLevel into generationConfig.thinkingConfig inside formatData]
BAAkB;;AAE/C;;;;"}
package/dist/esm/main.mjs CHANGED
@@ -29,4 +29,5 @@ export { RunnableCallable, sleep } from './utils/run.mjs';
  export { TokenEncoderManager, createTokenCounter, getTokenCountForMessage } from './utils/tokens.mjs';
  export { isZodSchema, toJsonSchema } from './utils/schema.mjs';
  export { CustomOpenAIClient } from './llm/openai/index.mjs';
+ export { ChatOpenRouter } from './llm/openrouter/index.mjs';
  //# sourceMappingURL=main.mjs.map
package/dist/esm/main.mjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"main.mjs","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}
+ {"version":3,"file":"main.mjs","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}
package/dist/types/index.d.ts CHANGED
@@ -16,3 +16,5 @@ export * from './common';
  export * from './utils';
  export type * from './types';
  export { CustomOpenAIClient } from './llm/openai';
+ export { ChatOpenRouter } from './llm/openrouter';
+ export type { OpenRouterReasoning, OpenRouterReasoningEffort, ChatOpenRouterCallOptions, } from './llm/openrouter';
package/dist/types/llm/bedrock/utils/message_outputs.d.ts CHANGED
@@ -2,8 +2,8 @@
   * Utility functions for converting Bedrock Converse responses to LangChain messages.
   * Ported from @langchain/aws common.js
   */
- import { AIMessage } from '@langchain/core/messages';
  import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { AIMessage } from '@langchain/core/messages';
  import type { BedrockMessage, ConverseResponse, ContentBlockDeltaEvent, ConverseStreamMetadataEvent, ContentBlockStartEvent, ReasoningContentBlock, ReasoningContentBlockDelta, MessageContentReasoningBlock, MessageContentReasoningBlockReasoningTextPartial, MessageContentReasoningBlockRedacted } from '../types';
  /**
   * Convert a Bedrock reasoning block delta to a LangChain partial reasoning block.
package/dist/types/llm/google/index.d.ts CHANGED
@@ -3,10 +3,9 @@ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
  import type { GenerateContentRequest } from '@google/generative-ai';
  import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  import type { BaseMessage } from '@langchain/core/messages';
- import type { GeminiGenerationConfig } from '@langchain/google-common';
- import type { GoogleClientOptions } from '@/types';
+ import type { GoogleClientOptions, GoogleThinkingConfig } from '@/types';
  export declare class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
-     thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
+     thinkingConfig?: GoogleThinkingConfig;
      /**
       * Override to add gemini-3 model support for multimodal and function calling thought signatures
       */
package/dist/types/llm/openrouter/index.d.ts CHANGED
@@ -3,13 +3,33 @@ import { ChatGenerationChunk } from '@langchain/core/outputs';
  import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  import type { FunctionMessageChunk, SystemMessageChunk, HumanMessageChunk, ToolMessageChunk, ChatMessageChunk, AIMessageChunk, BaseMessage } from '@langchain/core/messages';
  import type { ChatOpenAICallOptions, OpenAIChatInput, OpenAIClient } from '@langchain/openai';
- export interface ChatOpenRouterCallOptions extends ChatOpenAICallOptions {
+ export type OpenRouterReasoningEffort = 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';
+ export interface OpenRouterReasoning {
+     effort?: OpenRouterReasoningEffort;
+     max_tokens?: number;
+     exclude?: boolean;
+     enabled?: boolean;
+ }
+ export interface ChatOpenRouterCallOptions extends Omit<ChatOpenAICallOptions, 'reasoning'> {
+     /** @deprecated Use `reasoning` object instead */
      include_reasoning?: boolean;
+     reasoning?: OpenRouterReasoning;
      modelKwargs?: OpenAIChatInput['modelKwargs'];
  }
+ /** invocationParams return type extended with OpenRouter reasoning */
+ export type OpenRouterInvocationParams = Omit<OpenAIClient.Chat.ChatCompletionCreateParams, 'messages'> & {
+     reasoning?: OpenRouterReasoning;
+ };
  export declare class ChatOpenRouter extends ChatOpenAI {
+     private openRouterReasoning?;
+     /** @deprecated Use `reasoning` object instead */
+     private includeReasoning?;
      constructor(_fields: Partial<ChatOpenRouterCallOptions>);
      static lc_name(): 'LibreChatOpenRouter';
+     invocationParams(options?: this['ParsedCallOptions'], extra?: {
+         streaming?: boolean;
+     }): OpenRouterInvocationParams;
+     private buildOpenRouterReasoning;
      protected _convertOpenAIDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: 'function' | 'user' | 'system' | 'developer' | 'assistant' | 'tool'): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
      _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  }
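The new call options read directly off the declarations above: `reasoning` is a structured object that supersedes the boolean `include_reasoning` flag. A minimal sketch of how a consumer might opt in; the constructor signature (`Partial<ChatOpenRouterCallOptions>`) comes from the declaration, while everything else here is illustrative:

```typescript
import { ChatOpenRouter } from '@librechat/agents';

// Sketch only: per the .d.ts above, the constructor accepts
// Partial<ChatOpenRouterCallOptions>, so `reasoning` can be set up front.
// Model and API-key fields are omitted here for brevity.
const llm = new ChatOpenRouter({
  reasoning: {
    effort: 'high', // any OpenRouterReasoningEffort value
    exclude: false, // keep reasoning tokens in the response
  },
});

// The deprecated flag still type-checks, but the object form is preferred:
// new ChatOpenRouter({ include_reasoning: true });
```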
package/dist/types/llm/vertexai/index.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { ChatGoogle } from '@langchain/google-gauth';
  import type { GoogleAIModelRequestParams, GoogleAbstractedClient } from '@langchain/google-common';
- import type { VertexAIClientOptions } from '@/types';
+ import type { GoogleThinkingConfig, VertexAIClientOptions } from '@/types';
  /**
   * Integration with Google Vertex AI chat models.
   *
@@ -286,6 +286,7 @@ import type { VertexAIClientOptions } from '@/types';
  export declare class ChatVertexAI extends ChatGoogle {
      lc_namespace: string[];
      dynamicThinkingBudget: boolean;
+     thinkingConfig?: GoogleThinkingConfig;
      static lc_name(): 'LibreChatVertexAI';
      constructor(fields?: VertexAIClientOptions);
      invocationParams(options?: this['ParsedCallOptions'] | undefined): GoogleAIModelRequestParams;
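The runtime behavior behind these declarations is visible in the `vertexai/index.mjs.map` sources earlier in this diff: passing `thinkingBudget: -1` sets `dynamicThinkingBudget`, which makes `invocationParams` emit `maxReasoningTokens: -1`, and `thinkingConfig` is copied onto both the regular and streamed connections by `buildConnection`. A hedged sketch, with the import path and model id purely illustrative:

```typescript
// Import path is an assumption; this diff does not show where
// ChatVertexAI is re-exported from.
import { ChatVertexAI } from '@librechat/agents';

const llm = new ChatVertexAI({
  model: 'gemini-1.5-pro', // illustrative model id
  // -1 flips dynamicThinkingBudget, so invocationParams
  // sets maxReasoningTokens = -1 (dynamic budget).
  thinkingBudget: -1,
  // Forwarded verbatim to both connections by buildConnection.
  thinkingConfig: { includeThoughts: true },
});
```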
package/dist/types/types/llm.d.ts CHANGED
@@ -2,7 +2,6 @@ import { ChatMistralAI } from '@langchain/mistralai';
  import type { BindToolsInput, BaseChatModelParams } from '@langchain/core/language_models/chat_models';
  import type { OpenAIChatInput, ChatOpenAIFields, AzureOpenAIInput, ClientOptions as OAIClientOptions } from '@langchain/openai';
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
- import type { GeminiGenerationConfig } from '@langchain/google-common';
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
  import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
  import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
@@ -36,6 +35,11 @@ export type AnthropicReasoning = {
      thinking?: ThinkingConfig | boolean;
      thinkingBudget?: number;
  };
+ export type GoogleThinkingConfig = {
+     thinkingBudget?: number;
+     includeThoughts?: boolean;
+     thinkingLevel?: string;
+ };
  export type OpenAIClientOptions = ChatOpenAIFields;
  export type AnthropicClientOptions = AnthropicInput & {
      promptCache?: boolean;
@@ -43,6 +47,7 @@ export type AnthropicClientOptions = AnthropicInput & {
  export type MistralAIClientOptions = ChatMistralAIInput;
  export type VertexAIClientOptions = ChatVertexAIInput & {
      includeThoughts?: boolean;
+     thinkingConfig?: GoogleThinkingConfig;
  };
  export type BedrockAnthropicInput = ChatBedrockConverseInput & {
      additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] & AnthropicReasoning;
@@ -52,7 +57,7 @@ export type BedrockConverseClientOptions = ChatBedrockConverseInput;
  export type BedrockAnthropicClientOptions = BedrockAnthropicInput;
  export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
      customHeaders?: RequestOptions['customHeaders'];
-     thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
+     thinkingConfig?: GoogleThinkingConfig;
  };
  export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
  export type XAIClientOptions = ChatXAIInput;
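`GoogleThinkingConfig` is now the single shared shape for Gemini (`GoogleClientOptions`) and Vertex (`VertexAIClientOptions`) thinking settings, replacing the indexed `GeminiGenerationConfig['thinkingConfig']` lookup from `@langchain/google-common`. A sketch of a value satisfying the new type; field values are illustrative, and the root import assumes the package's `export type * from './types'` re-export chain reaches it:

```typescript
import type { GoogleThinkingConfig } from '@librechat/agents';

// All three fields are optional per the declaration above.
const thinkingConfig: GoogleThinkingConfig = {
  thinkingBudget: 2048,  // reasoning-token budget (illustrative value)
  includeThoughts: true, // surface thought content in responses
  thinkingLevel: 'high', // string-typed level (illustrative value)
};
```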
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@librechat/agents",
-     "version": "3.1.52",
+     "version": "3.1.54",
      "main": "./dist/cjs/main.cjs",
      "module": "./dist/esm/main.mjs",
      "types": "./dist/types/index.d.ts",
package/src/index.ts CHANGED
@@ -27,3 +27,9 @@ export type * from './types';
 
  /* LLM */
  export { CustomOpenAIClient } from './llm/openai';
+ export { ChatOpenRouter } from './llm/openrouter';
+ export type {
+   OpenRouterReasoning,
+   OpenRouterReasoningEffort,
+   ChatOpenRouterCallOptions,
+ } from './llm/openrouter';
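With the class and its option types re-exported from the package root, downstream code no longer needs deep imports. A minimal consumer sketch, assuming the entry points declared in package.json above resolve normally:

```typescript
import { ChatOpenRouter } from '@librechat/agents';
import type {
  ChatOpenRouterCallOptions,
  OpenRouterReasoning,
} from '@librechat/agents';

// Types and the runtime export now come from the same root specifier.
const reasoning: OpenRouterReasoning = { effort: 'medium' };
const fields: Partial<ChatOpenRouterCallOptions> = { reasoning };
const llm = new ChatOpenRouter(fields);
```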