@langchain/google-genai 1.0.2 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @langchain/google-genai
 
+ ## 1.0.3
+
+ ### Patch Changes
+
+ - [#9444](https://github.com/langchain-ai/langchainjs/pull/9444) [`39404ac`](https://github.com/langchain-ai/langchainjs/commit/39404acf76e81360054910f417d01df02981a4e6) Thanks [@hntrl](https://github.com/hntrl)! - add tier based usage metadata token count
+
+ - [#9444](https://github.com/langchain-ai/langchainjs/pull/9444) [`39404ac`](https://github.com/langchain-ai/langchainjs/commit/39404acf76e81360054910f417d01df02981a4e6) Thanks [@hntrl](https://github.com/hntrl)! - fix streaming thought signature bug
+
+ - [#9444](https://github.com/langchain-ai/langchainjs/pull/9444) [`39404ac`](https://github.com/langchain-ai/langchainjs/commit/39404acf76e81360054910f417d01df02981a4e6) Thanks [@hntrl](https://github.com/hntrl)! - add thinkingConfig support in ChatGoogleGenerativeAI
+
+ - [#9444](https://github.com/langchain-ai/langchainjs/pull/9444) [`39404ac`](https://github.com/langchain-ai/langchainjs/commit/39404acf76e81360054910f417d01df02981a4e6) Thanks [@hntrl](https://github.com/hntrl)! - add cached token counts to usage metadata
+
  ## 1.0.2
 
  ### Patch Changes
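
Taken together, the 1.0.3 entries add a `thinkingConfig` constructor option and richer usage metadata (cached and reasoning token counts). Below is a minimal sketch of the new option in use; the field names inside `thinkingConfig` (`thinkingBudget`, `includeThoughts`) follow the Gemini API's ThinkingConfig and are assumptions here, since this diff doesn't show the body of the `GoogleGenerativeAIThinkingConfig` type:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Sketch under the assumptions above; requires GOOGLE_API_KEY in the env.
const llm = new ChatGoogleGenerativeAI({
  model: "gemini-2.5-flash",
  thinkingConfig: {
    thinkingBudget: 1024,  // assumed field: cap on reasoning tokens
    includeThoughts: true, // assumed field: return thought summaries
  },
});

const res = await llm.invoke("What is 13 * 17?");
// With 1.0.3, usage_metadata can also carry cached/reasoning token details.
console.log(res.usage_metadata);
```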
@@ -415,6 +415,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
  json;
  streamUsage = true;
  convertSystemMessageToHumanContent;
+ thinkingConfig;
  client;
  get _isMultimodalModel() {
  return this.model.includes("vision") || this.model.startsWith("gemini-1.5") || this.model.startsWith("gemini-2") || this.model.startsWith("gemma-3-") && !this.model.startsWith("gemma-3-1b") || this.model.startsWith("gemini-3");
@@ -441,6 +442,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
  }
  this.streaming = fields.streaming ?? this.streaming;
  this.json = fields.json;
+ this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
  this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
  model: this.model,
  safetySettings: this.safetySettings,
@@ -450,7 +452,8 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
  temperature: this.temperature,
  topP: this.topP,
  topK: this.topK,
- ...this.json ? { responseMimeType: "application/json" } : {}
+ ...this.json ? { responseMimeType: "application/json" } : {},
+ ...this.thinkingConfig ? { thinkingConfig: this.thinkingConfig } : {}
  }
  }, {
  apiVersion: fields.apiVersion,
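
The hunk above threads `thinkingConfig` into the SDK's `generationConfig` with the same conditional-spread pattern already used for the `json` flag, so the key is omitted entirely (rather than sent as `undefined`) when the option is unset. A standalone illustration of that pattern, with a hypothetical budget value:

```typescript
// `...(cond ? obj : {})` contributes keys only when the condition holds,
// so models that don't support thinking never see a thinkingConfig field.
const thinkingConfig = { thinkingBudget: 512 }; // hypothetical value
const json = false;

const generationConfig = {
  temperature: 0,
  ...(json ? { responseMimeType: "application/json" } : {}),
  ...(thinkingConfig ? { thinkingConfig } : {}),
};

console.log(generationConfig);
// => { temperature: 0, thinkingConfig: { thinkingBudget: 512 } }
```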
@@ -540,14 +543,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
  contents: actualPrompt
  });
  let usageMetadata;
- if ("usageMetadata" in res.response) {
- const genAIUsageMetadata = res.response.usageMetadata;
- usageMetadata = {
- input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
- output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
- total_tokens: genAIUsageMetadata.totalTokenCount ?? 0
- };
- }
+ if ("usageMetadata" in res.response) usageMetadata = require_common.convertUsageMetadata(res.response.usageMetadata, this.model);
  const generationResult = require_common.mapGenerateContentResultToChatResult(res.response, { usageMetadata });
  if (generationResult.generations?.length > 0) await runManager?.handleLLMNewToken(generationResult.generations[0]?.text ?? "");
  return generationResult;
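
The repeated inline mapping is replaced by a shared `convertUsageMetadata(usageMetadata, model)` helper in `./utils/common.js`. Its body isn't part of this diff; based on the changelog entries (cached token counts, tier-based counts), a plausible reconstruction looks like the sketch below. The raw field names `cachedContentTokenCount` and `thoughtsTokenCount` come from the Gemini API's usage metadata, and the `model` parameter presumably drives the tier-based counting that this sketch omits:

```typescript
import type { UsageMetadata } from "@langchain/core/messages";

// Hypothetical reconstruction — not the actual helper from this release.
function convertUsageMetadata(
  raw: {
    promptTokenCount?: number;
    candidatesTokenCount?: number;
    totalTokenCount?: number;
    cachedContentTokenCount?: number; // prompt tokens served from cache
    thoughtsTokenCount?: number; // reasoning ("thinking") tokens
  },
  _model: string // presumably used for tier-based counts; unused in this sketch
): UsageMetadata {
  return {
    input_tokens: raw.promptTokenCount ?? 0,
    output_tokens: raw.candidatesTokenCount ?? 0,
    total_tokens: raw.totalTokenCount ?? 0,
    input_token_details: { cache_read: raw.cachedContentTokenCount ?? 0 },
    output_token_details: { reasoning: raw.thoughtsTokenCount ?? 0 },
  };
}
```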
@@ -576,11 +572,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
  let index = 0;
  for await (const response of stream) {
  if ("usageMetadata" in response && response.usageMetadata !== void 0 && this.streamUsage !== false && options.streamUsage !== false) {
- usageMetadata = {
- input_tokens: response.usageMetadata.promptTokenCount ?? 0,
- output_tokens: response.usageMetadata.candidatesTokenCount ?? 0,
- total_tokens: response.usageMetadata.totalTokenCount ?? 0
- };
+ usageMetadata = require_common.convertUsageMetadata(response.usageMetadata, this.model);
  const newPromptTokenCount = response.usageMetadata.promptTokenCount ?? 0;
  usageMetadata.input_tokens = Math.max(0, newPromptTokenCount - prevPromptTokenCount);
  prevPromptTokenCount = newPromptTokenCount;
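
Note that the delta bookkeeping after the helper call is unchanged: as the source comments explain, Gemini reports cumulative token counts on every streamed chunk, while LangChain sums `usage_metadata` across chunks when they are concatenated, so each chunk must carry only the increment. A toy run of the same arithmetic:

```typescript
// Cumulative candidate-token counts as Gemini might report them
// across three streamed chunks.
const cumulativeCounts = [3, 7, 12];

let prev = 0;
for (const cumulative of cumulativeCounts) {
  // Same Math.max(0, new - prev) computation as in the diff above.
  const delta = Math.max(0, cumulative - prev);
  prev = cumulative;
  console.log(delta); // 3, then 4, then 5 — summing back to 12
}
```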
@@ -1 +1 @@
- (single-line minified source map for chat_models.cjs, with the full TypeScript source embedded in `sourcesContent`)
+ (regenerated single-line minified source map: the `names` array gains `convertUsageMetadata`, and the embedded source adds the `convertUsageMetadata` import from `./utils/common.js`, the `GoogleGenerativeAIThinkingConfig` type from `./types.js`, and the documented `thinkingConfig` option on `GoogleGenerativeAIChatInput`; full JSON omitted)
Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. 
The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatGoogleGenerativeAI\n extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk>\n implements GoogleGenerativeAIChatInput\n{\n static lc_name() {\n return \"ChatGoogleGenerativeAI\";\n }\n\n lc_serializable = true;\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"GOOGLE_API_KEY\",\n };\n }\n\n lc_namespace = [\"langchain\", \"chat_models\", \"google_genai\"];\n\n get lc_aliases() {\n return {\n apiKey: \"google_api_key\",\n };\n }\n\n model: string;\n\n temperature?: number; // default value chosen based on model\n\n maxOutputTokens?: number;\n\n topP?: number; // default value chosen based on model\n\n topK?: number; // default value chosen based on model\n\n stopSequences: string[] = [];\n\n safetySettings?: SafetySetting[];\n\n apiKey?: string;\n\n streaming = false;\n\n json?: boolean;\n\n streamUsage = true;\n\n convertSystemMessageToHumanContent: boolean | undefined;\n\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n\n private client: GenerativeModel;\n\n get _isMultimodalModel() {\n return (\n this.model.includes(\"vision\") ||\n this.model.startsWith(\"gemini-1.5\") ||\n this.model.startsWith(\"gemini-2\") ||\n (this.model.startsWith(\"gemma-3-\") &&\n !this.model.startsWith(\"gemma-3-1b\")) || // gemma-3 models are multimodal(but gemma-3n-* and gemma-3-1b are not)\n this.model.startsWith(\"gemini-3\")\n );\n }\n\n constructor(fields: GoogleGenerativeAIChatInput) {\n super(fields);\n\n this.model = fields.model.replace(/^models\\//, \"\");\n\n this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;\n\n if (this.maxOutputTokens && this.maxOutputTokens < 0) {\n throw new Error(\"`maxOutputTokens` must be a positive integer\");\n }\n\n this.temperature = fields.temperature ?? this.temperature;\n if (this.temperature && (this.temperature < 0 || this.temperature > 2)) {\n throw new Error(\"`temperature` must be in the range of [0.0,2.0]\");\n }\n\n this.topP = fields.topP ?? this.topP;\n if (this.topP && this.topP < 0) {\n throw new Error(\"`topP` must be a positive integer\");\n }\n\n if (this.topP && this.topP > 1) {\n throw new Error(\"`topP` must be below 1.\");\n }\n\n this.topK = fields.topK ?? this.topK;\n if (this.topK && this.topK < 0) {\n throw new Error(\"`topK` must be a positive integer\");\n }\n\n this.stopSequences = fields.stopSequences ?? this.stopSequences;\n\n this.apiKey = fields.apiKey ?? getEnvironmentVariable(\"GOOGLE_API_KEY\");\n if (!this.apiKey) {\n throw new Error(\n \"Please set an API key for Google GenerativeAI \" +\n \"in the environment variable GOOGLE_API_KEY \" +\n \"or in the `apiKey` field of the \" +\n \"ChatGoogleGenerativeAI constructor\"\n );\n }\n\n this.safetySettings = fields.safetySettings ?? this.safetySettings;\n if (this.safetySettings && this.safetySettings.length > 0) {\n const safetySettingsSet = new Set(\n this.safetySettings.map((s) => s.category)\n );\n if (safetySettingsSet.size !== this.safetySettings.length) {\n throw new Error(\n \"The categories in `safetySettings` array must be unique\"\n );\n }\n }\n\n this.streaming = fields.streaming ?? this.streaming;\n this.json = fields.json;\n\n this.thinkingConfig = fields.thinkingConfig ?? 
this.thinkingConfig;\n\n this.client = new GenerativeAI(this.apiKey).getGenerativeModel(\n {\n model: this.model,\n safetySettings: this.safetySettings as SafetySetting[],\n generationConfig: {\n stopSequences: this.stopSequences,\n maxOutputTokens: this.maxOutputTokens,\n temperature: this.temperature,\n topP: this.topP,\n topK: this.topK,\n ...(this.json ? { responseMimeType: \"application/json\" } : {}),\n ...(this.thinkingConfig\n ? { thinkingConfig: this.thinkingConfig }\n : {}),\n },\n },\n {\n apiVersion: fields.apiVersion,\n baseUrl: fields.baseUrl,\n }\n );\n this.streamUsage = fields.streamUsage ?? this.streamUsage;\n }\n\n useCachedContent(\n cachedContent: CachedContent,\n modelParams?: ModelParams,\n requestOptions?: RequestOptions\n ): void {\n if (!this.apiKey) return;\n this.client = new GenerativeAI(\n this.apiKey\n ).getGenerativeModelFromCachedContent(\n cachedContent,\n modelParams,\n requestOptions\n );\n }\n\n get useSystemInstruction(): boolean {\n return typeof this.convertSystemMessageToHumanContent === \"boolean\"\n ? !this.convertSystemMessageToHumanContent\n : this.computeUseSystemInstruction;\n }\n\n get computeUseSystemInstruction(): boolean {\n // This works on models from April 2024 and later\n // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later\n // AI Studio: gemini-1.5-pro-latest\n if (this.model === \"gemini-1.0-pro-001\") {\n return false;\n } else if (this.model.startsWith(\"gemini-pro-vision\")) {\n return false;\n } else if (this.model.startsWith(\"gemini-1.0-pro-vision\")) {\n return false;\n } else if (this.model === \"gemini-pro\") {\n // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001\n return false;\n }\n return true;\n }\n\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams {\n return {\n ls_provider: \"google_genai\",\n ls_model_name: this.model,\n ls_model_type: \"chat\",\n ls_temperature: this.client.generationConfig.temperature,\n ls_max_tokens: this.client.generationConfig.maxOutputTokens,\n ls_stop: options.stop,\n };\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n _llmType() {\n return \"googlegenerativeai\";\n }\n\n override bindTools(\n tools: GoogleGenerativeAIToolType[],\n kwargs?: Partial<GoogleGenerativeAIChatCallOptions>\n ): Runnable<\n BaseLanguageModelInput,\n AIMessageChunk,\n GoogleGenerativeAIChatCallOptions\n > {\n return this.withConfig({\n tools: convertToolsToGenAI(tools)?.tools,\n ...kwargs,\n });\n }\n\n invocationParams(\n options?: this[\"ParsedCallOptions\"]\n ): Omit<GenerateContentRequest, \"contents\"> {\n const toolsAndConfig = options?.tools?.length\n ? convertToolsToGenAI(options.tools, {\n toolChoice: options.tool_choice,\n allowedFunctionNames: options.allowedFunctionNames,\n })\n : undefined;\n\n if (options?.responseSchema) {\n this.client.generationConfig.responseSchema = options.responseSchema;\n this.client.generationConfig.responseMimeType = \"application/json\";\n } else {\n this.client.generationConfig.responseSchema = undefined;\n this.client.generationConfig.responseMimeType = this.json\n ? \"application/json\"\n : undefined;\n }\n\n return {\n ...(toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {}),\n ...(toolsAndConfig?.toolConfig\n ? 
{ toolConfig: toolsAndConfig.toolConfig }\n : {}),\n };\n }\n\n async _generate(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n\n // Handle streaming\n if (this.streaming) {\n const tokenUsage: TokenUsage = {};\n const stream = this._streamResponseChunks(messages, options, runManager);\n const finalChunks: Record<number, ChatGenerationChunk> = {};\n\n for await (const chunk of stream) {\n const index =\n (chunk.generationInfo as NewTokenIndices)?.completion ?? 0;\n if (finalChunks[index] === undefined) {\n finalChunks[index] = chunk;\n } else {\n finalChunks[index] = finalChunks[index].concat(chunk);\n }\n }\n const generations = Object.entries(finalChunks)\n .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))\n .map(([_, value]) => value);\n\n return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };\n }\n\n const res = await this.completionWithRetry({\n ...parameters,\n contents: actualPrompt,\n });\n\n let usageMetadata: UsageMetadata | undefined;\n if (\"usageMetadata\" in res.response) {\n usageMetadata = convertUsageMetadata(\n res.response.usageMetadata,\n this.model\n );\n }\n\n const generationResult = mapGenerateContentResultToChatResult(\n res.response,\n {\n usageMetadata,\n }\n );\n // may not have generations in output if there was a refusal for safety reasons, malformed function call, etc.\n if (generationResult.generations?.length > 0) {\n await runManager?.handleLLMNewToken(\n generationResult.generations[0]?.text ?? \"\"\n );\n }\n return generationResult;\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n const request = {\n ...parameters,\n contents: actualPrompt,\n };\n const stream = await this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n const { stream } = await this.client.generateContentStream(request);\n return stream;\n }\n );\n\n let usageMetadata: UsageMetadata | undefined;\n // Keep prior cumulative counts for calculating token deltas while streaming\n let prevPromptTokenCount = 0;\n let prevCandidatesTokenCount = 0;\n let prevTotalTokenCount = 0;\n let index = 0;\n for await (const response of stream) {\n if (\n \"usageMetadata\" in response &&\n response.usageMetadata !== undefined &&\n this.streamUsage !== false &&\n options.streamUsage !== false\n ) {\n usageMetadata = convertUsageMetadata(\n response.usageMetadata,\n this.model\n );\n\n // Under the hood, LangChain combines the prompt tokens. 
Google returns the updated\n // total each time, so we need to find the difference between the tokens.\n const newPromptTokenCount =\n response.usageMetadata.promptTokenCount ?? 0;\n usageMetadata.input_tokens = Math.max(\n 0,\n newPromptTokenCount - prevPromptTokenCount\n );\n prevPromptTokenCount = newPromptTokenCount;\n\n const newCandidatesTokenCount =\n response.usageMetadata.candidatesTokenCount ?? 0;\n usageMetadata.output_tokens = Math.max(\n 0,\n newCandidatesTokenCount - prevCandidatesTokenCount\n );\n prevCandidatesTokenCount = newCandidatesTokenCount;\n\n const newTotalTokenCount = response.usageMetadata.totalTokenCount ?? 0;\n usageMetadata.total_tokens = Math.max(\n 0,\n newTotalTokenCount - prevTotalTokenCount\n );\n prevTotalTokenCount = newTotalTokenCount;\n }\n\n const chunk = convertResponseContentToChatGenerationChunk(response, {\n usageMetadata,\n index,\n });\n index += 1;\n if (!chunk) {\n continue;\n }\n\n yield chunk;\n await runManager?.handleLLMNewToken(chunk.text ?? \"\");\n }\n }\n\n async completionWithRetry(\n request: string | GenerateContentRequest | (string | GenerativeAIPart)[],\n options?: this[\"ParsedCallOptions\"]\n ) {\n return this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n try {\n return await this.client.generateContent(request);\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } catch (e: any) {\n // TODO: Improve error handling\n if (e.message?.includes(\"400 Bad Request\")) {\n e.status = 400;\n }\n throw e;\n }\n }\n );\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const schema: InteropZodType<RunOutput> | Record<string, any> =\n outputSchema;\n const name = config?.name;\n const method = config?.method;\n const includeRaw = config?.includeRaw;\n if (method === \"jsonMode\") {\n throw new Error(\n `ChatGoogleGenerativeAI only supports \"jsonSchema\" or \"functionCalling\" as a method.`\n );\n }\n\n let llm;\n let outputParser: BaseLLMOutputParser<RunOutput>;\n if (method === \"functionCalling\") {\n let functionName = name ?? \"extract\";\n let tools: GoogleGenerativeAIFunctionDeclarationsTool[];\n if (isInteropZodSchema(schema)) {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n tools = [\n {\n functionDeclarations: [\n {\n name: functionName,\n description:\n jsonSchema.description ?? \"A function available to call.\",\n parameters: jsonSchema as GenerativeAIFunctionDeclarationSchema,\n },\n ],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<\n InferInteropZodOutput<typeof schema>\n >({\n returnSingle: true,\n keyName: functionName,\n zodSchema: schema,\n });\n } else {\n let geminiFunctionDefinition: GenerativeAIFunctionDeclaration;\n if (\n typeof schema.name === \"string\" &&\n typeof schema.parameters === \"object\" &&\n schema.parameters != null\n ) {\n geminiFunctionDefinition = schema as GenerativeAIFunctionDeclaration;\n geminiFunctionDefinition.parameters = removeAdditionalProperties(\n schema.parameters\n ) as GenerativeAIFunctionDeclarationSchema;\n functionName = schema.name;\n } else {\n geminiFunctionDefinition = {\n name: functionName,\n description: schema.description ?? 
\"\",\n parameters: removeAdditionalProperties(\n schema\n ) as GenerativeAIFunctionDeclarationSchema,\n };\n }\n tools = [\n {\n functionDeclarations: [geminiFunctionDefinition],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<RunOutput>({\n returnSingle: true,\n keyName: functionName,\n });\n }\n llm = this.bindTools(tools).withConfig({\n allowedFunctionNames: [functionName],\n });\n } else {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n llm = this.withConfig({\n responseSchema: jsonSchema as Schema,\n });\n outputParser = new JsonOutputParser();\n }\n\n if (!includeRaw) {\n return llm.pipe(outputParser).withConfig({\n runName: \"ChatGoogleGenerativeAIStructuredOutput\",\n }) as Runnable<BaseLanguageModelInput, RunOutput>;\n }\n\n const parserAssign = RunnablePassthrough.assign({\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n parsed: (input: any, config) => outputParser.invoke(input.raw, config),\n });\n const parserNone = RunnablePassthrough.assign({\n parsed: () => null,\n });\n const parsedWithFallback = parserAssign.withFallbacks({\n fallbacks: [parserNone],\n });\n return RunnableSequence.from<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n >([\n {\n raw: llm,\n },\n parsedWithFallback,\n ]).withConfig({\n runName: \"StructuredOutputRunnable\",\n });\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA4kBA,IAAa,yBAAb,cACUA,2DAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED,kBAAkB;CAElB,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,iBACT;CACF;CAED,eAAe;EAAC;EAAa;EAAe;CAAe;CAE3D,IAAI,aAAa;AACf,SAAO,EACL,QAAQ,iBACT;CACF;CAED;CAEA;CAEA;CAEA;CAEA;CAEA,gBAA0B,CAAE;CAE5B;CAEA;CAEA,YAAY;CAEZ;CAEA,cAAc;CAEd;CAEA;CAEA,AAAQ;CAER,IAAI,qBAAqB;AACvB,SACE,KAAK,MAAM,SAAS,SAAS,IAC7B,KAAK,MAAM,WAAW,aAAa,IACnC,KAAK,MAAM,WAAW,WAAW,IAChC,KAAK,MAAM,WAAW,WAAW,IAChC,CAAC,KAAK,MAAM,WAAW,aAAa,IACtC,KAAK,MAAM,WAAW,WAAW;CAEpC;CAED,YAAYC,QAAqC;EAC/C,MAAM,OAAO;EAEb,KAAK,QAAQ,OAAO,MAAM,QAAQ,aAAa,GAAG;EAElD,KAAK,kBAAkB,OAAO,mBAAmB,KAAK;AAEtD,MAAI,KAAK,mBAAmB,KAAK,kBAAkB,EACjD,OAAM,IAAI,MAAM;EAGlB,KAAK,cAAc,OAAO,eAAe,KAAK;AAC9C,MAAI,KAAK,gBAAgB,KAAK,cAAc,KAAK,KAAK,cAAc,GAClE,OAAM,IAAI,MAAM;EAGlB,KAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;AAGlB,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;EAGlB,KAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;EAGlB,KAAK,gBAAgB,OAAO,iBAAiB,KAAK;EAElD,KAAK,SAAS,OAAO,iEAAiC,iBAAiB;AACvE,MAAI,CAAC,KAAK,OACR,OAAM,IAAI,MACR;EAOJ,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;AACpD,MAAI,KAAK,kBAAkB,KAAK,eAAe,SAAS,GAAG;GACzD,MAAM,oBAAoB,IAAI,IAC5B,KAAK,eAAe,IAAI,CAAC,MAAM,EAAE,SAAS;AAE5C,OAAI,kBAAkB,SAAS,KAAK,eAAe,OACjD,OAAM,IAAI,MACR;EAGL;EAED,KAAK,YAAY,OAAO,aAAa,KAAK;EAC1C,KAAK,OAAO,OAAO;EAEnB,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;EAEpD,KAAK,SAAS,IAAIC,0CAAa,KAAK,QAAQ,mBAC1C;GACE,OAAO,KAAK;GACZ,gBAAgB,KAAK;GACrB,kBAAkB;IAChB,eAAe,KAAK;IACpB,iBAAiB,KAAK;IACtB,aAAa,KAAK;IAClB,MAAM,KAAK;IACX,MAAM,KAAK;IACX,GAAI,KAAK,OAAO,EAAE,kBAAkB,mBAAoB,IAAG,CAAE;IAC7D,GAAI,KAAK,iBACL,EAAE,gBAAgB,KAAK,eAAgB,IACvC,CAAE;GACP;EACF,GACD;GACE,YAAY,OAAO;GACnB,SAAS,OAAO;EACjB,EACF;EACD,KAAK,cAAc,OAAO,eAAe,KAAK;CAC/C;CAED,iBACEC,eACAC,aACAC,gBACM;AACN,MAAI,CAAC,KAAK,OAA
Q;EAClB,KAAK,SAAS,IAAIH,0CAChB,KAAK,QACL,oCACA,eACA,aACA,eACD;CACF;CAED,IAAI,uBAAgC;AAClC,SAAO,OAAO,KAAK,uCAAuC,YACtD,CAAC,KAAK,qCACN,KAAK;CACV;CAED,IAAI,8BAAuC;AAIzC,MAAI,KAAK,UAAU,qBACjB,QAAO;WACE,KAAK,MAAM,WAAW,oBAAoB,CACnD,QAAO;WACE,KAAK,MAAM,WAAW,wBAAwB,CACvD,QAAO;WACE,KAAK,UAAU,aAExB,QAAO;AAET,SAAO;CACR;CAED,YAAYI,SAAqD;AAC/D,SAAO;GACL,aAAa;GACb,eAAe,KAAK;GACpB,eAAe;GACf,gBAAgB,KAAK,OAAO,iBAAiB;GAC7C,eAAe,KAAK,OAAO,iBAAiB;GAC5C,SAAS,QAAQ;EAClB;CACF;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,WAAW;AACT,SAAO;CACR;CAED,AAAS,UACPC,OACAC,QAKA;AACA,SAAO,KAAK,WAAW;GACrB,OAAOC,kCAAoB,MAAM,EAAE;GACnC,GAAG;EACJ,EAAC;CACH;CAED,iBACEC,SAC0C;EAC1C,MAAM,iBAAiB,SAAS,OAAO,SACnCD,kCAAoB,QAAQ,OAAO;GACjC,YAAY,QAAQ;GACpB,sBAAsB,QAAQ;EAC/B,EAAC,GACF;AAEJ,MAAI,SAAS,gBAAgB;GAC3B,KAAK,OAAO,iBAAiB,iBAAiB,QAAQ;GACtD,KAAK,OAAO,iBAAiB,mBAAmB;EACjD,OAAM;GACL,KAAK,OAAO,iBAAiB,iBAAiB;GAC9C,KAAK,OAAO,iBAAiB,mBAAmB,KAAK,OACjD,qBACA;EACL;AAED,SAAO;GACL,GAAI,gBAAgB,QAAQ,EAAE,OAAO,eAAe,MAAO,IAAG,CAAE;GAChE,GAAI,gBAAgB,aAChB,EAAE,YAAY,eAAe,WAAY,IACzC,CAAE;EACP;CACF;CAED,MAAM,UACJE,UACAL,SACAM,YACqB;EACrB,MAAM,SAASC,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,kBAAkB,GAAG;GAC5B,KAAK,OAAO,oBAAoB;GAChC,eAAe,OAAO,MAAM,EAAE;EAC/B;EACD,MAAM,aAAa,KAAK,iBAAiB,QAAQ;AAGjD,MAAI,KAAK,WAAW;GAClB,MAAMC,aAAyB,CAAE;GACjC,MAAM,SAAS,KAAK,sBAAsB,UAAU,SAAS,WAAW;GACxE,MAAMC,cAAmD,CAAE;AAE3D,cAAW,MAAM,SAAS,QAAQ;IAChC,MAAM,QACH,MAAM,gBAAoC,cAAc;AAC3D,QAAI,YAAY,WAAW,QACzB,YAAY,SAAS;SAErB,YAAY,SAAS,YAAY,OAAO,OAAO,MAAM;GAExD;GACD,MAAM,cAAc,OAAO,QAAQ,YAAY,CAC5C,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,KAAK,KAAK,SAAS,MAAM,GAAG,GAAG,SAAS,MAAM,GAAG,CAAC,CACjE,IAAI,CAAC,CAAC,GAAG,MAAM,KAAK,MAAM;AAE7B,UAAO;IAAE;IAAa,WAAW,EAAE,qBAAqB,WAAY;GAAE;EACvE;EAED,MAAM,MAAM,MAAM,KAAK,oBAAoB;GACzC,GAAG;GACH,UAAU;EACX,EAAC;EAEF,IAAIC;AACJ,MAAI,mBAAmB,IAAI,UACzB,gBAAgBC,oCACd,IAAI,SAAS,eACb,KAAK,MACN;EAGH,MAAM,mBAAmBC,oDACvB,IAAI,UACJ,EACE,cACD,EACF;AAED,MAAI,iBAAiB,aAAa,SAAS,GACzC,MAAM,YAAY,kBAChB,iBAAiB,YAAY,IAAI,QAAQ,GAC1C;AAEH,SAAO;CACR;CAED,OAAO,sBACLP,UACAL,SACAM,YACqC;EACrC,MAAM,SAASC,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,kBAAkB,GAAG;GAC5B,KAAK,OAAO,oBAAoB;GAChC,eAAe,OAAO,MAAM,EAAE;EAC/B;EACD,MAAM,aAAa,KAAK,iBAAiB,QAAQ;EACjD,MAAM,UAAU;GACd,GAAG;GACH,UAAU;EACX;EACD,MAAM,SAAS,MAAM,KAAK,OAAO,gBAC/B,EAAE,QAAQ,SAAS,OAAQ,GAC3B,YAAY;GACV,MAAM,EAAE,kBAAQ,GAAG,MAAM,KAAK,OAAO,sBAAsB,QAAQ;AACnE,UAAOM;EACR,EACF;EAED,IAAIH;EAEJ,IAAI,uBAAuB;EAC3B,IAAI,2BAA2B;EAC/B,IAAI,sBAAsB;EAC1B,IAAI,QAAQ;AACZ,aAAW,MAAM,YAAY,QAAQ;AACnC,OACE,mBAAmB,YACnB,SAAS,kBAAkB,UAC3B,KAAK,gBAAgB,SACrB,QAAQ,gBAAgB,OACxB;IACA,gBAAgBC,oCACd,SAAS,eACT,KAAK,MACN;IAID,MAAM,sBACJ,SAAS,cAAc,oBAAoB;IAC7C,cAAc,eAAe,KAAK,IAChC,GACA,sBAAsB,qBACvB;IACD,uBAAuB;IAEvB,MAAM,0BACJ,SAAS,cAAc,wBAAwB;IACjD,cAAc,gBAAgB,KAAK,IACjC,GACA,0BAA0B,yBAC3B;IACD,2BAA2B;IAE3B,MAAM,qBAAqB,SAAS,cAAc,mBAAmB;IACrE,cAAc,eAAe,KAAK,IAChC,GACA,qBAAqB,oBACtB;IACD,sBAAsB;GACvB;GAED,MAAM,QAAQG,2DAA4C,UAAU;IAClE;IACA;GACD,EAAC;GACF,SAAS;AACT,OAAI,CAAC,MACH;GAGF,MAAM;GACN,MAAM,YAAY,kBAAkB,MAAM,QAAQ,GAAG;EACtD;CACF;CAED,MAAM,oBACJC,SACAX,SACA;AACA,SAAO,KAAK,OAAO,gBACjB,EAAE,QAAQ,SAAS,OAAQ,GAC3B,YAAY;AACV,OAAI;AACF,WAAO,MAAM,KAAK,OAAO,gBAAgB,QAAQ;GAElD,SAAQY,GAAQ;AAEf,QAAI,EAAE,SAAS,SAAS,kBAAkB,EACxC,EAAE,SAAS;AAEb,UAAM;GACP;EACF,EACF;CACF;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,CAAE;CAClC;CAwBD,qBAIEC,cAIAC,QAMI;EAEJ,MAAMC,SACJ;EACF,MAAM,OAAO,QAAQ;EACrB,MAAM,SAAS,QAAQ;EACvB,MAAM,aAAa,QAAQ;AAC3B,MAAI,WAAW,WACb,OAAM,
IAAI,MACR,CAAC,mFAAmF,CAAC;EAIzF,IAAI;EACJ,IAAIC;AACJ,MAAI,WAAW,mBAAmB;GAChC,IAAI,eAAe,QAAQ;GAC3B,IAAIC;AACJ,4DAAuB,OAAO,EAAE;IAC9B,MAAM,aAAaC,+DAA+B,OAAO;IACzD,QAAQ,CACN,EACE,sBAAsB,CACpB;KACE,MAAM;KACN,aACE,WAAW,eAAe;KAC5B,YAAY;IACb,CACF,EACF,CACF;IACD,eAAe,IAAIC,2DAEjB;KACA,cAAc;KACd,SAAS;KACT,WAAW;IACZ;GACF,OAAM;IACL,IAAIC;AACJ,QACE,OAAO,OAAO,SAAS,YACvB,OAAO,OAAO,eAAe,YAC7B,OAAO,cAAc,MACrB;KACA,2BAA2B;KAC3B,yBAAyB,aAAaC,2DACpC,OAAO,WACR;KACD,eAAe,OAAO;IACvB,OACC,2BAA2B;KACzB,MAAM;KACN,aAAa,OAAO,eAAe;KACnC,YAAYA,2DACV,OACD;IACF;IAEH,QAAQ,CACN,EACE,sBAAsB,CAAC,wBAAyB,EACjD,CACF;IACD,eAAe,IAAIF,2DAA+C;KAChE,cAAc;KACd,SAAS;IACV;GACF;GACD,MAAM,KAAK,UAAU,MAAM,CAAC,WAAW,EACrC,sBAAsB,CAAC,YAAa,EACrC,EAAC;EACH,OAAM;GACL,MAAM,aAAaD,+DAA+B,OAAO;GACzD,MAAM,KAAK,WAAW,EACpB,gBAAgB,WACjB,EAAC;GACF,eAAe,IAAII;EACpB;AAED,MAAI,CAAC,WACH,QAAO,IAAI,KAAK,aAAa,CAAC,WAAW,EACvC,SAAS,yCACV,EAAC;EAGJ,MAAM,eAAeC,+CAAoB,OAAO,EAE9C,QAAQ,CAACC,OAAYC,aAAW,aAAa,OAAO,MAAM,KAAKA,SAAO,CACvE,EAAC;EACF,MAAM,aAAaF,+CAAoB,OAAO,EAC5C,QAAQ,MAAM,KACf,EAAC;EACF,MAAM,qBAAqB,aAAa,cAAc,EACpD,WAAW,CAAC,UAAW,EACxB,EAAC;AACF,SAAOG,4CAAiB,KAGtB,CACA,EACE,KAAK,IACN,GACD,kBACD,EAAC,CAAC,WAAW,EACZ,SAAS,2BACV,EAAC;CACH;AACF"}
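The `_streamResponseChunks` implementation embedded in the map above turns Gemini's cumulative `usageMetadata` counts into per-chunk deltas: Google reports running totals on every streamed response, while LangChain sums the `usage_metadata` of all chunks, so each chunk must carry only the increase since the previous one. Below is a minimal standalone sketch of that bookkeeping; the tracker helper and its type names are illustrative, not part of the package.

```typescript
// Gemini streams *cumulative* token counts; LangChain sums per-chunk usage.
// This tracker converts each cumulative report into a delta, mirroring the
// prevPromptTokenCount / prevCandidatesTokenCount / prevTotalTokenCount
// bookkeeping in _streamResponseChunks above. Names are illustrative.
interface CumulativeUsage {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
}

interface UsageDelta {
  input_tokens: number;
  output_tokens: number;
  total_tokens: number;
}

function makeUsageDeltaTracker(): (usage: CumulativeUsage) => UsageDelta {
  let prevPrompt = 0;
  let prevCandidates = 0;
  let prevTotal = 0;
  return (usage) => {
    const prompt = usage.promptTokenCount ?? 0;
    const candidates = usage.candidatesTokenCount ?? 0;
    const total = usage.totalTokenCount ?? 0;
    // Math.max(0, ...) guards against a response reporting a smaller
    // cumulative count than its predecessor.
    const delta: UsageDelta = {
      input_tokens: Math.max(0, prompt - prevPrompt),
      output_tokens: Math.max(0, candidates - prevCandidates),
      total_tokens: Math.max(0, total - prevTotal),
    };
    prevPrompt = prompt;
    prevCandidates = candidates;
    prevTotal = total;
    return delta;
  };
}
```

Summing the deltas across a whole stream reproduces the final cumulative totals, which is why the aggregated `AIMessageChunk` in the class JSDoc shows the full token counts after `concat`-ing every chunk.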
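`useCachedContent`, also visible in the source above, swaps the private `client` for a model instance bound to a `CachedContent` handle so subsequent calls reuse cached context. A hedged sketch of how it might be driven, assuming the `GoogleAICacheManager` API from `@google/generative-ai/server` (verify the exact `create()` options against the SDK version you have installed):

```typescript
import { GoogleAICacheManager } from "@google/generative-ai/server";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Assumption: GoogleAICacheManager and its create() options come from the
// @google/generative-ai server entry point; option names may differ by version.
const cacheManager = new GoogleAICacheManager(process.env.GOOGLE_API_KEY!);
const cachedContent = await cacheManager.create({
  model: "models/gemini-1.5-flash-001", // caching requires an explicit model version
  contents: [
    { role: "user", parts: [{ text: "<large document to reuse across calls>" }] },
  ],
  ttlSeconds: 300,
});

const llm = new ChatGoogleGenerativeAI({ model: "gemini-1.5-flash-001" });
// Rebinds the underlying client to a generative model backed by the cache.
llm.useCachedContent(cachedContent);
const res = await llm.invoke("Summarize the cached document.");
```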
@@ -1,4 +1,4 @@
1
- import { GoogleGenerativeAIToolType } from "./types.cjs";
1
+ import { GoogleGenerativeAIThinkingConfig, GoogleGenerativeAIToolType } from "./types.cjs";
2
2
  import * as _google_generative_ai0 from "@google/generative-ai";
3
3
  import { CachedContent, GenerateContentRequest, ModelParams, Part, RequestOptions, SafetySetting, Schema } from "@google/generative-ai";
4
4
  import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
@@ -126,6 +126,11 @@ interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGe
126
126
  * - Gemini 1.0 Pro version gemini-1.0-pro-002
127
127
  */
128
128
  convertSystemMessageToHumanContent?: boolean | undefined;
129
+ /**
130
+ * Optional. Config for thinking features. An error will be returned if this
131
+ * field is set for models that don't support thinking.
132
+ */
133
+ thinkingConfig?: GoogleGenerativeAIThinkingConfig;
129
134
  }
130
135
  /**
131
136
  * Google Generative AI chat model integration.
@@ -524,6 +529,7 @@ declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAICha
524
529
  json?: boolean;
525
530
  streamUsage: boolean;
526
531
  convertSystemMessageToHumanContent: boolean | undefined;
532
+ thinkingConfig?: GoogleGenerativeAIThinkingConfig;
527
533
  private client;
528
534
  get _isMultimodalModel(): boolean;
529
535
  constructor(fields: GoogleGenerativeAIChatInput);
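The `thinkingConfig` field added in this hunk is forwarded into the client's `generationConfig` at construction time, as the compiled constructor earlier in this diff shows. A minimal usage sketch follows; the exact shape of `GoogleGenerativeAIThinkingConfig` is defined in `./types.js`, and the `thinkingBudget`/`includeThoughts` fields used here are assumptions mirroring the Gemini API's thinking config:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Assumption: field names follow the Gemini API's
// generationConfig.thinkingConfig; check the exported
// GoogleGenerativeAIThinkingConfig type for the package's exact shape.
const llm = new ChatGoogleGenerativeAI({
  model: "gemini-2.5-flash",
  thinkingConfig: {
    thinkingBudget: 1024, // cap the number of internal "thinking" tokens
    includeThoughts: true, // surface thought summaries in responses
  },
});

// Per the JSDoc above, models without thinking support return an error
// when thinkingConfig is set.
const res = await llm.invoke("Why is the sky blue?");
console.log(res.content);
```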
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.d.cts","names":["GenerateContentRequest","SafetySetting","Part","GenerativeAIPart","ModelParams","RequestOptions","CachedContent","Schema","CallbackManagerForLLMRun","AIMessageChunk","BaseMessage","ChatGenerationChunk","ChatResult","BaseChatModel","BaseChatModelCallOptions","LangSmithParams","BaseChatModelParams","ModelProfile","BaseLanguageModelInput","StructuredOutputMethodOptions","Runnable","InteropZodType","GoogleGenerativeAIToolType","BaseMessageExamplePair","GoogleGenerativeAIChatCallOptions","GoogleGenerativeAIChatInput","Pick","ChatGoogleGenerativeAI","Partial","Omit","Promise","AsyncGenerator","_google_generative_ai0","GenerateContentResult","Record","RunOutput"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, ModelParams, RequestOptions, type CachedContent, Schema } from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { BaseChatModel, type BaseChatModelCallOptions, type LangSmithParams, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { GoogleGenerativeAIToolType } from \"./types.js\";\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: string;\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. 
A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value varies by model\n */\n topK?: number;\n /**\n * The set of character sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n /**\n * Google API key to use\n */\n apiKey?: string;\n /**\n * Google API version to use\n */\n apiVersion?: string;\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n /** Whether to stream the results or not */\n streaming?: boolean;\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n}\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. 
\\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate \\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. 
\\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. 
Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput {\n static lc_name(): string;\n lc_serializable: boolean;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_namespace: string[];\n get lc_aliases(): {\n apiKey: string;\n };\n model: string;\n temperature?: number; // default value chosen based on model\n maxOutputTokens?: number;\n topP?: number; // default value chosen based on model\n topK?: number; // default value chosen based on model\n stopSequences: string[];\n safetySettings?: SafetySetting[];\n apiKey?: string;\n streaming: boolean;\n json?: boolean;\n streamUsage: boolean;\n convertSystemMessageToHumanContent: boolean | undefined;\n private client;\n get _isMultimodalModel(): boolean;\n constructor(fields: GoogleGenerativeAIChatInput);\n useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;\n get useSystemInstruction(): boolean;\n get computeUseSystemInstruction(): boolean;\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams;\n _combineLLMOutput(): never[];\n _llmType(): string;\n bindTools(tools: GoogleGenerativeAIToolType[], kwargs?: Partial<GoogleGenerativeAIChatCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleGenerativeAIChatCallOptions>;\n invocationParams(options?: this[\"ParsedCallOptions\"]): Omit<GenerateContentRequest, \"contents\">;\n _generate(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this[\"ParsedCallOptions\"]): Promise<import(\"@google/generative-ai\").GenerateContentResult>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n 
}>;\n}\n"],"mappings":";;;;;;;;;;;;;KAUYuB,sBAAAA;SACDb;UACCA;AAFZ,CAAA;AAAkC,UAIjBc,iCAAAA,SAA0CV,wBAJzB,CAAA;EAAA,KACvBJ,CAAAA,EAICY,0BAJDZ,EAAAA;EAAW;AACC;AAEvB;;EAAkD,oBACtCY,CAAAA,EAAAA,MAAAA,EAAAA;EAA0B;;AAD6C;AAqBnF;;EAA4C,WAAmCE,CAAAA,EAAAA,OAAAA;EAAiC;;;EAAlC,cAAA,CAAA,EALzDjB,MAKyD;AAkd9E;;;;AAgBqBN,UAleJwB,2BAAAA,SAAoCT,mBAkehCf,EAleqDyB,IAkerDzB,CAle0DuB,iCAke1DvB,EAAAA,aAAAA,CAAAA,CAAAA;EAAa;;;;;EAYkC,KAG/CqB,EAAAA,MAAAA;EAA0B;;;;;;;;;;EAEiE,WAAWV,CAAAA,EAAAA,MAAAA;EAAU;;;EACT,eAAkBD,CAAAA,EAAAA,MAAAA;EAAmB;;;;;;;;;;;;EAwB/F,IAAmBO,CAAAA,EAAAA,MAAAA;EAAsB;;;;;;;;;;EAMnF,IACRiB,CAAAA,EAAAA,MAAAA;EAAS;;;AAjEkI;;;;;;;;;;;;mBArZtIlC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAqZA0B,sBAAAA,SAA+Bd,cAAcW,mCAAmCf,2BAA2BgB;;;;;;;;;;;;;;;;mBAgB3GxB;;;;;;;;sBAQGwB;kCACYnB,6BAA6BF,8BAA8BC;;;mDAG1CU;;;mBAGhCO,uCAAuCM,QAAQJ,qCAAqCJ,SAASF,wBAAwBT,gBAAgBe;yDAC/FK,KAAK7B;sBACxCU,gEAAgEF,2BAA2BsB,QAAQlB;kCACvFF,gEAAgEF,2BAA2BuB,eAAepB;wCACpGX,mCAAmCG,+CAA2D2B,QAA3CE,sBAAAA,CAAmFC,qBAAAA;;;;;;;;;;;;;;;;;;iBAkB7JhB;;;oBAGGiB,sBAAsBA,mCAAmCb,eAAec;;IAEvFD,8BAA8Bf,uCAAuCC,SAASF,wBAAwBiB;;;oBAGvFD,sBAAsBA,mCAAmCb,eAAec;;IAEvFD,8BAA8Bf,sCAAsCC,SAASF;SACvER;YACGyB"}
1
+ {"version":3,"file":"chat_models.d.cts","names":["GenerateContentRequest","SafetySetting","Part","GenerativeAIPart","ModelParams","RequestOptions","CachedContent","Schema","CallbackManagerForLLMRun","AIMessageChunk","BaseMessage","ChatGenerationChunk","ChatResult","BaseChatModel","BaseChatModelCallOptions","LangSmithParams","BaseChatModelParams","ModelProfile","BaseLanguageModelInput","StructuredOutputMethodOptions","Runnable","InteropZodType","GoogleGenerativeAIThinkingConfig","GoogleGenerativeAIToolType","BaseMessageExamplePair","GoogleGenerativeAIChatCallOptions","GoogleGenerativeAIChatInput","Pick","ChatGoogleGenerativeAI","Partial","Omit","Promise","AsyncGenerator","_google_generative_ai0","GenerateContentResult","Record","RunOutput"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, ModelParams, RequestOptions, type CachedContent, Schema } from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { BaseChatModel, type BaseChatModelCallOptions, type LangSmithParams, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { GoogleGenerativeAIThinkingConfig, GoogleGenerativeAIToolType } from \"./types.js\";\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: string;\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. 
A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value varies by model\n */\n topK?: number;\n /**\n * The set of character sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n /**\n * Google API key to use\n */\n apiKey?: string;\n /**\n * Google API version to use\n */\n apiVersion?: string;\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n /** Whether to stream the results or not */\n streaming?: boolean;\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n /**\n * Optional. Config for thinking features. An error will be returned if this\n * field is set for models that don't support thinking.\n */\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n}\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. 
\\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate \\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. 
\\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. 
Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput {\n static lc_name(): string;\n lc_serializable: boolean;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_namespace: string[];\n get lc_aliases(): {\n apiKey: string;\n };\n model: string;\n temperature?: number; // default value chosen based on model\n maxOutputTokens?: number;\n topP?: number; // default value chosen based on model\n topK?: number; // default value chosen based on model\n stopSequences: string[];\n safetySettings?: SafetySetting[];\n apiKey?: string;\n streaming: boolean;\n json?: boolean;\n streamUsage: boolean;\n convertSystemMessageToHumanContent: boolean | undefined;\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n private client;\n get _isMultimodalModel(): boolean;\n constructor(fields: GoogleGenerativeAIChatInput);\n useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;\n get useSystemInstruction(): boolean;\n get computeUseSystemInstruction(): boolean;\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams;\n _combineLLMOutput(): never[];\n _llmType(): string;\n bindTools(tools: GoogleGenerativeAIToolType[], kwargs?: Partial<GoogleGenerativeAIChatCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleGenerativeAIChatCallOptions>;\n invocationParams(options?: this[\"ParsedCallOptions\"]): Omit<GenerateContentRequest, \"contents\">;\n _generate(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this[\"ParsedCallOptions\"]): Promise<import(\"@google/generative-ai\").GenerateContentResult>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n 
}>;\n}\n"],"mappings":";;;;;;;;;;;;;KAUYwB,sBAAAA;SACDd;UACCA;AAFZ,CAAA;AAAkC,UAIjBe,iCAAAA,SAA0CX,wBAJzB,CAAA;EAAA,KACvBJ,CAAAA,EAICa,0BAJDb,EAAAA;EAAW;AACC;AAEvB;;EAAkD,oBACtCa,CAAAA,EAAAA,MAAAA,EAAAA;EAA0B;;AAD6C;AAqBnF;;EAA4C,WAAmCE,CAAAA,EAAAA,OAAAA;EAAiC;;;EAAxC,cAAEE,CAAAA,EALrDpB,MAKqDoB;AAAI;AAud9E;;;AAAqGlB,UAvdpFiB,2BAAAA,SAAoCV,mBAudgDP,EAvd3BkB,IAud2BlB,CAvdtBgB,iCAudsBhB,EAAAA,aAAAA,CAAAA,CAAAA;EAAc;;;;;EA0BvC,KAAmBJ,EAAAA,MAAAA;EAAc;;;;;;;;;;EAO9C,WACvCK,CAAAA,EAAAA,MAAAA;EAAW;;;EAAuF,eACtFA,CAAAA,EAAAA,MAAAA;EAAW;;;;;;;;;;;;EAsB8C,IAEtFyB,CAAAA,EAAAA,MAAAA;EAAM;;;;;;;;;;EAKqD,IAAkBjB,CAAAA,EAAAA,MAAAA;EAAsB;;;;;AAhEiD;;;;;;;;;;mBA1ZtIjB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;mBAiCAqB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAyXAM,sBAAAA,SAA+Bf,cAAcY,mCAAmChB,2BAA2BiB;;;;;;;;;;;;;;;;mBAgB3GzB;;;;;;mBAMAqB;;;sBAGGI;kCACYpB,6BAA6BF,8BAA8BC;;;mDAG1CU;;;mBAGhCQ,uCAAuCM,QAAQJ,qCAAqCL,SAASF,wBAAwBT,gBAAgBgB;yDAC/FK,KAAK9B;sBACxCU,gEAAgEF,2BAA2BuB,QAAQnB;kCACvFF,gEAAgEF,2BAA2BwB,eAAerB;wCACpGX,mCAAmCG,+CAA2D4B,QAA3CE,sBAAAA,CAAmFC,qBAAAA;;;;;;;;;;;;;;;;;;iBAkB7JjB;;;oBAGGkB,sBAAsBA,mCAAmCd,eAAee;;IAEvFD,8BAA8BhB,uCAAuCC,SAASF,wBAAwBkB;;;oBAGvFD,sBAAsBA,mCAAmCd,eAAee;;IAEvFD,8BAA8BhB,sCAAsCC,SAASF;SACvER;YACG0B"}
@@ -1,4 +1,4 @@
1
- import { GoogleGenerativeAIToolType } from "./types.js";
1
+ import { GoogleGenerativeAIThinkingConfig, GoogleGenerativeAIToolType } from "./types.js";
2
2
  import * as _google_generative_ai0 from "@google/generative-ai";
3
3
  import { CachedContent, GenerateContentRequest, ModelParams, Part, RequestOptions, SafetySetting, Schema } from "@google/generative-ai";
4
4
  import { BaseChatModel, BaseChatModelCallOptions, BaseChatModelParams, LangSmithParams } from "@langchain/core/language_models/chat_models";
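The import change pulls the new `GoogleGenerativeAIThinkingConfig` type from `./types.js`, whose definition is not included in this diff. As a rough sketch, assuming the type mirrors the Gemini API's `ThinkingConfig` (both field names below are assumptions, not confirmed by this diff), it plausibly looks like:

```typescript
// Hypothetical sketch of GoogleGenerativeAIThinkingConfig: the real
// definition lives in ./types.js and is not part of this diff.
export interface GoogleGenerativeAIThinkingConfig {
  // Assumed field: token budget the model may spend on internal reasoning.
  thinkingBudget?: number;
  // Assumed field: whether thought summaries are included in responses.
  includeThoughts?: boolean;
}
```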
@@ -126,6 +126,11 @@ interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGe
126
126
  * - Gemini 1.0 Pro version gemini-1.0-pro-002
127
127
  */
128
128
  convertSystemMessageToHumanContent?: boolean | undefined;
129
+ /**
130
+ * Optional. Config for thinking features. An error will be returned if this
131
+ * field is set for models that don't support thinking.
132
+ */
133
+ thinkingConfig?: GoogleGenerativeAIThinkingConfig;
129
134
  }
130
135
  /**
131
136
  * Google Generative AI chat model integration.
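With `thinkingConfig` added to `GoogleGenerativeAIChatInput`, thinking options can now be passed at construction time. A minimal sketch, assuming a thinking-capable model and the hypothetical field names sketched above (per the new JSDoc, the API returns an error if this field is set for a model that doesn't support thinking):

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Minimal sketch: model name and budget value are illustrative only.
const llm = new ChatGoogleGenerativeAI({
  model: "gemini-2.5-flash", // assumed to support thinking
  // apiKey: "...", // or set GOOGLE_API_KEY in the environment
  thinkingConfig: {
    thinkingBudget: 1024, // hypothetical field; see the type sketch above
  },
});

const res = await llm.invoke("Why is the sky blue?");
console.log(res.content);
```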
@@ -524,6 +529,7 @@ declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAICha
524
529
  json?: boolean;
525
530
  streamUsage: boolean;
526
531
  convertSystemMessageToHumanContent: boolean | undefined;
532
+ thinkingConfig?: GoogleGenerativeAIThinkingConfig;
527
533
  private client;
528
534
  get _isMultimodalModel(): boolean;
529
535
  constructor(fields: GoogleGenerativeAIChatInput);
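Because the class declares `thinkingConfig` as a public optional property, it can be read back from an instance like the other generation settings declared alongside it:

```typescript
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Values are illustrative; the field name is the hypothetical one above.
const llm = new ChatGoogleGenerativeAI({
  model: "gemini-2.5-flash",
  // apiKey: "...", // or set GOOGLE_API_KEY in the environment
  thinkingConfig: { includeThoughts: true },
});
console.log(llm.thinkingConfig); // -> { includeThoughts: true }
```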