@langchain/google-genai 2.1.18 → 2.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +18 -0
  2. package/dist/chat_models.cjs +25 -25
  3. package/dist/chat_models.cjs.map +1 -1
  4. package/dist/chat_models.d.cts +2 -1
  5. package/dist/chat_models.d.cts.map +1 -1
  6. package/dist/chat_models.d.ts +2 -1
  7. package/dist/chat_models.d.ts.map +1 -1
  8. package/dist/chat_models.js +13 -12
  9. package/dist/chat_models.js.map +1 -1
  10. package/dist/embeddings.cjs +11 -16
  11. package/dist/embeddings.cjs.map +1 -1
  12. package/dist/embeddings.d.cts +0 -1
  13. package/dist/embeddings.d.cts.map +1 -1
  14. package/dist/embeddings.d.ts +0 -1
  15. package/dist/embeddings.d.ts.map +1 -1
  16. package/dist/embeddings.js +3 -7
  17. package/dist/embeddings.js.map +1 -1
  18. package/dist/index.cjs +1 -0
  19. package/dist/output_parsers.cjs +6 -8
  20. package/dist/output_parsers.cjs.map +1 -1
  21. package/dist/output_parsers.js +1 -2
  22. package/dist/output_parsers.js.map +1 -1
  23. package/dist/profiles.cjs +112 -85
  24. package/dist/profiles.cjs.map +1 -1
  25. package/dist/profiles.js +112 -85
  26. package/dist/profiles.js.map +1 -1
  27. package/dist/types.d.cts +2 -5
  28. package/dist/types.d.cts.map +1 -1
  29. package/dist/types.d.ts +2 -5
  30. package/dist/types.d.ts.map +1 -1
  31. package/dist/utils/common.cjs +45 -54
  32. package/dist/utils/common.cjs.map +1 -1
  33. package/dist/utils/common.js +27 -35
  34. package/dist/utils/common.js.map +1 -1
  35. package/dist/utils/tools.cjs +11 -15
  36. package/dist/utils/tools.cjs.map +1 -1
  37. package/dist/utils/tools.js +2 -5
  38. package/dist/utils/tools.js.map +1 -1
  39. package/dist/utils/validate_schema.cjs.map +1 -1
  40. package/dist/utils/validate_schema.js.map +1 -1
  41. package/dist/utils/zod_to_genai_parameters.cjs +4 -7
  42. package/dist/utils/zod_to_genai_parameters.cjs.map +1 -1
  43. package/dist/utils/zod_to_genai_parameters.js +2 -4
  44. package/dist/utils/zod_to_genai_parameters.js.map +1 -1
  45. package/package.json +5 -5
  46. package/dist/_virtual/rolldown_runtime.cjs +0 -25
package/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
1
1
  # @langchain/google-genai
2
2
 
3
+ ## 2.1.20
4
+
5
+ ### Patch Changes
6
+
7
+ - [#10080](https://github.com/langchain-ai/langchainjs/pull/10080) [`b583729`](https://github.com/langchain-ai/langchainjs/commit/b583729e99cf0c035630f6b311c4d069a1980cca) Thanks [@hntrl](https://github.com/hntrl)! - Add string-model constructor overloads for chat models (with supporting tests where applicable).
8
+
9
+ - Updated dependencies [[`fb2226e`](https://github.com/langchain-ai/langchainjs/commit/fb2226e6decdaba21e78b3f01877b45fa1eed6d3)]:
10
+ - @langchain/core@1.1.27
11
+
12
+ ## 2.1.19
13
+
14
+ ### Patch Changes
15
+
16
+ - [#10078](https://github.com/langchain-ai/langchainjs/pull/10078) [`7be50a7`](https://github.com/langchain-ai/langchainjs/commit/7be50a7014d7622e0ab8d303dfc9c633ebc96333) Thanks [@christian-bromann](https://github.com/christian-bromann)! - chore(\*): update model profiles
17
+
18
+ - Updated dependencies [[`27186c5`](https://github.com/langchain-ai/langchainjs/commit/27186c54884cfe7c2522fa50b42c3ca0ccaefdba), [`05396f7`](https://github.com/langchain-ai/langchainjs/commit/05396f7ce0a91c49a3bae4bbcd3dbdd6cbd18089), [`5a6f26b`](https://github.com/langchain-ai/langchainjs/commit/5a6f26bbaed80195dc538c538b96219a8b03f38f)]:
19
+ - @langchain/core@1.1.25
20
+
3
21
  ## 2.1.18
4
22
 
5
23
  ### Patch Changes
@@ -1,15 +1,14 @@
1
- const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
2
1
  const require_zod_to_genai_parameters = require('./utils/zod_to_genai_parameters.cjs');
3
2
  const require_common = require('./utils/common.cjs');
4
3
  const require_output_parsers = require('./output_parsers.cjs');
5
4
  const require_tools = require('./utils/tools.cjs');
6
5
  const require_profiles = require('./profiles.cjs');
7
- const __google_generative_ai = require_rolldown_runtime.__toESM(require("@google/generative-ai"));
8
- const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
9
- const __langchain_core_language_models_chat_models = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/chat_models"));
10
- const __langchain_core_runnables = require_rolldown_runtime.__toESM(require("@langchain/core/runnables"));
11
- const __langchain_core_utils_types = require_rolldown_runtime.__toESM(require("@langchain/core/utils/types"));
12
- const __langchain_core_output_parsers = require_rolldown_runtime.__toESM(require("@langchain/core/output_parsers"));
6
+ let _google_generative_ai = require("@google/generative-ai");
7
+ let _langchain_core_utils_env = require("@langchain/core/utils/env");
8
+ let _langchain_core_language_models_chat_models = require("@langchain/core/language_models/chat_models");
9
+ let _langchain_core_runnables = require("@langchain/core/runnables");
10
+ let _langchain_core_utils_types = require("@langchain/core/utils/types");
11
+ let _langchain_core_output_parsers = require("@langchain/core/output_parsers");
13
12
 
14
13
  //#region src/chat_models.ts
15
14
  /**
@@ -387,7 +386,7 @@ const __langchain_core_output_parsers = require_rolldown_runtime.__toESM(require
387
386
  *
388
387
  * <br />
389
388
  */
390
- var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat_models.BaseChatModel {
389
+ var ChatGoogleGenerativeAI = class extends _langchain_core_language_models_chat_models.BaseChatModel {
391
390
  static lc_name() {
392
391
  return "ChatGoogleGenerativeAI";
393
392
  }
@@ -420,7 +419,11 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
420
419
  get _isMultimodalModel() {
421
420
  return this.model.includes("vision") || this.model.startsWith("gemini-1.5") || this.model.startsWith("gemini-2") || this.model.startsWith("gemma-3-") && !this.model.startsWith("gemma-3-1b") || this.model.startsWith("gemini-3");
422
421
  }
423
- constructor(fields) {
422
+ constructor(modelOrFields, fieldsArg) {
423
+ const fields = typeof modelOrFields === "string" ? {
424
+ ...fieldsArg ?? {},
425
+ model: modelOrFields
426
+ } : modelOrFields;
424
427
  super(fields);
425
428
  this.model = fields.model.replace(/^models\//, "");
426
429
  this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
@@ -433,17 +436,16 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
433
436
  this.topK = fields.topK ?? this.topK;
434
437
  if (this.topK && this.topK < 0) throw new Error("`topK` must be a positive integer");
435
438
  this.stopSequences = fields.stopSequences ?? this.stopSequences;
436
- this.apiKey = fields.apiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
439
+ this.apiKey = fields.apiKey ?? (0, _langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
437
440
  if (!this.apiKey) throw new Error("Please set an API key for Google GenerativeAI in the environment variable GOOGLE_API_KEY or in the `apiKey` field of the ChatGoogleGenerativeAI constructor");
438
441
  this.safetySettings = fields.safetySettings ?? this.safetySettings;
439
442
  if (this.safetySettings && this.safetySettings.length > 0) {
440
- const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
441
- if (safetySettingsSet.size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
443
+ if (new Set(this.safetySettings.map((s) => s.category)).size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
442
444
  }
443
445
  this.streaming = fields.streaming ?? this.streaming;
444
446
  this.json = fields.json;
445
447
  this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
446
- this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
448
+ this.client = new _google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
447
449
  model: this.model,
448
450
  safetySettings: this.safetySettings,
449
451
  generationConfig: {
@@ -464,7 +466,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
464
466
  }
465
467
  useCachedContent(cachedContent, modelParams, requestOptions) {
466
468
  if (!this.apiKey) return;
467
- this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions);
469
+ this.client = new _google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions);
468
470
  }
469
471
  get useSystemInstruction() {
470
472
  return typeof this.convertSystemMessageToHumanContent === "boolean" ? !this.convertSystemMessageToHumanContent : this.computeUseSystemInstruction;
@@ -534,9 +536,8 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
534
536
  if (finalChunks[index] === void 0) finalChunks[index] = chunk;
535
537
  else finalChunks[index] = finalChunks[index].concat(chunk);
536
538
  }
537
- const generations = finalChunks.filter((c) => c !== void 0);
538
539
  return {
539
- generations,
540
+ generations: finalChunks.filter((c) => c !== void 0),
540
541
  llmOutput: { estimatedTokenUsage: tokenUsage }
541
542
  };
542
543
  }
@@ -558,14 +559,13 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
558
559
  this.client.systemInstruction = systemInstruction;
559
560
  actualPrompt = prompt.slice(1);
560
561
  }
561
- const parameters = this.invocationParams(options);
562
562
  const request = {
563
- ...parameters,
563
+ ...this.invocationParams(options),
564
564
  contents: actualPrompt
565
565
  };
566
566
  const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
567
- const { stream: stream$1 } = await this.client.generateContentStream(request, { signal: options?.signal });
568
- return stream$1;
567
+ const { stream } = await this.client.generateContentStream(request, { signal: options?.signal });
568
+ return stream;
569
569
  });
570
570
  let usageMetadata;
571
571
  let prevPromptTokenCount = 0;
@@ -637,7 +637,7 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
637
637
  if (method === "functionCalling") {
638
638
  let functionName = name ?? "extract";
639
639
  let tools;
640
- if ((0, __langchain_core_utils_types.isInteropZodSchema)(schema)) {
640
+ if ((0, _langchain_core_utils_types.isInteropZodSchema)(schema)) {
641
641
  const jsonSchema = require_zod_to_genai_parameters.schemaToGenerativeAIParameters(schema);
642
642
  tools = [{ functionDeclarations: [{
643
643
  name: functionName,
@@ -670,13 +670,13 @@ var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat
670
670
  } else {
671
671
  const jsonSchema = require_zod_to_genai_parameters.schemaToGenerativeAIParameters(schema);
672
672
  llm = this.withConfig({ responseSchema: jsonSchema });
673
- outputParser = new __langchain_core_output_parsers.JsonOutputParser();
673
+ outputParser = new _langchain_core_output_parsers.JsonOutputParser();
674
674
  }
675
675
  if (!includeRaw) return llm.pipe(outputParser).withConfig({ runName: "ChatGoogleGenerativeAIStructuredOutput" });
676
- const parserAssign = __langchain_core_runnables.RunnablePassthrough.assign({ parsed: (input, config$1) => outputParser.invoke(input.raw, config$1) });
677
- const parserNone = __langchain_core_runnables.RunnablePassthrough.assign({ parsed: () => null });
676
+ const parserAssign = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: (input, config) => outputParser.invoke(input.raw, config) });
677
+ const parserNone = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: () => null });
678
678
  const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone] });
679
- return __langchain_core_runnables.RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "StructuredOutputRunnable" });
679
+ return _langchain_core_runnables.RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "StructuredOutputRunnable" });
680
680
  }
681
681
  };
682
682
 
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.cjs","names":["BaseChatModel","fields: GoogleGenerativeAIChatInput","GenerativeAI","cachedContent: CachedContent","modelParams?: ModelParams","requestOptions?: RequestOptions","options: this[\"ParsedCallOptions\"]","tools: GoogleGenerativeAIToolType[]","kwargs?: Partial<GoogleGenerativeAIChatCallOptions>","convertToolsToGenAI","options?: this[\"ParsedCallOptions\"]","messages: BaseMessage[]","runManager?: CallbackManagerForLLMRun","convertBaseMessagesToContent","tokenUsage: TokenUsage","finalChunks: ChatGenerationChunk[]","usageMetadata: UsageMetadata | undefined","convertUsageMetadata","mapGenerateContentResultToChatResult","stream","convertResponseContentToChatGenerationChunk","request: string | GenerateContentRequest | (string | GenerativeAIPart)[]","e: any","PROFILES","outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>","config?: StructuredOutputMethodOptions<boolean>","schema: InteropZodType<RunOutput> | Record<string, any>","outputParser: BaseLLMOutputParser<RunOutput>","tools: GoogleGenerativeAIFunctionDeclarationsTool[]","schemaToGenerativeAIParameters","GoogleGenerativeAIToolsOutputParser","geminiFunctionDefinition: GenerativeAIFunctionDeclaration","removeAdditionalProperties","JsonOutputParser","RunnablePassthrough","input: any","config","RunnableSequence"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n GenerativeModel,\n GoogleGenerativeAI as GenerativeAI,\n FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,\n FunctionDeclaration as GenerativeAIFunctionDeclaration,\n type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema,\n GenerateContentRequest,\n SafetySetting,\n Part as GenerativeAIPart,\n ModelParams,\n RequestOptions,\n type CachedContent,\n Schema,\n} from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n 
AIMessageChunk,\n BaseMessage,\n UsageMetadata,\n} from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport {\n BaseChatModel,\n type BaseChatModelCallOptions,\n type LangSmithParams,\n type BaseChatModelParams,\n} from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { NewTokenIndices } from \"@langchain/core/callbacks/base\";\nimport {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnablePassthrough,\n RunnableSequence,\n} from \"@langchain/core/runnables\";\nimport {\n InferInteropZodOutput,\n InteropZodType,\n isInteropZodSchema,\n} from \"@langchain/core/utils/types\";\nimport {\n BaseLLMOutputParser,\n JsonOutputParser,\n} from \"@langchain/core/output_parsers\";\nimport {\n schemaToGenerativeAIParameters,\n removeAdditionalProperties,\n} from \"./utils/zod_to_genai_parameters.js\";\nimport {\n convertBaseMessagesToContent,\n convertResponseContentToChatGenerationChunk,\n convertUsageMetadata,\n mapGenerateContentResultToChatResult,\n} from \"./utils/common.js\";\nimport { GoogleGenerativeAIToolsOutputParser } from \"./output_parsers.js\";\nimport {\n GoogleGenerativeAIThinkingConfig,\n GoogleGenerativeAIToolType,\n} from \"./types.js\";\nimport { convertToolsToGenAI } from \"./utils/tools.js\";\nimport PROFILES from \"./profiles.js\";\n\ninterface TokenUsage {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n}\n\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\n\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided 
functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput\n extends\n BaseChatModelParams,\n Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: string;\n\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value varies by model\n */\n topK?: number;\n\n /**\n * The set of character 
sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n\n /**\n * Google API key to use\n */\n apiKey?: string;\n\n /**\n * Google API version to use\n */\n apiVersion?: string;\n\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n\n /**\n * Google API custom headers to use\n */\n customHeaders?: Record<string, string>;\n\n /** Whether to stream the results or not */\n streaming?: boolean;\n\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n\n /**\n * Optional. Config for thinking features. 
An error will be returned if this\n * field is set for models that don't support thinking.\n */\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n}\n\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * 
console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate 
\\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? 
chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? 
Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. 
The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatGoogleGenerativeAI\n extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk>\n implements GoogleGenerativeAIChatInput\n{\n static lc_name() {\n return \"ChatGoogleGenerativeAI\";\n }\n\n lc_serializable = true;\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"GOOGLE_API_KEY\",\n };\n }\n\n lc_namespace = [\"langchain\", \"chat_models\", \"google_genai\"];\n\n get lc_aliases() {\n return {\n apiKey: \"google_api_key\",\n };\n }\n\n model: string;\n\n temperature?: number; // default value chosen based on model\n\n maxOutputTokens?: number;\n\n topP?: number; // default value chosen based on model\n\n topK?: number; // default value chosen based on model\n\n stopSequences: string[] = [];\n\n safetySettings?: SafetySetting[];\n\n apiKey?: string;\n\n streaming = false;\n\n json?: boolean;\n\n streamUsage = true;\n\n convertSystemMessageToHumanContent: boolean | undefined;\n\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n\n private client: GenerativeModel;\n\n get _isMultimodalModel() {\n return (\n this.model.includes(\"vision\") ||\n this.model.startsWith(\"gemini-1.5\") ||\n this.model.startsWith(\"gemini-2\") ||\n (this.model.startsWith(\"gemma-3-\") &&\n !this.model.startsWith(\"gemma-3-1b\")) || // gemma-3 models are multimodal(but gemma-3n-* and gemma-3-1b are not)\n this.model.startsWith(\"gemini-3\")\n );\n 
}\n\n constructor(fields: GoogleGenerativeAIChatInput) {\n super(fields);\n\n this.model = fields.model.replace(/^models\\//, \"\");\n\n this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;\n\n if (this.maxOutputTokens && this.maxOutputTokens < 0) {\n throw new Error(\"`maxOutputTokens` must be a positive integer\");\n }\n\n this.temperature = fields.temperature ?? this.temperature;\n if (this.temperature && (this.temperature < 0 || this.temperature > 2)) {\n throw new Error(\"`temperature` must be in the range of [0.0,2.0]\");\n }\n\n this.topP = fields.topP ?? this.topP;\n if (this.topP && this.topP < 0) {\n throw new Error(\"`topP` must be a positive integer\");\n }\n\n if (this.topP && this.topP > 1) {\n throw new Error(\"`topP` must be below 1.\");\n }\n\n this.topK = fields.topK ?? this.topK;\n if (this.topK && this.topK < 0) {\n throw new Error(\"`topK` must be a positive integer\");\n }\n\n this.stopSequences = fields.stopSequences ?? this.stopSequences;\n\n this.apiKey = fields.apiKey ?? getEnvironmentVariable(\"GOOGLE_API_KEY\");\n if (!this.apiKey) {\n throw new Error(\n \"Please set an API key for Google GenerativeAI \" +\n \"in the environment variable GOOGLE_API_KEY \" +\n \"or in the `apiKey` field of the \" +\n \"ChatGoogleGenerativeAI constructor\"\n );\n }\n\n this.safetySettings = fields.safetySettings ?? this.safetySettings;\n if (this.safetySettings && this.safetySettings.length > 0) {\n const safetySettingsSet = new Set(\n this.safetySettings.map((s) => s.category)\n );\n if (safetySettingsSet.size !== this.safetySettings.length) {\n throw new Error(\n \"The categories in `safetySettings` array must be unique\"\n );\n }\n }\n\n this.streaming = fields.streaming ?? this.streaming;\n this.json = fields.json;\n\n this.thinkingConfig = fields.thinkingConfig ?? 
this.thinkingConfig;\n\n this.client = new GenerativeAI(this.apiKey).getGenerativeModel(\n {\n model: this.model,\n safetySettings: this.safetySettings as SafetySetting[],\n generationConfig: {\n stopSequences: this.stopSequences,\n maxOutputTokens: this.maxOutputTokens,\n temperature: this.temperature,\n topP: this.topP,\n topK: this.topK,\n ...(this.json ? { responseMimeType: \"application/json\" } : {}),\n ...(this.thinkingConfig\n ? { thinkingConfig: this.thinkingConfig }\n : {}),\n },\n },\n {\n apiVersion: fields.apiVersion,\n baseUrl: fields.baseUrl,\n customHeaders: fields.customHeaders,\n }\n );\n this.streamUsage = fields.streamUsage ?? this.streamUsage;\n }\n\n useCachedContent(\n cachedContent: CachedContent,\n modelParams?: ModelParams,\n requestOptions?: RequestOptions\n ): void {\n if (!this.apiKey) return;\n this.client = new GenerativeAI(\n this.apiKey\n ).getGenerativeModelFromCachedContent(\n cachedContent,\n modelParams,\n requestOptions\n );\n }\n\n get useSystemInstruction(): boolean {\n return typeof this.convertSystemMessageToHumanContent === \"boolean\"\n ? 
!this.convertSystemMessageToHumanContent\n : this.computeUseSystemInstruction;\n }\n\n get computeUseSystemInstruction(): boolean {\n // This works on models from April 2024 and later\n // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later\n // AI Studio: gemini-1.5-pro-latest\n if (this.model === \"gemini-1.0-pro-001\") {\n return false;\n } else if (this.model.startsWith(\"gemini-pro-vision\")) {\n return false;\n } else if (this.model.startsWith(\"gemini-1.0-pro-vision\")) {\n return false;\n } else if (this.model === \"gemini-pro\") {\n // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001\n return false;\n }\n return true;\n }\n\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams {\n return {\n ls_provider: \"google_genai\",\n ls_model_name: this.model,\n ls_model_type: \"chat\",\n ls_temperature: this.client.generationConfig.temperature,\n ls_max_tokens: this.client.generationConfig.maxOutputTokens,\n ls_stop: options.stop,\n };\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n _llmType() {\n return \"googlegenerativeai\";\n }\n\n override bindTools(\n tools: GoogleGenerativeAIToolType[],\n kwargs?: Partial<GoogleGenerativeAIChatCallOptions>\n ): Runnable<\n BaseLanguageModelInput,\n AIMessageChunk,\n GoogleGenerativeAIChatCallOptions\n > {\n return this.withConfig({\n tools: convertToolsToGenAI(tools)?.tools,\n ...kwargs,\n });\n }\n\n invocationParams(\n options?: this[\"ParsedCallOptions\"]\n ): Omit<GenerateContentRequest, \"contents\"> {\n const toolsAndConfig = options?.tools?.length\n ? 
convertToolsToGenAI(options.tools, {\n toolChoice: options.tool_choice,\n allowedFunctionNames: options.allowedFunctionNames,\n })\n : undefined;\n\n if (options?.responseSchema) {\n this.client.generationConfig.responseSchema = options.responseSchema;\n this.client.generationConfig.responseMimeType = \"application/json\";\n } else {\n this.client.generationConfig.responseSchema = undefined;\n this.client.generationConfig.responseMimeType = this.json\n ? \"application/json\"\n : undefined;\n }\n\n return {\n ...(toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {}),\n ...(toolsAndConfig?.toolConfig\n ? { toolConfig: toolsAndConfig.toolConfig }\n : {}),\n };\n }\n\n async _generate(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n options.signal?.throwIfAborted();\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n\n // Handle streaming\n if (this.streaming) {\n const tokenUsage: TokenUsage = {};\n const stream = this._streamResponseChunks(messages, options, runManager);\n const finalChunks: ChatGenerationChunk[] = [];\n\n for await (const chunk of stream) {\n const index =\n (chunk.generationInfo as NewTokenIndices)?.completion ?? 
0;\n if (finalChunks[index] === undefined) {\n finalChunks[index] = chunk;\n } else {\n finalChunks[index] = finalChunks[index].concat(chunk);\n }\n }\n const generations = finalChunks.filter(\n (c): c is ChatGenerationChunk => c !== undefined\n );\n\n return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };\n }\n\n const res = await this.completionWithRetry({\n ...parameters,\n contents: actualPrompt,\n });\n\n let usageMetadata: UsageMetadata | undefined;\n if (\"usageMetadata\" in res.response) {\n usageMetadata = convertUsageMetadata(\n res.response.usageMetadata,\n this.model\n );\n }\n\n const generationResult = mapGenerateContentResultToChatResult(\n res.response,\n {\n usageMetadata,\n }\n );\n // may not have generations in output if there was a refusal for safety reasons, malformed function call, etc.\n if (generationResult.generations?.length > 0) {\n await runManager?.handleLLMNewToken(\n generationResult.generations[0]?.text ?? \"\"\n );\n }\n return generationResult;\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n const request = {\n ...parameters,\n contents: actualPrompt,\n };\n const stream = await this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n const { stream } = await this.client.generateContentStream(request, {\n signal: options?.signal,\n });\n return stream;\n }\n );\n\n let usageMetadata: UsageMetadata | undefined;\n // Keep prior cumulative counts for calculating token deltas while 
streaming\n let prevPromptTokenCount = 0;\n let prevCandidatesTokenCount = 0;\n let prevTotalTokenCount = 0;\n let index = 0;\n for await (const response of stream) {\n if (options.signal?.aborted) {\n return;\n }\n if (\n \"usageMetadata\" in response &&\n response.usageMetadata !== undefined &&\n this.streamUsage !== false &&\n options.streamUsage !== false\n ) {\n usageMetadata = convertUsageMetadata(\n response.usageMetadata,\n this.model\n );\n\n // Under the hood, LangChain combines the prompt tokens. Google returns the updated\n // total each time, so we need to find the difference between the tokens.\n const newPromptTokenCount =\n response.usageMetadata.promptTokenCount ?? 0;\n usageMetadata.input_tokens = Math.max(\n 0,\n newPromptTokenCount - prevPromptTokenCount\n );\n prevPromptTokenCount = newPromptTokenCount;\n\n const newCandidatesTokenCount =\n response.usageMetadata.candidatesTokenCount ?? 0;\n usageMetadata.output_tokens = Math.max(\n 0,\n newCandidatesTokenCount - prevCandidatesTokenCount\n );\n prevCandidatesTokenCount = newCandidatesTokenCount;\n\n const newTotalTokenCount = response.usageMetadata.totalTokenCount ?? 0;\n usageMetadata.total_tokens = Math.max(\n 0,\n newTotalTokenCount - prevTotalTokenCount\n );\n prevTotalTokenCount = newTotalTokenCount;\n }\n\n const chunk = convertResponseContentToChatGenerationChunk(response, {\n usageMetadata,\n index,\n });\n index += 1;\n if (!chunk) {\n continue;\n }\n\n yield chunk;\n await runManager?.handleLLMNewToken(chunk.text ?? 
\"\");\n }\n }\n\n async completionWithRetry(\n request: string | GenerateContentRequest | (string | GenerativeAIPart)[],\n options?: this[\"ParsedCallOptions\"]\n ) {\n return this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n try {\n return await this.client.generateContent(request, {\n signal: options?.signal,\n });\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } catch (e: any) {\n // TODO: Improve error handling\n if (e.message?.includes(\"400 Bad Request\")) {\n e.status = 400;\n }\n throw e;\n }\n }\n );\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const schema: InteropZodType<RunOutput> | Record<string, any> =\n outputSchema;\n const name = config?.name;\n const method = config?.method;\n const includeRaw = config?.includeRaw;\n if (method === \"jsonMode\") {\n throw new Error(\n `ChatGoogleGenerativeAI only supports \"jsonSchema\" or \"functionCalling\" as a method.`\n );\n }\n\n let llm;\n let outputParser: BaseLLMOutputParser<RunOutput>;\n if (method === \"functionCalling\") {\n let functionName = name ?? 
\"extract\";\n let tools: GoogleGenerativeAIFunctionDeclarationsTool[];\n if (isInteropZodSchema(schema)) {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n tools = [\n {\n functionDeclarations: [\n {\n name: functionName,\n description:\n jsonSchema.description ?? \"A function available to call.\",\n parameters: jsonSchema as GenerativeAIFunctionDeclarationSchema,\n },\n ],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<\n InferInteropZodOutput<typeof schema>\n >({\n returnSingle: true,\n keyName: functionName,\n zodSchema: schema,\n });\n } else {\n let geminiFunctionDefinition: GenerativeAIFunctionDeclaration;\n if (\n typeof schema.name === \"string\" &&\n typeof schema.parameters === \"object\" &&\n schema.parameters != null\n ) {\n geminiFunctionDefinition = schema as GenerativeAIFunctionDeclaration;\n geminiFunctionDefinition.parameters = removeAdditionalProperties(\n schema.parameters\n ) as GenerativeAIFunctionDeclarationSchema;\n functionName = schema.name;\n } else {\n geminiFunctionDefinition = {\n name: functionName,\n description: schema.description ?? 
\"\",\n parameters: removeAdditionalProperties(\n schema\n ) as GenerativeAIFunctionDeclarationSchema,\n };\n }\n tools = [\n {\n functionDeclarations: [geminiFunctionDefinition],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<RunOutput>({\n returnSingle: true,\n keyName: functionName,\n });\n }\n llm = this.bindTools(tools).withConfig({\n allowedFunctionNames: [functionName],\n });\n } else {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n llm = this.withConfig({\n responseSchema: jsonSchema as Schema,\n });\n outputParser = new JsonOutputParser();\n }\n\n if (!includeRaw) {\n return llm.pipe(outputParser).withConfig({\n runName: \"ChatGoogleGenerativeAIStructuredOutput\",\n }) as Runnable<BaseLanguageModelInput, RunOutput>;\n }\n\n const parserAssign = RunnablePassthrough.assign({\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n parsed: (input: any, config) => outputParser.invoke(input.raw, config),\n });\n const parserNone = RunnablePassthrough.assign({\n parsed: () => null,\n });\n const parsedWithFallback = parserAssign.withFallbacks({\n fallbacks: [parserNone],\n });\n return RunnableSequence.from<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n >([\n {\n raw: llm,\n },\n parsedWithFallback,\n ]).withConfig({\n runName: \"StructuredOutputRunnable\",\n });\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAilBA,IAAa,yBAAb,cACUA,2DAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED,kBAAkB;CAElB,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,iBACT;CACF;CAED,eAAe;EAAC;EAAa;EAAe;CAAe;CAE3D,IAAI,aAAa;AACf,SAAO,EACL,QAAQ,iBACT;CACF;CAED;CAEA;CAEA;CAEA;CAEA;CAEA,gBAA0B,CAAE;CAE5B;CAEA;CAEA,YAAY;CAEZ;CAEA,cAAc;CAEd;CAEA;CAEA,AAAQ;CAER,IAAI,qBAAqB;AACvB,SACE,KAAK,MAAM,SAAS,SAAS,IAC7B,KAAK,MAAM,WAAW,aAAa,IACnC,KAAK,MAAM,WAAW,WAAW,IAChC,KAAK,MAAM,WAAW,WAAW,IAChC,CAAC,KAAK,MAAM,WAAW,aAAa,IACtC,KAAK,MAAM,WAAW,WAAW;CAEpC;CAED,YAAYC,QAAqC;EAC/C,MAAM,OAAO;EAEb,KAAK,QAAQ,OAAO,MAAM,QAAQ,aAAa,GAAG;EAElD,KAAK,kBAAkB,OAAO,mBAAmB,KAAK;AAEtD,MAAI,KAAK,mBAAmB,KAAK,kBAAkB,EACjD,OAAM,IAAI,MAAM;EAGlB,KAAK,cAAc,OAAO,eAAe,KAAK;AAC9C,MAAI,KAAK,gBAAgB,KAAK,cAAc,KAAK,KAAK,cAAc,GAClE,OAAM,IAAI,MAAM;EAGlB,KAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;AAGlB,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;EAGlB,KAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM;EAGlB,KAAK,gBAAgB,OAAO,iBAAiB,KAAK;EAElD,KAAK,SAAS,OAAO,iEAAiC,iBAAiB;AACvE,MAAI,CAAC,KAAK,OACR,OAAM,IAAI,MACR;EAOJ,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;AACpD,MAAI,KAAK,kBAAkB,KAAK,eAAe,SAAS,GAAG;GACzD,MAAM,oBAAoB,IAAI,IAC5B,KAAK,eAAe,IAAI,CAAC,MAAM,EAAE,SAAS;AAE5C,OAAI,kBAAkB,SAAS,KAAK,eAAe,OACjD,OAAM,IAAI,MACR;EAGL;EAED,KAAK,YAAY,OAAO,aAAa,KAAK;EAC1C,KAAK,OAAO,OAAO;EAEnB,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;EAEpD,KAAK,SAAS,IAAIC,0CAAa,KAAK,QAAQ,mBAC1C;GACE,OAAO,KAAK;GACZ,gBAAgB,KAAK;GACrB,kBAAkB;IAChB,eAAe,KAAK;IACpB,iBAAiB,KAAK;IACtB,aAAa,KAAK;IAClB,MAAM,KAAK;IACX,MAAM,KAAK;IACX,GAAI,KAAK,OAAO,EAAE,kBAAkB,mBAAoB,IAAG,CAAE;IAC7D,
GAAI,KAAK,iBACL,EAAE,gBAAgB,KAAK,eAAgB,IACvC,CAAE;GACP;EACF,GACD;GACE,YAAY,OAAO;GACnB,SAAS,OAAO;GAChB,eAAe,OAAO;EACvB,EACF;EACD,KAAK,cAAc,OAAO,eAAe,KAAK;CAC/C;CAED,iBACEC,eACAC,aACAC,gBACM;AACN,MAAI,CAAC,KAAK,OAAQ;EAClB,KAAK,SAAS,IAAIH,0CAChB,KAAK,QACL,oCACA,eACA,aACA,eACD;CACF;CAED,IAAI,uBAAgC;AAClC,SAAO,OAAO,KAAK,uCAAuC,YACtD,CAAC,KAAK,qCACN,KAAK;CACV;CAED,IAAI,8BAAuC;AAIzC,MAAI,KAAK,UAAU,qBACjB,QAAO;WACE,KAAK,MAAM,WAAW,oBAAoB,CACnD,QAAO;WACE,KAAK,MAAM,WAAW,wBAAwB,CACvD,QAAO;WACE,KAAK,UAAU,aAExB,QAAO;AAET,SAAO;CACR;CAED,YAAYI,SAAqD;AAC/D,SAAO;GACL,aAAa;GACb,eAAe,KAAK;GACpB,eAAe;GACf,gBAAgB,KAAK,OAAO,iBAAiB;GAC7C,eAAe,KAAK,OAAO,iBAAiB;GAC5C,SAAS,QAAQ;EAClB;CACF;CAED,oBAAoB;AAClB,SAAO,CAAE;CACV;CAED,WAAW;AACT,SAAO;CACR;CAED,AAAS,UACPC,OACAC,QAKA;AACA,SAAO,KAAK,WAAW;GACrB,OAAOC,kCAAoB,MAAM,EAAE;GACnC,GAAG;EACJ,EAAC;CACH;CAED,iBACEC,SAC0C;EAC1C,MAAM,iBAAiB,SAAS,OAAO,SACnCD,kCAAoB,QAAQ,OAAO;GACjC,YAAY,QAAQ;GACpB,sBAAsB,QAAQ;EAC/B,EAAC,GACF;AAEJ,MAAI,SAAS,gBAAgB;GAC3B,KAAK,OAAO,iBAAiB,iBAAiB,QAAQ;GACtD,KAAK,OAAO,iBAAiB,mBAAmB;EACjD,OAAM;GACL,KAAK,OAAO,iBAAiB,iBAAiB;GAC9C,KAAK,OAAO,iBAAiB,mBAAmB,KAAK,OACjD,qBACA;EACL;AAED,SAAO;GACL,GAAI,gBAAgB,QAAQ,EAAE,OAAO,eAAe,MAAO,IAAG,CAAE;GAChE,GAAI,gBAAgB,aAChB,EAAE,YAAY,eAAe,WAAY,IACzC,CAAE;EACP;CACF;CAED,MAAM,UACJE,UACAL,SACAM,YACqB;EACrB,QAAQ,QAAQ,gBAAgB;EAChC,MAAM,SAASC,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,kBAAkB,GAAG;GAC5B,KAAK,OAAO,oBAAoB;GAChC,eAAe,OAAO,MAAM,EAAE;EAC/B;EACD,MAAM,aAAa,KAAK,iBAAiB,QAAQ;AAGjD,MAAI,KAAK,WAAW;GAClB,MAAMC,aAAyB,CAAE;GACjC,MAAM,SAAS,KAAK,sBAAsB,UAAU,SAAS,WAAW;GACxE,MAAMC,cAAqC,CAAE;AAE7C,cAAW,MAAM,SAAS,QAAQ;IAChC,MAAM,QACH,MAAM,gBAAoC,cAAc;AAC3D,QAAI,YAAY,WAAW,QACzB,YAAY,SAAS;SAErB,YAAY,SAAS,YAAY,OAAO,OAAO,MAAM;GAExD;GACD,MAAM,cAAc,YAAY,OAC9B,CAAC,MAAgC,MAAM,OACxC;AAED,UAAO;IAAE;IAAa,WAAW,EAAE,qBAAqB,WAAY;GAAE;EACvE;EAED,MAAM,MAAM,MAAM,KAAK,oBAAoB;GACzC,GAAG;GACH,UAAU;EACX,EAAC;EAEF,IAAIC;AACJ,MAAI,mBAAmB,IAAI,UACzB,gBAAgBC,oCACd,
IAAI,SAAS,eACb,KAAK,MACN;EAGH,MAAM,mBAAmBC,oDACvB,IAAI,UACJ,EACE,cACD,EACF;AAED,MAAI,iBAAiB,aAAa,SAAS,GACzC,MAAM,YAAY,kBAChB,iBAAiB,YAAY,IAAI,QAAQ,GAC1C;AAEH,SAAO;CACR;CAED,OAAO,sBACLP,UACAL,SACAM,YACqC;EACrC,MAAM,SAASC,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,kBAAkB,GAAG;GAC5B,KAAK,OAAO,oBAAoB;GAChC,eAAe,OAAO,MAAM,EAAE;EAC/B;EACD,MAAM,aAAa,KAAK,iBAAiB,QAAQ;EACjD,MAAM,UAAU;GACd,GAAG;GACH,UAAU;EACX;EACD,MAAM,SAAS,MAAM,KAAK,OAAO,gBAC/B,EAAE,QAAQ,SAAS,OAAQ,GAC3B,YAAY;GACV,MAAM,EAAE,kBAAQ,GAAG,MAAM,KAAK,OAAO,sBAAsB,SAAS,EAClE,QAAQ,SAAS,OAClB,EAAC;AACF,UAAOM;EACR,EACF;EAED,IAAIH;EAEJ,IAAI,uBAAuB;EAC3B,IAAI,2BAA2B;EAC/B,IAAI,sBAAsB;EAC1B,IAAI,QAAQ;AACZ,aAAW,MAAM,YAAY,QAAQ;AACnC,OAAI,QAAQ,QAAQ,QAClB;AAEF,OACE,mBAAmB,YACnB,SAAS,kBAAkB,UAC3B,KAAK,gBAAgB,SACrB,QAAQ,gBAAgB,OACxB;IACA,gBAAgBC,oCACd,SAAS,eACT,KAAK,MACN;IAID,MAAM,sBACJ,SAAS,cAAc,oBAAoB;IAC7C,cAAc,eAAe,KAAK,IAChC,GACA,sBAAsB,qBACvB;IACD,uBAAuB;IAEvB,MAAM,0BACJ,SAAS,cAAc,wBAAwB;IACjD,cAAc,gBAAgB,KAAK,IACjC,GACA,0BAA0B,yBAC3B;IACD,2BAA2B;IAE3B,MAAM,qBAAqB,SAAS,cAAc,mBAAmB;IACrE,cAAc,eAAe,KAAK,IAChC,GACA,qBAAqB,oBACtB;IACD,sBAAsB;GACvB;GAED,MAAM,QAAQG,2DAA4C,UAAU;IAClE;IACA;GACD,EAAC;GACF,SAAS;AACT,OAAI,CAAC,MACH;GAGF,MAAM;GACN,MAAM,YAAY,kBAAkB,MAAM,QAAQ,GAAG;EACtD;CACF;CAED,MAAM,oBACJC,SACAX,SACA;AACA,SAAO,KAAK,OAAO,gBACjB,EAAE,QAAQ,SAAS,OAAQ,GAC3B,YAAY;AACV,OAAI;AACF,WAAO,MAAM,KAAK,OAAO,gBAAgB,SAAS,EAChD,QAAQ,SAAS,OAClB,EAAC;GAEH,SAAQY,GAAQ;AAEf,QAAI,EAAE,SAAS,SAAS,kBAAkB,EACxC,EAAE,SAAS;AAEb,UAAM;GACP;EACF,EACF;CACF;;;;;;;;;;;;;;;;;;CAmBD,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,CAAE;CAClC;CAwBD,qBAIEC,cAIAC,QAMI;EAEJ,MAAMC,SACJ;EACF,MAAM,OAAO,QAAQ;EACrB,MAAM,SAAS,QAAQ;EACvB,MAAM,aAAa,QAAQ;AAC3B,MAAI,WAAW,WACb,OAAM,IAAI,MACR,CAAC,mFAAmF,CAAC;EAIzF,IAAI;EACJ,IAAIC;AACJ,MAAI,WAAW,mBAAmB;GAChC,IAAI,eAAe,QAAQ;GAC3B,IAAIC;AACJ,4DAAuB,OAAO,EAAE;IAC9B,MAAM,aAAaC,+DAA+B,OAAO;IACzD,QAAQ,CACN,EACE,sBAAsB,CACpB;KACE,MAAM;KACN,aACE,WAAW,eAAe;KAC5B,YAAY;IACb,CACF,EA
CF,CACF;IACD,eAAe,IAAIC,2DAEjB;KACA,cAAc;KACd,SAAS;KACT,WAAW;IACZ;GACF,OAAM;IACL,IAAIC;AACJ,QACE,OAAO,OAAO,SAAS,YACvB,OAAO,OAAO,eAAe,YAC7B,OAAO,cAAc,MACrB;KACA,2BAA2B;KAC3B,yBAAyB,aAAaC,2DACpC,OAAO,WACR;KACD,eAAe,OAAO;IACvB,OACC,2BAA2B;KACzB,MAAM;KACN,aAAa,OAAO,eAAe;KACnC,YAAYA,2DACV,OACD;IACF;IAEH,QAAQ,CACN,EACE,sBAAsB,CAAC,wBAAyB,EACjD,CACF;IACD,eAAe,IAAIF,2DAA+C;KAChE,cAAc;KACd,SAAS;IACV;GACF;GACD,MAAM,KAAK,UAAU,MAAM,CAAC,WAAW,EACrC,sBAAsB,CAAC,YAAa,EACrC,EAAC;EACH,OAAM;GACL,MAAM,aAAaD,+DAA+B,OAAO;GACzD,MAAM,KAAK,WAAW,EACpB,gBAAgB,WACjB,EAAC;GACF,eAAe,IAAII;EACpB;AAED,MAAI,CAAC,WACH,QAAO,IAAI,KAAK,aAAa,CAAC,WAAW,EACvC,SAAS,yCACV,EAAC;EAGJ,MAAM,eAAeC,+CAAoB,OAAO,EAE9C,QAAQ,CAACC,OAAYC,aAAW,aAAa,OAAO,MAAM,KAAKA,SAAO,CACvE,EAAC;EACF,MAAM,aAAaF,+CAAoB,OAAO,EAC5C,QAAQ,MAAM,KACf,EAAC;EACF,MAAM,qBAAqB,aAAa,cAAc,EACpD,WAAW,CAAC,UAAW,EACxB,EAAC;AACF,SAAOG,4CAAiB,KAGtB,CACA,EACE,KAAK,IACN,GACD,kBACD,EAAC,CAAC,WAAW,EACZ,SAAS,2BACV,EAAC;CACH;AACF"}
1
+ {"version":3,"file":"chat_models.cjs","names":["BaseChatModel","GenerativeAI","convertToolsToGenAI","convertBaseMessagesToContent","convertUsageMetadata","mapGenerateContentResultToChatResult","convertResponseContentToChatGenerationChunk","PROFILES","schemaToGenerativeAIParameters","GoogleGenerativeAIToolsOutputParser","removeAdditionalProperties","JsonOutputParser","RunnablePassthrough","RunnableSequence"],"sources":["../src/chat_models.ts"],"sourcesContent":["import {\n GenerativeModel,\n GoogleGenerativeAI as GenerativeAI,\n FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,\n FunctionDeclaration as GenerativeAIFunctionDeclaration,\n type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema,\n GenerateContentRequest,\n SafetySetting,\n Part as GenerativeAIPart,\n ModelParams,\n RequestOptions,\n type CachedContent,\n Schema,\n} from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n AIMessageChunk,\n BaseMessage,\n UsageMetadata,\n} from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport {\n BaseChatModel,\n type BaseChatModelCallOptions,\n type LangSmithParams,\n type BaseChatModelParams,\n} from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { NewTokenIndices } from \"@langchain/core/callbacks/base\";\nimport {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnablePassthrough,\n RunnableSequence,\n} from \"@langchain/core/runnables\";\nimport {\n InferInteropZodOutput,\n InteropZodType,\n isInteropZodSchema,\n} from \"@langchain/core/utils/types\";\nimport {\n BaseLLMOutputParser,\n JsonOutputParser,\n} from \"@langchain/core/output_parsers\";\nimport 
{\n schemaToGenerativeAIParameters,\n removeAdditionalProperties,\n} from \"./utils/zod_to_genai_parameters.js\";\nimport {\n convertBaseMessagesToContent,\n convertResponseContentToChatGenerationChunk,\n convertUsageMetadata,\n mapGenerateContentResultToChatResult,\n} from \"./utils/common.js\";\nimport { GoogleGenerativeAIToolsOutputParser } from \"./output_parsers.js\";\nimport {\n GoogleGenerativeAIThinkingConfig,\n GoogleGenerativeAIToolType,\n} from \"./types.js\";\nimport { convertToolsToGenAI } from \"./utils/tools.js\";\nimport PROFILES from \"./profiles.js\";\n\ninterface TokenUsage {\n completionTokens?: number;\n promptTokens?: number;\n totalTokens?: number;\n}\n\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\n\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput\n extends\n BaseChatModelParams,\n Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: ModelParams[\"model\"];\n\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. 
A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value varies by model\n */\n topK?: number;\n\n /**\n * The set of character sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. 
If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n\n /**\n * Google API key to use\n */\n apiKey?: string;\n\n /**\n * Google API version to use\n */\n apiVersion?: string;\n\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n\n /**\n * Google API custom headers to use\n */\n customHeaders?: Record<string, string>;\n\n /** Whether to stream the results or not */\n streaming?: boolean;\n\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n\n /**\n * Optional. Config for thinking features. An error will be returned if this\n * field is set for models that don't support thinking.\n */\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n}\n\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. 
`.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and 
your intended audience. \\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate \\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers 
to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? 
Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. 
The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatGoogleGenerativeAI\n extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk>\n implements GoogleGenerativeAIChatInput\n{\n static lc_name() {\n return \"ChatGoogleGenerativeAI\";\n }\n\n lc_serializable = true;\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"GOOGLE_API_KEY\",\n };\n }\n\n lc_namespace = [\"langchain\", \"chat_models\", \"google_genai\"];\n\n get lc_aliases() {\n return {\n apiKey: \"google_api_key\",\n };\n }\n\n model: string;\n\n temperature?: number; // default value chosen based on model\n\n maxOutputTokens?: number;\n\n topP?: number; // default value chosen based on model\n\n topK?: number; // default value chosen based on model\n\n stopSequences: string[] = [];\n\n safetySettings?: SafetySetting[];\n\n apiKey?: string;\n\n streaming = false;\n\n json?: boolean;\n\n streamUsage = true;\n\n convertSystemMessageToHumanContent: boolean | undefined;\n\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n\n private client: GenerativeModel;\n\n get _isMultimodalModel() {\n return (\n this.model.includes(\"vision\") ||\n this.model.startsWith(\"gemini-1.5\") ||\n this.model.startsWith(\"gemini-2\") ||\n (this.model.startsWith(\"gemma-3-\") &&\n !this.model.startsWith(\"gemma-3-1b\")) || // gemma-3 models are multimodal(but gemma-3n-* and gemma-3-1b are not)\n this.model.startsWith(\"gemini-3\")\n );\n 
}\n\n constructor(\n model: ModelParams[\"model\"],\n fields?: Omit<GoogleGenerativeAIChatInput, \"model\">\n );\n constructor(fields: GoogleGenerativeAIChatInput);\n constructor(\n modelOrFields: ModelParams[\"model\"] | GoogleGenerativeAIChatInput,\n fieldsArg?: Omit<GoogleGenerativeAIChatInput, \"model\">\n ) {\n const fields =\n typeof modelOrFields === \"string\"\n ? { ...(fieldsArg ?? {}), model: modelOrFields }\n : modelOrFields;\n super(fields);\n\n this.model = fields.model.replace(/^models\\//, \"\");\n\n this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;\n\n if (this.maxOutputTokens && this.maxOutputTokens < 0) {\n throw new Error(\"`maxOutputTokens` must be a positive integer\");\n }\n\n this.temperature = fields.temperature ?? this.temperature;\n if (this.temperature && (this.temperature < 0 || this.temperature > 2)) {\n throw new Error(\"`temperature` must be in the range of [0.0,2.0]\");\n }\n\n this.topP = fields.topP ?? this.topP;\n if (this.topP && this.topP < 0) {\n throw new Error(\"`topP` must be a positive integer\");\n }\n\n if (this.topP && this.topP > 1) {\n throw new Error(\"`topP` must be below 1.\");\n }\n\n this.topK = fields.topK ?? this.topK;\n if (this.topK && this.topK < 0) {\n throw new Error(\"`topK` must be a positive integer\");\n }\n\n this.stopSequences = fields.stopSequences ?? this.stopSequences;\n\n this.apiKey = fields.apiKey ?? getEnvironmentVariable(\"GOOGLE_API_KEY\");\n if (!this.apiKey) {\n throw new Error(\n \"Please set an API key for Google GenerativeAI \" +\n \"in the environment variable GOOGLE_API_KEY \" +\n \"or in the `apiKey` field of the \" +\n \"ChatGoogleGenerativeAI constructor\"\n );\n }\n\n this.safetySettings = fields.safetySettings ?? 
this.safetySettings;\n if (this.safetySettings && this.safetySettings.length > 0) {\n const safetySettingsSet = new Set(\n this.safetySettings.map((s) => s.category)\n );\n if (safetySettingsSet.size !== this.safetySettings.length) {\n throw new Error(\n \"The categories in `safetySettings` array must be unique\"\n );\n }\n }\n\n this.streaming = fields.streaming ?? this.streaming;\n this.json = fields.json;\n\n this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;\n\n this.client = new GenerativeAI(this.apiKey).getGenerativeModel(\n {\n model: this.model,\n safetySettings: this.safetySettings as SafetySetting[],\n generationConfig: {\n stopSequences: this.stopSequences,\n maxOutputTokens: this.maxOutputTokens,\n temperature: this.temperature,\n topP: this.topP,\n topK: this.topK,\n ...(this.json ? { responseMimeType: \"application/json\" } : {}),\n ...(this.thinkingConfig\n ? { thinkingConfig: this.thinkingConfig }\n : {}),\n },\n },\n {\n apiVersion: fields.apiVersion,\n baseUrl: fields.baseUrl,\n customHeaders: fields.customHeaders,\n }\n );\n this.streamUsage = fields.streamUsage ?? this.streamUsage;\n }\n\n useCachedContent(\n cachedContent: CachedContent,\n modelParams?: ModelParams,\n requestOptions?: RequestOptions\n ): void {\n if (!this.apiKey) return;\n this.client = new GenerativeAI(\n this.apiKey\n ).getGenerativeModelFromCachedContent(\n cachedContent,\n modelParams,\n requestOptions\n );\n }\n\n get useSystemInstruction(): boolean {\n return typeof this.convertSystemMessageToHumanContent === \"boolean\"\n ? 
!this.convertSystemMessageToHumanContent\n : this.computeUseSystemInstruction;\n }\n\n get computeUseSystemInstruction(): boolean {\n // This works on models from April 2024 and later\n // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later\n // AI Studio: gemini-1.5-pro-latest\n if (this.model === \"gemini-1.0-pro-001\") {\n return false;\n } else if (this.model.startsWith(\"gemini-pro-vision\")) {\n return false;\n } else if (this.model.startsWith(\"gemini-1.0-pro-vision\")) {\n return false;\n } else if (this.model === \"gemini-pro\") {\n // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001\n return false;\n }\n return true;\n }\n\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams {\n return {\n ls_provider: \"google_genai\",\n ls_model_name: this.model,\n ls_model_type: \"chat\",\n ls_temperature: this.client.generationConfig.temperature,\n ls_max_tokens: this.client.generationConfig.maxOutputTokens,\n ls_stop: options.stop,\n };\n }\n\n _combineLLMOutput() {\n return [];\n }\n\n _llmType() {\n return \"googlegenerativeai\";\n }\n\n override bindTools(\n tools: GoogleGenerativeAIToolType[],\n kwargs?: Partial<GoogleGenerativeAIChatCallOptions>\n ): Runnable<\n BaseLanguageModelInput,\n AIMessageChunk,\n GoogleGenerativeAIChatCallOptions\n > {\n return this.withConfig({\n tools: convertToolsToGenAI(tools)?.tools,\n ...kwargs,\n });\n }\n\n invocationParams(\n options?: this[\"ParsedCallOptions\"]\n ): Omit<GenerateContentRequest, \"contents\"> {\n const toolsAndConfig = options?.tools?.length\n ? 
convertToolsToGenAI(options.tools, {\n toolChoice: options.tool_choice,\n allowedFunctionNames: options.allowedFunctionNames,\n })\n : undefined;\n\n if (options?.responseSchema) {\n this.client.generationConfig.responseSchema = options.responseSchema;\n this.client.generationConfig.responseMimeType = \"application/json\";\n } else {\n this.client.generationConfig.responseSchema = undefined;\n this.client.generationConfig.responseMimeType = this.json\n ? \"application/json\"\n : undefined;\n }\n\n return {\n ...(toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {}),\n ...(toolsAndConfig?.toolConfig\n ? { toolConfig: toolsAndConfig.toolConfig }\n : {}),\n };\n }\n\n async _generate(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n options.signal?.throwIfAborted();\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n\n // Handle streaming\n if (this.streaming) {\n const tokenUsage: TokenUsage = {};\n const stream = this._streamResponseChunks(messages, options, runManager);\n const finalChunks: ChatGenerationChunk[] = [];\n\n for await (const chunk of stream) {\n const index =\n (chunk.generationInfo as NewTokenIndices)?.completion ?? 
0;\n if (finalChunks[index] === undefined) {\n finalChunks[index] = chunk;\n } else {\n finalChunks[index] = finalChunks[index].concat(chunk);\n }\n }\n const generations = finalChunks.filter(\n (c): c is ChatGenerationChunk => c !== undefined\n );\n\n return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };\n }\n\n const res = await this.completionWithRetry({\n ...parameters,\n contents: actualPrompt,\n });\n\n let usageMetadata: UsageMetadata | undefined;\n if (\"usageMetadata\" in res.response) {\n usageMetadata = convertUsageMetadata(\n res.response.usageMetadata,\n this.model\n );\n }\n\n const generationResult = mapGenerateContentResultToChatResult(\n res.response,\n {\n usageMetadata,\n }\n );\n // may not have generations in output if there was a refusal for safety reasons, malformed function call, etc.\n if (generationResult.generations?.length > 0) {\n await runManager?.handleLLMNewToken(\n generationResult.generations[0]?.text ?? \"\"\n );\n }\n return generationResult;\n }\n\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const prompt = convertBaseMessagesToContent(\n messages,\n this._isMultimodalModel,\n this.useSystemInstruction,\n this.model\n );\n let actualPrompt = prompt;\n if (prompt[0].role === \"system\") {\n const [systemInstruction] = prompt;\n this.client.systemInstruction = systemInstruction;\n actualPrompt = prompt.slice(1);\n }\n const parameters = this.invocationParams(options);\n const request = {\n ...parameters,\n contents: actualPrompt,\n };\n const stream = await this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n const { stream } = await this.client.generateContentStream(request, {\n signal: options?.signal,\n });\n return stream;\n }\n );\n\n let usageMetadata: UsageMetadata | undefined;\n // Keep prior cumulative counts for calculating token deltas while 
streaming\n let prevPromptTokenCount = 0;\n let prevCandidatesTokenCount = 0;\n let prevTotalTokenCount = 0;\n let index = 0;\n for await (const response of stream) {\n if (options.signal?.aborted) {\n return;\n }\n if (\n \"usageMetadata\" in response &&\n response.usageMetadata !== undefined &&\n this.streamUsage !== false &&\n options.streamUsage !== false\n ) {\n usageMetadata = convertUsageMetadata(\n response.usageMetadata,\n this.model\n );\n\n // Under the hood, LangChain combines the prompt tokens. Google returns the updated\n // total each time, so we need to find the difference between the tokens.\n const newPromptTokenCount =\n response.usageMetadata.promptTokenCount ?? 0;\n usageMetadata.input_tokens = Math.max(\n 0,\n newPromptTokenCount - prevPromptTokenCount\n );\n prevPromptTokenCount = newPromptTokenCount;\n\n const newCandidatesTokenCount =\n response.usageMetadata.candidatesTokenCount ?? 0;\n usageMetadata.output_tokens = Math.max(\n 0,\n newCandidatesTokenCount - prevCandidatesTokenCount\n );\n prevCandidatesTokenCount = newCandidatesTokenCount;\n\n const newTotalTokenCount = response.usageMetadata.totalTokenCount ?? 0;\n usageMetadata.total_tokens = Math.max(\n 0,\n newTotalTokenCount - prevTotalTokenCount\n );\n prevTotalTokenCount = newTotalTokenCount;\n }\n\n const chunk = convertResponseContentToChatGenerationChunk(response, {\n usageMetadata,\n index,\n });\n index += 1;\n if (!chunk) {\n continue;\n }\n\n yield chunk;\n await runManager?.handleLLMNewToken(chunk.text ?? 
\"\");\n }\n }\n\n async completionWithRetry(\n request: string | GenerateContentRequest | (string | GenerativeAIPart)[],\n options?: this[\"ParsedCallOptions\"]\n ) {\n return this.caller.callWithOptions(\n { signal: options?.signal },\n async () => {\n try {\n return await this.client.generateContent(request, {\n signal: options?.signal,\n });\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } catch (e: any) {\n // TODO: Improve error handling\n if (e.message?.includes(\"400 Bad Request\")) {\n e.status = 400;\n }\n throw e;\n }\n }\n );\n }\n\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? 
{};\n }\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>,\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n > {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const schema: InteropZodType<RunOutput> | Record<string, any> =\n outputSchema;\n const name = config?.name;\n const method = config?.method;\n const includeRaw = config?.includeRaw;\n if (method === \"jsonMode\") {\n throw new Error(\n `ChatGoogleGenerativeAI only supports \"jsonSchema\" or \"functionCalling\" as a method.`\n );\n }\n\n let llm;\n let outputParser: BaseLLMOutputParser<RunOutput>;\n if (method === \"functionCalling\") {\n let functionName = name ?? 
\"extract\";\n let tools: GoogleGenerativeAIFunctionDeclarationsTool[];\n if (isInteropZodSchema(schema)) {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n tools = [\n {\n functionDeclarations: [\n {\n name: functionName,\n description:\n jsonSchema.description ?? \"A function available to call.\",\n parameters: jsonSchema as GenerativeAIFunctionDeclarationSchema,\n },\n ],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<\n InferInteropZodOutput<typeof schema>\n >({\n returnSingle: true,\n keyName: functionName,\n zodSchema: schema,\n });\n } else {\n let geminiFunctionDefinition: GenerativeAIFunctionDeclaration;\n if (\n typeof schema.name === \"string\" &&\n typeof schema.parameters === \"object\" &&\n schema.parameters != null\n ) {\n geminiFunctionDefinition = schema as GenerativeAIFunctionDeclaration;\n geminiFunctionDefinition.parameters = removeAdditionalProperties(\n schema.parameters\n ) as GenerativeAIFunctionDeclarationSchema;\n functionName = schema.name;\n } else {\n geminiFunctionDefinition = {\n name: functionName,\n description: schema.description ?? 
\"\",\n parameters: removeAdditionalProperties(\n schema\n ) as GenerativeAIFunctionDeclarationSchema,\n };\n }\n tools = [\n {\n functionDeclarations: [geminiFunctionDefinition],\n },\n ];\n outputParser = new GoogleGenerativeAIToolsOutputParser<RunOutput>({\n returnSingle: true,\n keyName: functionName,\n });\n }\n llm = this.bindTools(tools).withConfig({\n allowedFunctionNames: [functionName],\n });\n } else {\n const jsonSchema = schemaToGenerativeAIParameters(schema);\n llm = this.withConfig({\n responseSchema: jsonSchema as Schema,\n });\n outputParser = new JsonOutputParser();\n }\n\n if (!includeRaw) {\n return llm.pipe(outputParser).withConfig({\n runName: \"ChatGoogleGenerativeAIStructuredOutput\",\n }) as Runnable<BaseLanguageModelInput, RunOutput>;\n }\n\n const parserAssign = RunnablePassthrough.assign({\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n parsed: (input: any, config) => outputParser.invoke(input.raw, config),\n });\n const parserNone = RunnablePassthrough.assign({\n parsed: () => null,\n });\n const parsedWithFallback = parserAssign.withFallbacks({\n fallbacks: [parserNone],\n });\n return RunnableSequence.from<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n >([\n {\n raw: llm,\n },\n parsedWithFallback,\n ]).withConfig({\n runName: \"StructuredOutputRunnable\",\n });\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAilBA,IAAa,yBAAb,cACUA,0DAEV;CACE,OAAO,UAAU;AACf,SAAO;;CAGT,kBAAkB;CAElB,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,kBACT;;CAGH,eAAe;EAAC;EAAa;EAAe;EAAe;CAE3D,IAAI,aAAa;AACf,SAAO,EACL,QAAQ,kBACT;;CAGH;CAEA;CAEA;CAEA;CAEA;CAEA,gBAA0B,EAAE;CAE5B;CAEA;CAEA,YAAY;CAEZ;CAEA,cAAc;CAEd;CAEA;CAEA,AAAQ;CAER,IAAI,qBAAqB;AACvB,SACE,KAAK,MAAM,SAAS,SAAS,IAC7B,KAAK,MAAM,WAAW,aAAa,IACnC,KAAK,MAAM,WAAW,WAAW,IAChC,KAAK,MAAM,WAAW,WAAW,IAChC,CAAC,KAAK,MAAM,WAAW,aAAa,IACtC,KAAK,MAAM,WAAW,WAAW;;CASrC,YACE,eACA,WACA;EACA,MAAM,SACJ,OAAO,kBAAkB,WACrB;GAAE,GAAI,aAAa,EAAE;GAAG,OAAO;GAAe,GAC9C;AACN,QAAM,OAAO;AAEb,OAAK,QAAQ,OAAO,MAAM,QAAQ,aAAa,GAAG;AAElD,OAAK,kBAAkB,OAAO,mBAAmB,KAAK;AAEtD,MAAI,KAAK,mBAAmB,KAAK,kBAAkB,EACjD,OAAM,IAAI,MAAM,+CAA+C;AAGjE,OAAK,cAAc,OAAO,eAAe,KAAK;AAC9C,MAAI,KAAK,gBAAgB,KAAK,cAAc,KAAK,KAAK,cAAc,GAClE,OAAM,IAAI,MAAM,kDAAkD;AAGpE,OAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM,oCAAoC;AAGtD,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM,0BAA0B;AAG5C,OAAK,OAAO,OAAO,QAAQ,KAAK;AAChC,MAAI,KAAK,QAAQ,KAAK,OAAO,EAC3B,OAAM,IAAI,MAAM,oCAAoC;AAGtD,OAAK,gBAAgB,OAAO,iBAAiB,KAAK;AAElD,OAAK,SAAS,OAAO,gEAAiC,iBAAiB;AACvE,MAAI,CAAC,KAAK,OACR,OAAM,IAAI,MACR,8JAID;AAGH,OAAK,iBAAiB,OAAO,kBAAkB,KAAK;AACpD,MAAI,KAAK,kBAAkB,KAAK,eAAe,SAAS,GAItD;OAH0B,IAAI,IAC5B,KAAK,eAAe,KAAK,MAAM,EAAE,SAAS,CAC3C,CACqB,SAAS,KAAK,eAAe,OACjD,OAAM,IAAI,MACR,0DACD;;AAIL,OAAK,YAAY,OAAO,aAAa,KAAK;AAC1C,OAAK,OAAO,OAAO;AAEnB,OAAK,iBAAiB,OAAO,kBAAkB,KAAK;AAEpD,OAAK,SAAS,IAAIC,yCAAa,KAAK,OAAO,CAAC,mBAC1C;GACE,OAAO,KAAK;GACZ,gBAAgB,KAAK;GACrB,kBAAkB;IAChB,eAAe,KAAK;IACpB,iBAAiB,KAAK;IACtB,aAAa,KAAK;I
AClB,MAAM,KAAK;IACX,MAAM,KAAK;IACX,GAAI,KAAK,OAAO,EAAE,kBAAkB,oBAAoB,GAAG,EAAE;IAC7D,GAAI,KAAK,iBACL,EAAE,gBAAgB,KAAK,gBAAgB,GACvC,EAAE;IACP;GACF,EACD;GACE,YAAY,OAAO;GACnB,SAAS,OAAO;GAChB,eAAe,OAAO;GACvB,CACF;AACD,OAAK,cAAc,OAAO,eAAe,KAAK;;CAGhD,iBACE,eACA,aACA,gBACM;AACN,MAAI,CAAC,KAAK,OAAQ;AAClB,OAAK,SAAS,IAAIA,yCAChB,KAAK,OACN,CAAC,oCACA,eACA,aACA,eACD;;CAGH,IAAI,uBAAgC;AAClC,SAAO,OAAO,KAAK,uCAAuC,YACtD,CAAC,KAAK,qCACN,KAAK;;CAGX,IAAI,8BAAuC;AAIzC,MAAI,KAAK,UAAU,qBACjB,QAAO;WACE,KAAK,MAAM,WAAW,oBAAoB,CACnD,QAAO;WACE,KAAK,MAAM,WAAW,wBAAwB,CACvD,QAAO;WACE,KAAK,UAAU,aAExB,QAAO;AAET,SAAO;;CAGT,YAAY,SAAqD;AAC/D,SAAO;GACL,aAAa;GACb,eAAe,KAAK;GACpB,eAAe;GACf,gBAAgB,KAAK,OAAO,iBAAiB;GAC7C,eAAe,KAAK,OAAO,iBAAiB;GAC5C,SAAS,QAAQ;GAClB;;CAGH,oBAAoB;AAClB,SAAO,EAAE;;CAGX,WAAW;AACT,SAAO;;CAGT,AAAS,UACP,OACA,QAKA;AACA,SAAO,KAAK,WAAW;GACrB,OAAOC,kCAAoB,MAAM,EAAE;GACnC,GAAG;GACJ,CAAC;;CAGJ,iBACE,SAC0C;EAC1C,MAAM,iBAAiB,SAAS,OAAO,SACnCA,kCAAoB,QAAQ,OAAO;GACjC,YAAY,QAAQ;GACpB,sBAAsB,QAAQ;GAC/B,CAAC,GACF;AAEJ,MAAI,SAAS,gBAAgB;AAC3B,QAAK,OAAO,iBAAiB,iBAAiB,QAAQ;AACtD,QAAK,OAAO,iBAAiB,mBAAmB;SAC3C;AACL,QAAK,OAAO,iBAAiB,iBAAiB;AAC9C,QAAK,OAAO,iBAAiB,mBAAmB,KAAK,OACjD,qBACA;;AAGN,SAAO;GACL,GAAI,gBAAgB,QAAQ,EAAE,OAAO,eAAe,OAAO,GAAG,EAAE;GAChE,GAAI,gBAAgB,aAChB,EAAE,YAAY,eAAe,YAAY,GACzC,EAAE;GACP;;CAGH,MAAM,UACJ,UACA,SACA,YACqB;AACrB,UAAQ,QAAQ,gBAAgB;EAChC,MAAM,SAASC,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,qBAAqB;AAC5B,QAAK,OAAO,oBAAoB;AAChC,kBAAe,OAAO,MAAM,EAAE;;EAEhC,MAAM,aAAa,KAAK,iBAAiB,QAAQ;AAGjD,MAAI,KAAK,WAAW;GAClB,MAAM,aAAyB,EAAE;GACjC,MAAM,SAAS,KAAK,sBAAsB,UAAU,SAAS,WAAW;GACxE,MAAM,cAAqC,EAAE;AAE7C,cAAW,MAAM,SAAS,QAAQ;IAChC,MAAM,QACH,MAAM,gBAAoC,cAAc;AAC3D,QAAI,YAAY,WAAW,OACzB,aAAY,SAAS;QAErB,aAAY,SAAS,YAAY,OAAO,OAAO,MAAM;;AAOzD,UAAO;IAAE,aAJW,YAAY,QAC7B,MAAgC,MAAM,OACxC;IAEqB,WAAW,EAAE,qBAAqB,YAAY;IAAE;;EAGxE,MAAM,MAAM,MAAM,KAAK,oBAAoB;GACzC,GAAG;GACH,UAAU;GACX,CAAC;EAEF,IAAI;AACJ,MAAI,mBAAmB,IAAI,SACzB,iBAAgBC,oCA
Cd,IAAI,SAAS,eACb,KAAK,MACN;EAGH,MAAM,mBAAmBC,oDACvB,IAAI,UACJ,EACE,eACD,CACF;AAED,MAAI,iBAAiB,aAAa,SAAS,EACzC,OAAM,YAAY,kBAChB,iBAAiB,YAAY,IAAI,QAAQ,GAC1C;AAEH,SAAO;;CAGT,OAAO,sBACL,UACA,SACA,YACqC;EACrC,MAAM,SAASF,4CACb,UACA,KAAK,oBACL,KAAK,sBACL,KAAK,MACN;EACD,IAAI,eAAe;AACnB,MAAI,OAAO,GAAG,SAAS,UAAU;GAC/B,MAAM,CAAC,qBAAqB;AAC5B,QAAK,OAAO,oBAAoB;AAChC,kBAAe,OAAO,MAAM,EAAE;;EAGhC,MAAM,UAAU;GACd,GAFiB,KAAK,iBAAiB,QAAQ;GAG/C,UAAU;GACX;EACD,MAAM,SAAS,MAAM,KAAK,OAAO,gBAC/B,EAAE,QAAQ,SAAS,QAAQ,EAC3B,YAAY;GACV,MAAM,EAAE,WAAW,MAAM,KAAK,OAAO,sBAAsB,SAAS,EAClE,QAAQ,SAAS,QAClB,CAAC;AACF,UAAO;IAEV;EAED,IAAI;EAEJ,IAAI,uBAAuB;EAC3B,IAAI,2BAA2B;EAC/B,IAAI,sBAAsB;EAC1B,IAAI,QAAQ;AACZ,aAAW,MAAM,YAAY,QAAQ;AACnC,OAAI,QAAQ,QAAQ,QAClB;AAEF,OACE,mBAAmB,YACnB,SAAS,kBAAkB,UAC3B,KAAK,gBAAgB,SACrB,QAAQ,gBAAgB,OACxB;AACA,oBAAgBC,oCACd,SAAS,eACT,KAAK,MACN;IAID,MAAM,sBACJ,SAAS,cAAc,oBAAoB;AAC7C,kBAAc,eAAe,KAAK,IAChC,GACA,sBAAsB,qBACvB;AACD,2BAAuB;IAEvB,MAAM,0BACJ,SAAS,cAAc,wBAAwB;AACjD,kBAAc,gBAAgB,KAAK,IACjC,GACA,0BAA0B,yBAC3B;AACD,+BAA2B;IAE3B,MAAM,qBAAqB,SAAS,cAAc,mBAAmB;AACrE,kBAAc,eAAe,KAAK,IAChC,GACA,qBAAqB,oBACtB;AACD,0BAAsB;;GAGxB,MAAM,QAAQE,2DAA4C,UAAU;IAClE;IACA;IACD,CAAC;AACF,YAAS;AACT,OAAI,CAAC,MACH;AAGF,SAAM;AACN,SAAM,YAAY,kBAAkB,MAAM,QAAQ,GAAG;;;CAIzD,MAAM,oBACJ,SACA,SACA;AACA,SAAO,KAAK,OAAO,gBACjB,EAAE,QAAQ,SAAS,QAAQ,EAC3B,YAAY;AACV,OAAI;AACF,WAAO,MAAM,KAAK,OAAO,gBAAgB,SAAS,EAChD,QAAQ,SAAS,QAClB,CAAC;YAEK,GAAQ;AAEf,QAAI,EAAE,SAAS,SAAS,kBAAkB,CACxC,GAAE,SAAS;AAEb,UAAM;;IAGX;;;;;;;;;;;;;;;;;;;CAoBH,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,EAAE;;CAyBnC,qBAIE,cAIA,QAMI;EAEJ,MAAM,SACJ;EACF,MAAM,OAAO,QAAQ;EACrB,MAAM,SAAS,QAAQ;EACvB,MAAM,aAAa,QAAQ;AAC3B,MAAI,WAAW,WACb,OAAM,IAAI,MACR,sFACD;EAGH,IAAI;EACJ,IAAI;AACJ,MAAI,WAAW,mBAAmB;GAChC,IAAI,eAAe,QAAQ;GAC3B,IAAI;AACJ,2DAAuB,OAAO,EAAE;IAC9B,MAAM,aAAaC,+DAA+B,OAAO;AACzD,YAAQ,CACN,EACE,sBAAsB,CACpB;KACE,MAAM;KACN,aACE,WAAW,eAAe;KAC5B,YAAY;KACb,CACF,EACF,CACF;AACD,mBAAe,IAAIC,2DAEjB;KACA,cAAc;KACd,SAAS;KACT,WAAW;KACZ,CAAC;UACG;IACL,IAAI;AAC
J,QACE,OAAO,OAAO,SAAS,YACvB,OAAO,OAAO,eAAe,YAC7B,OAAO,cAAc,MACrB;AACA,gCAA2B;AAC3B,8BAAyB,aAAaC,2DACpC,OAAO,WACR;AACD,oBAAe,OAAO;UAEtB,4BAA2B;KACzB,MAAM;KACN,aAAa,OAAO,eAAe;KACnC,YAAYA,2DACV,OACD;KACF;AAEH,YAAQ,CACN,EACE,sBAAsB,CAAC,yBAAyB,EACjD,CACF;AACD,mBAAe,IAAID,2DAA+C;KAChE,cAAc;KACd,SAAS;KACV,CAAC;;AAEJ,SAAM,KAAK,UAAU,MAAM,CAAC,WAAW,EACrC,sBAAsB,CAAC,aAAa,EACrC,CAAC;SACG;GACL,MAAM,aAAaD,+DAA+B,OAAO;AACzD,SAAM,KAAK,WAAW,EACpB,gBAAgB,YACjB,CAAC;AACF,kBAAe,IAAIG,iDAAkB;;AAGvC,MAAI,CAAC,WACH,QAAO,IAAI,KAAK,aAAa,CAAC,WAAW,EACvC,SAAS,0CACV,CAAC;EAGJ,MAAM,eAAeC,8CAAoB,OAAO,EAE9C,SAAS,OAAY,WAAW,aAAa,OAAO,MAAM,KAAK,OAAO,EACvE,CAAC;EACF,MAAM,aAAaA,8CAAoB,OAAO,EAC5C,cAAc,MACf,CAAC;EACF,MAAM,qBAAqB,aAAa,cAAc,EACpD,WAAW,CAAC,WAAW,EACxB,CAAC;AACF,SAAOC,2CAAiB,KAGtB,CACA,EACE,KAAK,KACN,EACD,mBACD,CAAC,CAAC,WAAW,EACZ,SAAS,4BACV,CAAC"}
@@ -42,7 +42,7 @@ interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGe
42
42
  *
43
43
  * Note: The format must follow the pattern - `{model}`
44
44
  */
45
- model: string;
45
+ model: ModelParams["model"];
46
46
  /**
47
47
  * Controls the randomness of the output.
48
48
  *
@@ -536,6 +536,7 @@ declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAICha
536
536
  thinkingConfig?: GoogleGenerativeAIThinkingConfig;
537
537
  private client;
538
538
  get _isMultimodalModel(): boolean;
539
+ constructor(model: ModelParams["model"], fields?: Omit<GoogleGenerativeAIChatInput, "model">);
539
540
  constructor(fields: GoogleGenerativeAIChatInput);
540
541
  useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;
541
542
  get useSystemInstruction(): boolean;
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.d.cts","names":["GenerateContentRequest","SafetySetting","Part","GenerativeAIPart","ModelParams","RequestOptions","CachedContent","Schema","CallbackManagerForLLMRun","AIMessageChunk","BaseMessage","ChatGenerationChunk","ChatResult","BaseChatModel","BaseChatModelCallOptions","LangSmithParams","BaseChatModelParams","ModelProfile","BaseLanguageModelInput","StructuredOutputMethodOptions","Runnable","InteropZodType","GoogleGenerativeAIThinkingConfig","GoogleGenerativeAIToolType","BaseMessageExamplePair","GoogleGenerativeAIChatCallOptions","GoogleGenerativeAIChatInput","Record","Pick","ChatGoogleGenerativeAI","RunOutput","Partial","Omit","Promise","AsyncGenerator","_google_generative_ai0","GenerateContentResult"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, ModelParams, RequestOptions, type CachedContent, Schema } from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { BaseChatModel, type BaseChatModelCallOptions, type LangSmithParams, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { GoogleGenerativeAIThinkingConfig, GoogleGenerativeAIToolType } from \"./types.js\";\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * 
Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: string;\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value 
varies by model\n */\n topK?: number;\n /**\n * The set of character sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n /**\n * Google API key to use\n */\n apiKey?: string;\n /**\n * Google API version to use\n */\n apiVersion?: string;\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n /**\n * Google API custom headers to use\n */\n customHeaders?: Record<string, string>;\n /** Whether to stream the results or not */\n streaming?: boolean;\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n /**\n * Optional. Config for thinking features. 
An error will be returned if this\n * field is set for models that don't support thinking.\n */\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n}\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * 
console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate 
\\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? 
chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? 
Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. 
The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput {\n static lc_name(): string;\n lc_serializable: boolean;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_namespace: string[];\n get lc_aliases(): {\n apiKey: string;\n };\n model: string;\n temperature?: number;\n maxOutputTokens?: number;\n topP?: number;\n topK?: number;\n stopSequences: string[];\n safetySettings?: SafetySetting[];\n apiKey?: string;\n streaming: boolean;\n json?: boolean;\n streamUsage: boolean;\n convertSystemMessageToHumanContent: boolean | undefined;\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n private client;\n get _isMultimodalModel(): boolean;\n constructor(fields: GoogleGenerativeAIChatInput);\n useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;\n get useSystemInstruction(): boolean;\n get computeUseSystemInstruction(): boolean;\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams;\n _combineLLMOutput(): never[];\n _llmType(): string;\n bindTools(tools: GoogleGenerativeAIToolType[], kwargs?: Partial<GoogleGenerativeAIChatCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleGenerativeAIChatCallOptions>;\n invocationParams(options?: this[\"ParsedCallOptions\"]): 
Omit<GenerateContentRequest, \"contents\">;\n _generate(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this[\"ParsedCallOptions\"]): Promise<import(\"@google/generative-ai\").GenerateContentResult>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# 
sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;;;;;KAUYwB,sBAAAA;SACDd;UACCA;AAFZ,CAAA;AAIiBe,UAAAA,iCAAAA,SAA0CX,wBAAT,CAAA;EACtCS,KAAAA,CAAAA,EAAAA,0BAAAA,EAAAA;EAeShB;;AAhB8D;AAqBnF;EAA+EkB,oBAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EA6D1DxB;;;;;EA7DyD,WAAA,CAAA,EAAA,OAAA;EA2dzD4B;;;EAgBA5B,cAAAA,CAAAA,EAhfAM,MAgfAN;;;;;AAU0EI,UArf9EqB,2BAAAA,SAAoCV,mBAqf0CX,EArfrBuB,IAqfqBvB,CArfhBoB,iCAqfgBpB,EAAAA,aAAAA,CAAAA,CAAAA;EAG1CU;;;;;EAGqFN,KAAAA,EAAAA,MAAAA;EAAgBgB;;;;;;;;;;EAGZd,WAAAA,CAAAA,EAAAA,MAAAA;EAAfuB;;;EAClCC,eAAAA,CAAAA,EAAAA,MAAmFC;EAAxCH;;;;;;;;;;;;EAoBvEN,IAAAA,CAAAA,EAAAA,MAAAA;EAAkDG;;;;;;;;;;EAxDwC,IAAA,CAAA,EAAA,MAAA;;;;;;;;;;;;;;;;mBA9ZtI7B;;;;;;;;;;;;;;;;kBAgBD0B;;;;;;;;;;;;;;;;;;;;;mBAqBCL;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAyXAO,sBAAAA,SAA+BhB,cAAcY,mCAAmChB,2BAA2BiB;;;;;;;;;;;;;;;;mBAgB3GzB;;;;;;mBAMAqB;;;sBAGGI;kCACYpB,6BAA6BF,8BAA8BC;;;mDAG1CU;;;mBAGhCQ,uCAAuCQ,QAAQN,qCAAqCL,SAASF,wBAAwBT,gBAAgBgB;yDAC/FO,KAAKhC;sBACxCU,gEAAgEF,2BAA2ByB,QAAQrB;kCACvFF,gEAAgEF,2BAA2B0B,eAAevB;wCACpGX,mCAAmCG,+CAA2D8B,QAA3CE,sBAAAA,CAAmFC,qBAAAA;;;;;;;;;;;;;;;;;;iBAkB7JnB;yCACwBU,sBAAsBA,mCAAmCN,eAAeS,aAAaH,8BAA8BR,uCAAuCC,SAASF,wBAAwBY;yCAC3LH,sBAAsBA,mCAAmCN,eAAeS,aAAaH,8BAA8BR,sCAAsCC,SAASF;SAChMR;YACGoB"}
1
+ {"version":3,"file":"chat_models.d.cts","names":[],"sources":["../src/chat_models.ts"],"mappings":";;;;;;;;;;;;;KAwEY,sBAAA;EACV,KAAA,EAAO,WAAA;EACP,MAAA,EAAQ,WAAA;AAAA;AAAA,UAGO,iCAAA,SAA0C,wBAAA;EACzD,KAAA,GAAQ,0BAAA;EAJW;;;;EASnB,oBAAA;EATmB;;AAGrB;;;EAYE,WAAA;EAKiB;;;EAAjB,cAAA,GAAiB,MAAA;AAAA;;;;UAMF,2BAAA,SAEb,mBAAA,EACA,IAAA,CAAK,iCAAA;EATP;;;;AAMF;EASE,KAAA,EAAO,WAAA;;;;;;;;;;;EAYP,WAAA;EAlBE;;;EAuBF,eAAA;EALA;;;;;;;;;;;;EAmBA,IAAA;EA4DA;;;;;;AAyYF;;;;EAzbE,IAAA;EA6diB;;;;;;;;EAndjB,aAAA;EAonBiD;;;;;;EA5mBjD,cAAA,GAAiB,aAAA;EAkoBd;;;EA7nBH,MAAA;EAuqBe;;;EAlqBf,UAAA;EAwuBe;;;EAnuBf,OAAA;EA8zBuD;;;EAzzBvD,aAAA,GAAgB,MAAA;EAs2BI;EAn2BpB,SAAA;EAs2BqB;;;;;EA/1BrB,IAAA;EAm2BG;;;;;;;EA11BH,kCAAA;EAq2B2C;;;;EA/1B3C,cAAA,GAAiB,gCAAA;AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cA0XN,sBAAA,SACH,aAAA,CAAc,iCAAA,EAAmC,cAAA,aAC9C,2BAAA;EAAA,OAEJ,OAAA,CAAA;EAIP,eAAA;EAAA,IAEI,UAAA,CAAA;IAAA,CAAiB,GAAA;EAAA;EAMrB,YAAA;EAAA,IAEI,UAAA,CAAA;;;EAMJ,KAAA;EAEA,WAAA;EAEA,eAAA;EAEA,IAAA;EAEA,IAAA;EAEA,aAAA;EAEA,cAAA,GAAiB,aAAA;EAEjB,MAAA;EAEA,SAAA;EAEA,IAAA;EAEA,WAAA;EAEA,kCAAA;EAEA,cAAA,GAAiB,gCAAA;EAAA,QAET,MAAA;EAAA,IAEJ,kBAAA,CAAA;EAWJ,WAAA,CACE,KAAA,EAAO,WAAA,WACP,MAAA,GAAS,IAAA,CAAK,2BAAA;EAEhB,WAAA,CAAY,MAAA,EAAQ,2BAAA;EA4FpB,gBAAA,CACE,aAAA,EAAe,aAAA,EACf,WAAA,GAAc,WAAA,EACd,cAAA,GAAiB,cAAA;EAAA,IAYf,oBAAA,CAAA;EAAA,IAMA,2BAAA,CAAA;EAiBJ,WAAA,CAAY,OAAA,8BAAqC,eAAA;EAWjD,iBAAA,CAAA;EAIA,QAAA,CAAA;EAIS,SAAA,CACP,KAAA,EAAO,0BAAA,IACP,MAAA,GAAS,OAAA,CAAQ,iCAAA,IAChB,QAAA,CACD,sBAAA,EACA,cAAA,EACA,iCAAA;EAQF,gBAAA,CACE,OAAA,+BACC,IAAA,CAAK,sBAAA;EA0BF,SAAA,CACJ,QAAA,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,OAAA,CAAQ,UAAA;EAkEJ,qBAAA,CACL,QAA
A,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,cAAA,CAAe,mBAAA;EAyFZ,mBAAA,CACJ,OAAA,WAAkB,sBAAA,aAAmC,IAAA,KACrD,OAAA,+BAAmC,OAAA,CADkC,sBAAA,CAClC,qBAAA;;;;;;;;;;;;;;;;;;MAsCjC,OAAA,CAAA,GAAW,YAAA;EAIf,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,UACR,QAAA,CAAS,sBAAA,EAAwB,SAAA;EAEpC,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,SACR,QAAA,CAAS,sBAAA;IAA0B,GAAA,EAAK,WAAA;IAAa,MAAA,EAAQ,SAAA;EAAA;AAAA"}
@@ -42,7 +42,7 @@ interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGe
42
42
  *
43
43
  * Note: The format must follow the pattern - `{model}`
44
44
  */
45
- model: string;
45
+ model: ModelParams["model"];
46
46
  /**
47
47
  * Controls the randomness of the output.
48
48
  *
@@ -536,6 +536,7 @@ declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAICha
536
536
  thinkingConfig?: GoogleGenerativeAIThinkingConfig;
537
537
  private client;
538
538
  get _isMultimodalModel(): boolean;
539
+ constructor(model: ModelParams["model"], fields?: Omit<GoogleGenerativeAIChatInput, "model">);
539
540
  constructor(fields: GoogleGenerativeAIChatInput);
540
541
  useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;
541
542
  get useSystemInstruction(): boolean;
@@ -1 +1 @@
1
- {"version":3,"file":"chat_models.d.ts","names":["GenerateContentRequest","SafetySetting","Part","GenerativeAIPart","ModelParams","RequestOptions","CachedContent","Schema","CallbackManagerForLLMRun","AIMessageChunk","BaseMessage","ChatGenerationChunk","ChatResult","BaseChatModel","BaseChatModelCallOptions","LangSmithParams","BaseChatModelParams","ModelProfile","BaseLanguageModelInput","StructuredOutputMethodOptions","Runnable","InteropZodType","GoogleGenerativeAIThinkingConfig","GoogleGenerativeAIToolType","BaseMessageExamplePair","GoogleGenerativeAIChatCallOptions","GoogleGenerativeAIChatInput","Record","Pick","ChatGoogleGenerativeAI","RunOutput","Partial","Omit","Promise","AsyncGenerator","_google_generative_ai0","GenerateContentResult"],"sources":["../src/chat_models.d.ts"],"sourcesContent":["import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, ModelParams, RequestOptions, type CachedContent, Schema } from \"@google/generative-ai\";\nimport { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport { AIMessageChunk, BaseMessage } from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, ChatResult } from \"@langchain/core/outputs\";\nimport { BaseChatModel, type BaseChatModelCallOptions, type LangSmithParams, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\nimport { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport { BaseLanguageModelInput, StructuredOutputMethodOptions } from \"@langchain/core/language_models/base\";\nimport { Runnable } from \"@langchain/core/runnables\";\nimport { InteropZodType } from \"@langchain/core/utils/types\";\nimport { GoogleGenerativeAIThinkingConfig, GoogleGenerativeAIToolType } from \"./types.js\";\nexport type BaseMessageExamplePair = {\n input: BaseMessage;\n output: BaseMessage;\n};\nexport interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions {\n tools?: GoogleGenerativeAIToolType[];\n /**\n * 
Allowed functions to call when the mode is \"any\".\n * If empty, any one of the provided functions are called.\n */\n allowedFunctionNames?: string[];\n /**\n * Whether or not to include usage data, like token counts\n * in the streamed response chunks.\n * @default true\n */\n streamUsage?: boolean;\n /**\n * JSON schema to be returned by the model.\n */\n responseSchema?: Schema;\n}\n/**\n * An interface defining the input to the ChatGoogleGenerativeAI class.\n */\nexport interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, \"streamUsage\"> {\n /**\n * Model Name to use\n *\n * Note: The format must follow the pattern - `{model}`\n */\n model: string;\n /**\n * Controls the randomness of the output.\n *\n * Values can range from [0.0,2.0], inclusive. A value closer to 2.0\n * will produce responses that are more varied and creative, while\n * a value closer to 0.0 will typically result in less surprising\n * responses from the model.\n *\n * Note: The default value varies by model\n */\n temperature?: number;\n /**\n * Maximum number of tokens to generate in the completion.\n */\n maxOutputTokens?: number;\n /**\n * Top-p changes how the model selects tokens for output.\n *\n * Tokens are selected from most probable to least until the sum\n * of their probabilities equals the top-p value.\n *\n * For example, if tokens A, B, and C have a probability of\n * .3, .2, and .1 and the top-p value is .5, then the model will\n * select either A or B as the next token (using temperature).\n *\n * Note: The default value varies by model\n */\n topP?: number;\n /**\n * Top-k changes how the model selects tokens for output.\n *\n * A top-k of 1 means the selected token is the most probable among\n * all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-k of 3 means that the next token is selected from\n * among the 3 most probable tokens (using temperature).\n *\n * Note: The default value 
varies by model\n */\n topK?: number;\n /**\n * The set of character sequences (up to 5) that will stop output generation.\n * If specified, the API will stop at the first appearance of a stop\n * sequence.\n *\n * Note: The stop sequence will not be included as part of the response.\n * Note: stopSequences is only supported for Gemini models\n */\n stopSequences?: string[];\n /**\n * A list of unique `SafetySetting` instances for blocking unsafe content. The API will block\n * any prompts and responses that fail to meet the thresholds set by these settings. If there\n * is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use\n * the default safety setting for that category.\n */\n safetySettings?: SafetySetting[];\n /**\n * Google API key to use\n */\n apiKey?: string;\n /**\n * Google API version to use\n */\n apiVersion?: string;\n /**\n * Google API base URL to use\n */\n baseUrl?: string;\n /**\n * Google API custom headers to use\n */\n customHeaders?: Record<string, string>;\n /** Whether to stream the results or not */\n streaming?: boolean;\n /**\n * Whether or not to force the model to respond with JSON.\n * Available for `gemini-1.5` models and later.\n * @default false\n */\n json?: boolean;\n /**\n * Whether or not model supports system instructions.\n * The following models support system instructions:\n * - All Gemini 1.5 Pro model versions\n * - All Gemini 1.5 Flash model versions\n * - Gemini 1.0 Pro version gemini-1.0-pro-002\n */\n convertSystemMessageToHumanContent?: boolean | undefined;\n /**\n * Optional. Config for thinking features. 
An error will be returned if this\n * field is set for models that don't support thinking.\n */\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n}\n/**\n * Google Generative AI chat model integration.\n *\n * Setup:\n * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.\n *\n * ```bash\n * npm install @langchain/google-genai\n * export GOOGLE_API_KEY=\"your-api-key\"\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // When calling `.withConfig`, call options should be passed via the first argument\n * const llmWithArgsBound = llm.withConfig({\n * stop: [\"\\n\"],\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * stop: [\"\\n\"],\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatGoogleGenerativeAI } from '@langchain/google-genai';\n *\n * const llm = new ChatGoogleGenerativeAI({\n * model: \"gemini-1.5-flash\",\n * temperature: 0,\n * maxRetries: 2,\n * // apiKey: \"...\",\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * 
console.log(result);\n * ```\n *\n * ```txt\n * AIMessage {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"response_metadata\": {\n * \"finishReason\": \"STOP\",\n * \"index\": 0,\n * \"safetyRatings\": [\n * {\n * \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_HARASSMENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * },\n * {\n * \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n * \"probability\": \"NEGLIGIBLE\"\n * }\n * ]\n * },\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 149,\n * \"total_tokens\": 159\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There\",\n * \"response_metadata\": {\n * \"index\": 0\n * }\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 1,\n * \"total_tokens\": 11\n * }\n * }\n * AIMessageChunk {\n * \"content\": \" are a few ways to translate 
\\\"I love programming\\\" into French, depending on\",\n * }\n * AIMessageChunk {\n * \"content\": \" the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n\",\n * }\n * AIMessageChunk {\n * \"content\": \"* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This\",\n * }\n * AIMessageChunk {\n * \"content\": \" is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More\",\n * }\n * AIMessageChunk {\n * \"content\": \" specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and\",\n * }\n * AIMessageChunk {\n * \"content\": \" your intended audience. \\n\",\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? 
chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"There are a few ways to translate \\\"I love programming\\\" into French, depending on the level of formality and nuance you want to convey:\\n\\n**Formal:**\\n\\n* **J'aime la programmation.** (This is the most literal and formal translation.)\\n\\n**Informal:**\\n\\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\\n\\n**More specific:**\\n\\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\\n\\nThe best translation will depend on the context and your intended audience. \\n\",\n * \"usage_metadata\": {\n * \"input_tokens\": 10,\n * \"output_tokens\": 277,\n * \"total_tokens\": 287\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetWeather',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetWeather',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'Los Angeles, CA' },\n * type: 'tool_call'\n * },\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York, NY' },\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: \"Why don\\\\'t cats play poker?\",\n * punchline: \"Why don\\\\'t cats play poker? 
Because they always have an ace up their sleeve!\"\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Multimodal</strong></summary>\n *\n * ```typescript\n * import { HumanMessage } from '@langchain/core/messages';\n *\n * const imageUrl = \"https://example.com/image.jpg\";\n * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());\n * const base64Image = Buffer.from(imageData).toString('base64');\n *\n * const message = new HumanMessage({\n * content: [\n * { type: \"text\", text: \"describe the weather in this image\" },\n * {\n * type: \"image_url\",\n * image_url: { url: `data:image/jpeg;base64,${base64Image}` },\n * },\n * ]\n * });\n *\n * const imageDescriptionAiMsg = await llm.invoke([message]);\n * console.log(imageDescriptionAiMsg.content);\n * ```\n *\n * ```txt\n * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. 
There are no signs of rain or stormy conditions.\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 10, output_tokens: 149, total_tokens: 159 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Response Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForResponseMetadata = await llm.invoke(input);\n * console.log(aiMsgForResponseMetadata.response_metadata);\n * ```\n *\n * ```txt\n * {\n * finishReason: 'STOP',\n * index: 0,\n * safetyRatings: [\n * {\n * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n * probability: 'NEGLIGIBLE'\n * },\n * {\n * category: 'HARM_CATEGORY_HATE_SPEECH',\n * probability: 'NEGLIGIBLE'\n * },\n * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },\n * {\n * category: 'HARM_CATEGORY_DANGEROUS_CONTENT',\n * probability: 'NEGLIGIBLE'\n * }\n * ]\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Document Messages</strong></summary>\n *\n * This example will show you how to pass documents such as PDFs to Google\n * Generative AI through messages.\n *\n * ```typescript\n * const pdfPath = \"/Users/my_user/Downloads/invoice.pdf\";\n * const pdfBase64 = await fs.readFile(pdfPath, \"base64\");\n *\n * const response = await llm.invoke([\n * [\"system\", \"Use the provided documents to answer the question\"],\n * [\n * \"user\",\n * [\n * {\n * type: \"application/pdf\", // If the `type` field includes a single slash (`/`), it will be treated as inline data.\n * data: pdfBase64,\n * },\n * {\n * type: \"text\",\n * text: \"Summarize the contents of this PDF\",\n * },\n * ],\n * ],\n * ]);\n *\n * console.log(response.content);\n * ```\n *\n * ```txt\n * This is a billing invoice from Twitter Developers for X API Basic Access. 
The transaction date is January 7, 2025,\n * and the amount is $194.34, which has been paid. The subscription period is from January 7, 2025 21:02 to February 7, 2025 00:00 (UTC).\n * The tax is $0.00, with a tax rate of 0%. The total amount is $194.34. The payment was made using a Visa card ending in 7022,\n * expiring in 12/2026. The billing address is Brace Sproul, 1234 Main Street, San Francisco, CA, US 94103. The company being billed is\n * X Corp, located at 865 FM 1209 Building 2, Bastrop, TX, US 78602. Terms and conditions apply.\n * ```\n * </details>\n *\n * <br />\n */\nexport declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput {\n static lc_name(): string;\n lc_serializable: boolean;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n lc_namespace: string[];\n get lc_aliases(): {\n apiKey: string;\n };\n model: string;\n temperature?: number;\n maxOutputTokens?: number;\n topP?: number;\n topK?: number;\n stopSequences: string[];\n safetySettings?: SafetySetting[];\n apiKey?: string;\n streaming: boolean;\n json?: boolean;\n streamUsage: boolean;\n convertSystemMessageToHumanContent: boolean | undefined;\n thinkingConfig?: GoogleGenerativeAIThinkingConfig;\n private client;\n get _isMultimodalModel(): boolean;\n constructor(fields: GoogleGenerativeAIChatInput);\n useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;\n get useSystemInstruction(): boolean;\n get computeUseSystemInstruction(): boolean;\n getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams;\n _combineLLMOutput(): never[];\n _llmType(): string;\n bindTools(tools: GoogleGenerativeAIToolType[], kwargs?: Partial<GoogleGenerativeAIChatCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleGenerativeAIChatCallOptions>;\n invocationParams(options?: this[\"ParsedCallOptions\"]): 
Omit<GenerateContentRequest, \"contents\">;\n _generate(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;\n _streamResponseChunks(messages: BaseMessage[], options: this[\"ParsedCallOptions\"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;\n completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this[\"ParsedCallOptions\"]): Promise<import(\"@google/generative-ai\").GenerateContentResult>;\n /**\n * Return profiling information for the model.\n *\n * Provides information about the model's capabilities and constraints,\n * including token limits, multimodal support, and advanced features like\n * tool calling and structured output.\n *\n * @returns {ModelProfile} An object describing the model's capabilities and constraints\n *\n * @example\n * ```typescript\n * const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-flash\" });\n * const profile = model.profile;\n * console.log(profile.maxInputTokens); // 2000000\n * console.log(profile.imageInputs); // true\n * ```\n */\n get profile(): ModelProfile;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;\n withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {\n raw: BaseMessage;\n parsed: RunOutput;\n }>;\n}\n//# 
sourceMappingURL=chat_models.d.ts.map"],"mappings":";;;;;;;;;;;;;KAUYwB,sBAAAA;SACDd;UACCA;AAFZ,CAAA;AAIiBe,UAAAA,iCAAAA,SAA0CX,wBAAT,CAAA;EACtCS,KAAAA,CAAAA,EAAAA,0BAAAA,EAAAA;EAeShB;;AAhB8D;AAqBnF;EAA+EkB,oBAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EA6D1DxB;;;;;EA7DyD,WAAA,CAAA,EAAA,OAAA;EA2dzD4B;;;EAgBA5B,cAAAA,CAAAA,EAhfAM,MAgfAN;;;;;AAU0EI,UArf9EqB,2BAAAA,SAAoCV,mBAqf0CX,EArfrBuB,IAqfqBvB,CArfhBoB,iCAqfgBpB,EAAAA,aAAAA,CAAAA,CAAAA;EAG1CU;;;;;EAGqFN,KAAAA,EAAAA,MAAAA;EAAgBgB;;;;;;;;;;EAGZd,WAAAA,CAAAA,EAAAA,MAAAA;EAAfuB;;;EAClCC,eAAAA,CAAAA,EAAAA,MAAmFC;EAAxCH;;;;;;;;;;;;EAoBvEN,IAAAA,CAAAA,EAAAA,MAAAA;EAAkDG;;;;;;;;;;EAxDwC,IAAA,CAAA,EAAA,MAAA;;;;;;;;;;;;;;;;mBA9ZtI7B;;;;;;;;;;;;;;;;kBAgBD0B;;;;;;;;;;;;;;;;;;;;;mBAqBCL;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAyXAO,sBAAAA,SAA+BhB,cAAcY,mCAAmChB,2BAA2BiB;;;;;;;;;;;;;;;;mBAgB3GzB;;;;;;mBAMAqB;;;sBAGGI;kCACYpB,6BAA6BF,8BAA8BC;;;mDAG1CU;;;mBAGhCQ,uCAAuCQ,QAAQN,qCAAqCL,SAASF,wBAAwBT,gBAAgBgB;yDAC/FO,KAAKhC;sBACxCU,gEAAgEF,2BAA2ByB,QAAQrB;kCACvFF,gEAAgEF,2BAA2B0B,eAAevB;wCACpGX,mCAAmCG,+CAA2D8B,QAA3CE,sBAAAA,CAAmFC,qBAAAA;;;;;;;;;;;;;;;;;;iBAkB7JnB;yCACwBU,sBAAsBA,mCAAmCN,eAAeS,aAAaH,8BAA8BR,uCAAuCC,SAASF,wBAAwBY;yCAC3LH,sBAAsBA,mCAAmCN,eAAeS,aAAaH,8BAA8BR,sCAAsCC,SAASF;SAChMR;YACGoB"}
1
+ {"version":3,"file":"chat_models.d.ts","names":[],"sources":["../src/chat_models.ts"],"mappings":";;;;;;;;;;;;;KAwEY,sBAAA;EACV,KAAA,EAAO,WAAA;EACP,MAAA,EAAQ,WAAA;AAAA;AAAA,UAGO,iCAAA,SAA0C,wBAAA;EACzD,KAAA,GAAQ,0BAAA;EAJW;;;;EASnB,oBAAA;EATmB;;AAGrB;;;EAYE,WAAA;EAKiB;;;EAAjB,cAAA,GAAiB,MAAA;AAAA;;;;UAMF,2BAAA,SAEb,mBAAA,EACA,IAAA,CAAK,iCAAA;EATP;;;;AAMF;EASE,KAAA,EAAO,WAAA;;;;;;;;;;;EAYP,WAAA;EAlBE;;;EAuBF,eAAA;EALA;;;;;;;;;;;;EAmBA,IAAA;EA4DA;;;;;;AAyYF;;;;EAzbE,IAAA;EA6diB;;;;;;;;EAndjB,aAAA;EAonBiD;;;;;;EA5mBjD,cAAA,GAAiB,aAAA;EAkoBd;;;EA7nBH,MAAA;EAuqBe;;;EAlqBf,UAAA;EAwuBe;;;EAnuBf,OAAA;EA8zBuD;;;EAzzBvD,aAAA,GAAgB,MAAA;EAs2BI;EAn2BpB,SAAA;EAs2BqB;;;;;EA/1BrB,IAAA;EAm2BG;;;;;;;EA11BH,kCAAA;EAq2B2C;;;;EA/1B3C,cAAA,GAAiB,gCAAA;AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cA0XN,sBAAA,SACH,aAAA,CAAc,iCAAA,EAAmC,cAAA,aAC9C,2BAAA;EAAA,OAEJ,OAAA,CAAA;EAIP,eAAA;EAAA,IAEI,UAAA,CAAA;IAAA,CAAiB,GAAA;EAAA;EAMrB,YAAA;EAAA,IAEI,UAAA,CAAA;;;EAMJ,KAAA;EAEA,WAAA;EAEA,eAAA;EAEA,IAAA;EAEA,IAAA;EAEA,aAAA;EAEA,cAAA,GAAiB,aAAA;EAEjB,MAAA;EAEA,SAAA;EAEA,IAAA;EAEA,WAAA;EAEA,kCAAA;EAEA,cAAA,GAAiB,gCAAA;EAAA,QAET,MAAA;EAAA,IAEJ,kBAAA,CAAA;EAWJ,WAAA,CACE,KAAA,EAAO,WAAA,WACP,MAAA,GAAS,IAAA,CAAK,2BAAA;EAEhB,WAAA,CAAY,MAAA,EAAQ,2BAAA;EA4FpB,gBAAA,CACE,aAAA,EAAe,aAAA,EACf,WAAA,GAAc,WAAA,EACd,cAAA,GAAiB,cAAA;EAAA,IAYf,oBAAA,CAAA;EAAA,IAMA,2BAAA,CAAA;EAiBJ,WAAA,CAAY,OAAA,8BAAqC,eAAA;EAWjD,iBAAA,CAAA;EAIA,QAAA,CAAA;EAIS,SAAA,CACP,KAAA,EAAO,0BAAA,IACP,MAAA,GAAS,OAAA,CAAQ,iCAAA,IAChB,QAAA,CACD,sBAAA,EACA,cAAA,EACA,iCAAA;EAQF,gBAAA,CACE,OAAA,+BACC,IAAA,CAAK,sBAAA;EA0BF,SAAA,CACJ,QAAA,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,OAAA,CAAQ,UAAA;EAkEJ,qBAAA,CACL,QAAA
,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,cAAA,CAAe,mBAAA;EAyFZ,mBAAA,CACJ,OAAA,WAAkB,sBAAA,aAAmC,IAAA,KACrD,OAAA,+BAAmC,OAAA,CADkC,sBAAA,CAClC,qBAAA;;;;;;;;;;;;;;;;;;MAsCjC,OAAA,CAAA,GAAW,YAAA;EAIf,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,UACR,QAAA,CAAS,sBAAA,EAAwB,SAAA;EAEpC,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,SACR,QAAA,CAAS,sBAAA;IAA0B,GAAA,EAAK,WAAA;IAAa,MAAA,EAAQ,SAAA;EAAA;AAAA"}