@langchain/google-common 0.2.18 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173)
  1. package/CHANGELOG.md +23 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +264 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +109 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +98 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +258 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/profiles.cjs +219 -0
  73. package/dist/profiles.cjs.map +1 -0
  74. package/dist/profiles.js +218 -0
  75. package/dist/profiles.js.map +1 -0
  76. package/dist/types-anthropic.d.cts +229 -0
  77. package/dist/types-anthropic.d.cts.map +1 -0
  78. package/dist/types-anthropic.d.ts +221 -215
  79. package/dist/types-anthropic.d.ts.map +1 -0
  80. package/dist/types.cjs +51 -62
  81. package/dist/types.cjs.map +1 -0
  82. package/dist/types.d.cts +748 -0
  83. package/dist/types.d.cts.map +1 -0
  84. package/dist/types.d.ts +669 -656
  85. package/dist/types.d.ts.map +1 -0
  86. package/dist/types.js +46 -45
  87. package/dist/types.js.map +1 -0
  88. package/dist/utils/anthropic.cjs +598 -821
  89. package/dist/utils/anthropic.cjs.map +1 -0
  90. package/dist/utils/anthropic.js +597 -818
  91. package/dist/utils/anthropic.js.map +1 -0
  92. package/dist/utils/common.cjs +130 -211
  93. package/dist/utils/common.cjs.map +1 -0
  94. package/dist/utils/common.d.cts +13 -0
  95. package/dist/utils/common.d.cts.map +1 -0
  96. package/dist/utils/common.d.ts +12 -7
  97. package/dist/utils/common.d.ts.map +1 -0
  98. package/dist/utils/common.js +128 -207
  99. package/dist/utils/common.js.map +1 -0
  100. package/dist/utils/failed_handler.cjs +28 -30
  101. package/dist/utils/failed_handler.cjs.map +1 -0
  102. package/dist/utils/failed_handler.d.cts +9 -0
  103. package/dist/utils/failed_handler.d.cts.map +1 -0
  104. package/dist/utils/failed_handler.d.ts +8 -2
  105. package/dist/utils/failed_handler.d.ts.map +1 -0
  106. package/dist/utils/failed_handler.js +28 -28
  107. package/dist/utils/failed_handler.js.map +1 -0
  108. package/dist/utils/gemini.cjs +1020 -1488
  109. package/dist/utils/gemini.cjs.map +1 -0
  110. package/dist/utils/gemini.d.cts +51 -0
  111. package/dist/utils/gemini.d.cts.map +1 -0
  112. package/dist/utils/gemini.d.ts +51 -48
  113. package/dist/utils/gemini.d.ts.map +1 -0
  114. package/dist/utils/gemini.js +1015 -1479
  115. package/dist/utils/gemini.js.map +1 -0
  116. package/dist/utils/index.cjs +38 -23
  117. package/dist/utils/index.d.cts +8 -0
  118. package/dist/utils/index.d.ts +8 -7
  119. package/dist/utils/index.js +8 -7
  120. package/dist/utils/palm.d.cts +11 -0
  121. package/dist/utils/palm.d.cts.map +1 -0
  122. package/dist/utils/palm.d.ts +9 -4
  123. package/dist/utils/palm.d.ts.map +1 -0
  124. package/dist/utils/safety.cjs +13 -22
  125. package/dist/utils/safety.cjs.map +1 -0
  126. package/dist/utils/safety.d.cts +12 -0
  127. package/dist/utils/safety.d.cts.map +1 -0
  128. package/dist/utils/safety.d.ts +10 -4
  129. package/dist/utils/safety.d.ts.map +1 -0
  130. package/dist/utils/safety.js +13 -19
  131. package/dist/utils/safety.js.map +1 -0
  132. package/dist/utils/stream.cjs +296 -475
  133. package/dist/utils/stream.cjs.map +1 -0
  134. package/dist/utils/stream.d.cts +165 -0
  135. package/dist/utils/stream.d.cts.map +1 -0
  136. package/dist/utils/stream.d.ts +156 -131
  137. package/dist/utils/stream.d.ts.map +1 -0
  138. package/dist/utils/stream.js +293 -469
  139. package/dist/utils/stream.js.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  141. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  143. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  144. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  145. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  146. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  147. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  148. package/package.json +72 -85
  149. package/dist/types-anthropic.cjs +0 -2
  150. package/dist/types-anthropic.js +0 -1
  151. package/dist/utils/anthropic.d.ts +0 -4
  152. package/dist/utils/palm.cjs +0 -2
  153. package/dist/utils/palm.js +0 -1
  154. package/experimental/media.cjs +0 -1
  155. package/experimental/media.d.cts +0 -1
  156. package/experimental/media.d.ts +0 -1
  157. package/experimental/media.js +0 -1
  158. package/experimental/utils/media_core.cjs +0 -1
  159. package/experimental/utils/media_core.d.cts +0 -1
  160. package/experimental/utils/media_core.d.ts +0 -1
  161. package/experimental/utils/media_core.js +0 -1
  162. package/index.cjs +0 -1
  163. package/index.d.cts +0 -1
  164. package/index.d.ts +0 -1
  165. package/index.js +0 -1
  166. package/types.cjs +0 -1
  167. package/types.d.cts +0 -1
  168. package/types.d.ts +0 -1
  169. package/types.js +0 -1
  170. package/utils.cjs +0 -1
  171. package/utils.d.cts +0 -1
  172. package/utils.d.ts +0 -1
  173. package/utils.js +0 -1
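
Reading the list as a whole: 1.0 moves the build to a bundler (note the new dist/_virtual/rolldown_runtime.cjs shim), adds CommonJS declaration files (.d.cts) and sourcemaps for every module, introduces a profiles module, and removes the old root-level one-line re-export stubs (package/index.js, package/types.js, package/utils.js, and the experimental/* mirrors); presumably the rewritten package.json (+72 −85, not shown here) now routes those entry points through its "exports" map instead. A quick consumer-side smoke test, under the assumption that the 1.x root entry still re-exports the llms module as it did in 0.x:

// Illustrative check, not from the package itself. The named export is
// grounded in the `export { GoogleBaseLLM }` added to dist/llms.js below.
import { GoogleBaseLLM } from "@langchain/google-common";

// lc_name() is the static tracing name declared in dist/llms.d.ts.
console.log(GoogleBaseLLM.lc_name()); // "GoogleLLM"
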
package/dist/llms.d.ts CHANGED
@@ -1,62 +1,72 @@
- import { Callbacks } from "@langchain/core/callbacks/manager";
- import { LLM } from "@langchain/core/language_models/llms";
- import { type BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
- import { BaseMessage, MessageContent } from "@langchain/core/messages";
- import { AbstractGoogleLLMConnection } from "./connection.js";
- import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent, GoogleAIResponseMimeType } from "./types.js";
+ import { GeminiContent, GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAIResponseMimeType, GoogleAISafetyHandler, GoogleAISafetySetting, GoogleBaseLLMInput, GooglePlatformType } from "./types.js";
  import { GoogleAbstractedClient } from "./auth.js";
+ import { AbstractGoogleLLMConnection } from "./connection.js";
  import { ChatGoogleBase } from "./chat_models.js";
- import type { GoogleBaseLLMInput, GoogleAISafetyHandler } from "./types.js";
- export { GoogleBaseLLMInput };
+ import { BaseMessage, MessageContent } from "@langchain/core/messages";
+ import { BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
+ import { Callbacks } from "@langchain/core/callbacks/manager";
+ import { LLM } from "@langchain/core/language_models/llms";
+
+ //#region src/llms.d.ts
  declare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {
- formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;
+ formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;
  }
  /**
  * Integration with an LLM.
  */
- export declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {
- static lc_name(): string;
- get lc_secrets(): {
- [key: string]: string;
- } | undefined;
- originalFields?: GoogleBaseLLMInput<AuthOptions>;
- lc_serializable: boolean;
- modelName: string;
- model: string;
- temperature: number;
- maxOutputTokens: number;
- topP: number;
- topK: number;
- stopSequences: string[];
- safetySettings: GoogleAISafetySetting[];
- safetyHandler: GoogleAISafetyHandler;
- responseMimeType: GoogleAIResponseMimeType;
- protected connection: GoogleLLMConnection<AuthOptions>;
- protected streamedConnection: GoogleLLMConnection<AuthOptions>;
- constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
- abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
- buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
- buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;
- buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
- buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
- get platform(): GooglePlatformType;
- _llmType(): string;
- formatPrompt(prompt: string): MessageContent;
- /**
- * For some given input string and options, return a string output.
- *
- * Despite the fact that `invoke` is overridden below, we still need this
- * in order to handle public APi calls to `generate()`.
- */
- _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
- _streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;
- predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;
- /**
- * Internal implementation detail to allow Google LLMs to support
- * multimodal input by delegating to the chat model implementation.
- *
- * TODO: Replace with something less hacky.
- */
- protected createProxyChat(): ChatGoogleBase<AuthOptions>;
- invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;
+ declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {
+ // Used for tracing, replace with the same name as your class
+ static lc_name(): string;
+ get lc_secrets(): {
+ [key: string]: string;
+ } | undefined;
+ originalFields?: GoogleBaseLLMInput<AuthOptions>;
+ lc_serializable: boolean;
+ modelName: string;
+ model: string;
+ temperature: number;
+ maxOutputTokens: number;
+ topP: number;
+ topK: number;
+ stopSequences: string[];
+ safetySettings: GoogleAISafetySetting[];
+ safetyHandler: GoogleAISafetyHandler;
+ responseMimeType: GoogleAIResponseMimeType;
+ protected connection: GoogleLLMConnection<AuthOptions>;
+ protected streamedConnection: GoogleLLMConnection<AuthOptions>;
+ constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
+ abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+ buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
+ buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;
+ buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+ buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
+ get platform(): GooglePlatformType;
+ // Replace
+ _llmType(): string;
+ formatPrompt(prompt: string): MessageContent;
+ /**
+ * For some given input string and options, return a string output.
+ *
+ * Despite the fact that `invoke` is overridden below, we still need this
+ * in order to handle public APi calls to `generate()`.
+ */
+ _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+ // Normally, you should not override this method and instead should override
+ // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
+ // the LLM.
+ _streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;
+ predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;
+ /**
+ * Internal implementation detail to allow Google LLMs to support
+ * multimodal input by delegating to the chat model implementation.
+ *
+ * TODO: Replace with something less hacky.
+ */
+ protected createProxyChat(): ChatGoogleBase<AuthOptions>;
+ // TODO: Remove the need to override this - we are doing it to
+ // allow the LLM to handle multimodal types of input.
+ invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;
  }
+ //#endregion
+ export { GoogleBaseLLM };
+ //# sourceMappingURL=llms.d.ts.map
package/dist/llms.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"llms.d.ts","names":["Callbacks","LLM","BaseLanguageModelCallOptions","BaseLanguageModelInput","BaseMessage","MessageContent","AbstractGoogleLLMConnection","GoogleAIBaseLLMInput","GoogleAIModelParams","GoogleAISafetySetting","GooglePlatformType","GeminiContent","GoogleAIResponseMimeType","GoogleAbstractedClient","ChatGoogleBase","GoogleBaseLLMInput","GoogleAISafetyHandler","GoogleLLMConnection","AuthOptions","Promise","GoogleBaseLLM","AsyncGenerator"],"sources":["../src/llms.d.ts"],"sourcesContent":["import { Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { LLM } from \"@langchain/core/language_models/llms\";\nimport { type BaseLanguageModelCallOptions, BaseLanguageModelInput } from \"@langchain/core/language_models/base\";\nimport { BaseMessage, MessageContent } from \"@langchain/core/messages\";\nimport { AbstractGoogleLLMConnection } from \"./connection.js\";\nimport { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent, GoogleAIResponseMimeType } from \"./types.js\";\nimport { GoogleAbstractedClient } from \"./auth.js\";\nimport { ChatGoogleBase } from \"./chat_models.js\";\nimport type { GoogleBaseLLMInput, GoogleAISafetyHandler } from \"./types.js\";\nexport { GoogleBaseLLMInput };\ndeclare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {\n formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;\n}\n/**\n * Integration with an LLM.\n */\nexport declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {\n // Used for tracing, replace with the same name as your class\n static lc_name(): string;\n get lc_secrets(): {\n [key: string]: string;\n } | undefined;\n originalFields?: GoogleBaseLLMInput<AuthOptions>;\n lc_serializable: boolean;\n modelName: string;\n model: string;\n temperature: number;\n maxOutputTokens: number;\n topP: number;\n topK: number;\n stopSequences: string[];\n safetySettings: GoogleAISafetySetting[];\n safetyHandler: GoogleAISafetyHandler;\n responseMimeType: GoogleAIResponseMimeType;\n protected connection: GoogleLLMConnection<AuthOptions>;\n protected streamedConnection: GoogleLLMConnection<AuthOptions>;\n constructor(fields?: GoogleBaseLLMInput<AuthOptions>);\n abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;\n buildApiKeyClient(apiKey: string): GoogleAbstractedClient;\n buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;\n buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;\n buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;\n get platform(): GooglePlatformType;\n // Replace\n _llmType(): string;\n formatPrompt(prompt: string): MessageContent;\n /**\n * For some given input string and options, return a string output.\n *\n * Despite the fact that `invoke` is overridden below, we still need this\n * in order to handle public APi calls to `generate()`.\n */\n _call(prompt: string, options: this[\"ParsedCallOptions\"]): Promise<string>;\n // Normally, you should not override this method and instead should override\n // _streamResponseChunks. 
We are doing so here to allow for multimodal inputs into\n // the LLM.\n _streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;\n predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;\n /**\n * Internal implementation detail to allow Google LLMs to support\n * multimodal input by delegating to the chat model implementation.\n *\n * TODO: Replace with something less hacky.\n */\n protected createProxyChat(): ChatGoogleBase<AuthOptions>;\n // TODO: Remove the need to override this - we are doing it to\n // allow the LLM to handle multimodal types of input.\n invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;\n}\n"],"mappings":";;;;;;;;;;AAS8B,cAChBiB,mBAAmB,CAAA,WAAA,CAAA,SAAsBX,2BAAtB,CAAkDD,cAAlD,EAAkEa,WAAlE,CAAA,CAAA;EAAA,cAAA,CAAA,KAAA,EACPb,cADO,EAAA,WAAA,EACsBG,mBADtB,CAAA,EAC4CW,OAD5C,CACoDR,aADpD,EAAA,CAAA;;;;;AACoDA,uBAKvDS,aALuDT,CAAAA,WAAAA,CAAAA,SAKpBV,GALoBU,CAKhBT,4BALgBS,CAAAA,YAKyBI,kBALzBJ,CAK4CO,WAL5CP,CAAAA,CAAAA;EAAa;EAAd,OAD7BL,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAA2B,IAAA,UAAA,CAAA,CAAA,EAAA;IAMpDc,CAAAA,GAAAA,EAAAA,MAAa,CAAA,EAAA,MAAA;EAAA,CAAA,GAAA,SAAA;EAAA,cAA0BlB,CAAAA,EAMhDa,kBANgDb,CAM7BgB,WAN6BhB,CAAAA;EAA4B,eAAgCgB,EAAAA,OAAAA;EAAW,SAMpGA,EAAAA,MAAAA;EAAW,KAA9BH,EAAAA,MAAAA;EAAkB,WASnBN,EAAAA,MAAAA;EAAqB,eACtBO,EAAAA,MAAAA;EAAqB,IAClBJ,EAAAA,MAAAA;EAAwB,IACAM,EAAAA,MAAAA;EAAW,aAA/BD,EAAAA,MAAAA,EAAAA;EAAmB,cACSC,EAJlCT,qBAIkCS,EAAAA;EAAW,aAA/BD,EAHfD,qBAGeC;EAAmB,gBACTC,EAHtBN,wBAGsBM;EAAW,UAA9BH,UAAAA,EAFCE,mBAEDF,CAFqBG,WAErBH,CAAAA;EAAkB,UACsBG,kBAAAA,EAF/BD,mBAE+BC,CAFXA,WAEWA,CAAAA;EAAW,WAAhCX,CAAAA,MAAAA,CAAAA,EADnBQ,kBACmBR,CADAW,WACAX,CAAAA;EAAoB,SAAgBM,qBAAAA,CAAAA,MAAAA,CAAAA,EAApCN,oBAAoCM,CAAfK,WAAeL,CAAAA,CAAAA,EAAAA,sBAAAA;EAAsB,iBAC/DA,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA,EAAAA,sBAAAA;EAAsB,WACfK,CAAAA,MAAAA,CAAAA,EAArBX,oBAAqBW,CAAAA,WAAAA,CAAAA,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAW,WAAhCX,CAAAA,MAAAA,CAAAA,EACAA,oBADAA,CACqBW,WADrBX,CAAAA,CAAAA,EACoCM,sBADpCN;EAAoB,eACCW,CAAAA,MAAAA,EAClBH,kBADkBG,CACCA,WADDA,CAAAA,EAAAA,MAAAA,EACuBL,sBADvBK,CAAAA,EAAAA,IAAAA;EAAW,IAAhCX,QAAAA,CAAAA,CAAAA,EAELG,kBAFKH;EAAoB;EAAsC,QACpCW,CAAAA,CAAAA,EAAAA,MAAAA;EAAW,YAA9BH,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA,EAIMV,cAJNU;EAAkB;;;;;;EAe2C,KAAGM,CAAAA,MAAAA,EAAAA,MAAAA,EAAAA,OAAAA,EAAAA,IAAAA,CAAAA,mBAAAA,CAAAA,CAAAA,EAJ7BF,OAI6BE,CAAAA,MAAAA,CAAAA;EAAc;EACjE;EAAqD;EAAwB,eAAWjB,CAAAA,KAAAA,EADtGD,sBACsGC,EAAAA,OAAAA,CAAAA,EADpEF,4BACoEE,CAAAA,EADrCiB,cACqCjB,CAAAA,MAAAA,CAAAA;EAAW,eAAnBe,CAAAA,QAAAA,EAA3Ff,WAA2Fe,EAAAA,EAAAA,OAAAA,CAAAA,EAAAA,MAAAA,EAAAA,GAAvDjB,4BAAuDiB,EAAAA,UAAAA,CAAAA,EAAZnB,SAAYmB,CAAAA,EAAAA,OAAAA,CAAQf,WAARe,CAAAA;EAAO;;;;;;EAzC5D,UAA0CJ,eAAAA,CAAAA,CAAAA,EAgD7ED,cAhD6EC,CAgD9DG,WAhD8DH,CAAAA;EAAkB;;gBAmD9GZ,kCAAkCD,+BAA+BiB"}
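
In the declaration above, buildAbstractedClient remains the single abstract member: buildClient first tries buildApiKey (the apiKey field or the GOOGLE_API_KEY environment variable) and only falls back to the abstract method when no key is found. A minimal sketch of a concrete subclass follows; MyGoogleLLM and its error message are hypothetical, and it assumes GoogleAbstractedClient and GoogleAIBaseLLMInput are re-exported from the package root as they were in 0.x.

import { GoogleBaseLLM } from "@langchain/google-common";
import type {
  GoogleAbstractedClient,
  GoogleAIBaseLLMInput,
} from "@langchain/google-common";

// Hypothetical platform subclass. Real platform packages return a client
// wrapping their credential flow (service accounts, ADC, etc.) here.
class MyGoogleLLM extends GoogleBaseLLM<unknown> {
  buildAbstractedClient(
    _fields?: GoogleAIBaseLLMInput<unknown>
  ): GoogleAbstractedClient {
    throw new Error("no API key found and no platform auth configured");
  }
}

// With an apiKey (or GOOGLE_API_KEY set), buildClient() never reaches the
// abstract method: it builds an ApiKeyGoogleAuth client instead.
const llm = new MyGoogleLLM({ apiKey: "test-key" });
console.log(llm.platform);
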
package/dist/llms.js CHANGED
@@ -1,242 +1,158 @@
- import { CallbackManager } from "@langchain/core/callbacks/manager";
- import { BaseLLM, LLM } from "@langchain/core/language_models/llms";
- import { GenerationChunk } from "@langchain/core/outputs";
- import { getEnvironmentVariable } from "@langchain/core/utils/env";
- import { AbstractGoogleLLMConnection } from "./connection.js";
- import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
  import { DefaultGeminiSafetyHandler } from "./utils/gemini.js";
- import { ApiKeyGoogleAuth } from "./auth.js";
+ import { copyAIModelParams, copyAndValidateModelParamsInto } from "./utils/common.js";
  import { ensureParams } from "./utils/failed_handler.js";
+ import { AbstractGoogleLLMConnection } from "./connection.js";
+ import { ApiKeyGoogleAuth } from "./auth.js";
  import { ChatGoogleBase } from "./chat_models.js";
- class GoogleLLMConnection extends AbstractGoogleLLMConnection {
- async formatContents(input, _parameters) {
- const parts = await this.api.messageContentToParts(input);
- const contents = [
- {
- role: "user", // Required by Vertex AI
- parts,
- },
- ];
- return contents;
- }
- }
- class ProxyChatGoogle extends ChatGoogleBase {
- constructor(fields) {
- super(fields);
- }
- buildAbstractedClient(fields) {
- return fields.connection.client;
- }
- }
+ import { getEnvironmentVariable } from "@langchain/core/utils/env";
+ import { GenerationChunk } from "@langchain/core/outputs";
+ import { CallbackManager } from "@langchain/core/callbacks/manager";
+ import { BaseLLM, LLM } from "@langchain/core/language_models/llms";
+
+ //#region src/llms.ts
+ var GoogleLLMConnection = class extends AbstractGoogleLLMConnection {
+ async formatContents(input, _parameters) {
+ const parts = await this.api.messageContentToParts(input);
+ const contents = [{
+ role: "user",
+ parts
+ }];
+ return contents;
+ }
+ };
+ var ProxyChatGoogle = class extends ChatGoogleBase {
+ constructor(fields) {
+ super(fields);
+ }
+ buildAbstractedClient(fields) {
+ return fields.connection.client;
+ }
+ };
  /**
- * Integration with an LLM.
- */
- export class GoogleBaseLLM extends LLM {
- // Used for tracing, replace with the same name as your class
- static lc_name() {
- return "GoogleLLM";
- }
- get lc_secrets() {
- return {
- authOptions: "GOOGLE_AUTH_OPTIONS",
- };
- }
- constructor(fields) {
- super(ensureParams(fields));
- Object.defineProperty(this, "originalFields", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "lc_serializable", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: true
- });
- Object.defineProperty(this, "modelName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "gemini-pro"
- });
- Object.defineProperty(this, "model", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "gemini-pro"
- });
- Object.defineProperty(this, "temperature", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 0.7
- });
- Object.defineProperty(this, "maxOutputTokens", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 1024
- });
- Object.defineProperty(this, "topP", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 0.8
- });
- Object.defineProperty(this, "topK", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 40
- });
- Object.defineProperty(this, "stopSequences", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: []
- });
- Object.defineProperty(this, "safetySettings", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: []
- });
- Object.defineProperty(this, "safetyHandler", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "responseMimeType", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "text/plain"
- });
- Object.defineProperty(this, "connection", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "streamedConnection", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.originalFields = fields;
- copyAndValidateModelParamsInto(fields, this);
- this.safetyHandler =
- fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
- const client = this.buildClient(fields);
- this.buildConnection(fields ?? {}, client);
- }
- buildApiKeyClient(apiKey) {
- return new ApiKeyGoogleAuth(apiKey);
- }
- buildApiKey(fields) {
- return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
- }
- buildClient(fields) {
- const apiKey = this.buildApiKey(fields);
- if (apiKey) {
- return this.buildApiKeyClient(apiKey);
- }
- else {
- return this.buildAbstractedClient(fields);
- }
- }
- buildConnection(fields, client) {
- this.connection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, false);
- this.streamedConnection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, true);
- }
- get platform() {
- return this.connection.platform;
- }
- // Replace
- _llmType() {
- return "googlellm";
- }
- formatPrompt(prompt) {
- return prompt;
- }
- /**
- * For some given input string and options, return a string output.
- *
- * Despite the fact that `invoke` is overridden below, we still need this
- * in order to handle public APi calls to `generate()`.
- */
- async _call(prompt, options) {
- const parameters = copyAIModelParams(this, options);
- const result = await this.connection.request(prompt, parameters, options);
- const ret = this.connection.api.responseToString(result);
- return ret;
- }
- // Normally, you should not override this method and instead should override
- // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
- // the LLM.
- async *_streamIterator(input, options) {
- // TODO: Refactor callback setup and teardown code into core
- const prompt = BaseLLM._convertInputToPromptValue(input);
- const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
- const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
- const extra = {
- options: callOptions,
- invocation_params: this?.invocationParams(callOptions),
- batch_size: 1,
- };
- const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
- let generation = new GenerationChunk({
- text: "",
- });
- const proxyChat = this.createProxyChat();
- try {
- for await (const chunk of proxyChat._streamIterator(input, options)) {
- const stringValue = this.connection.api.chunkToString(chunk);
- const generationChunk = new GenerationChunk({
- text: stringValue,
- });
- generation = generation.concat(generationChunk);
- yield stringValue;
- }
- }
- catch (err) {
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
- throw err;
- }
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
- generations: [[generation]],
- })));
- }
- async predictMessages(messages, options, _callbacks) {
- const { content } = messages[0];
- const result = await this.connection.request(content, {}, options);
- const ret = this.connection.api.responseToBaseMessage(result);
- return ret;
- }
- /**
- * Internal implementation detail to allow Google LLMs to support
- * multimodal input by delegating to the chat model implementation.
- *
- * TODO: Replace with something less hacky.
- */
- createProxyChat() {
- return new ProxyChatGoogle({
- ...this.originalFields,
- connection: this.connection,
- });
- }
- // TODO: Remove the need to override this - we are doing it to
- // allow the LLM to handle multimodal types of input.
- async invoke(input, options) {
- const stream = await this._streamIterator(input, options);
- let generatedOutput = "";
- for await (const chunk of stream) {
- generatedOutput += chunk;
- }
- return generatedOutput;
- }
- }
+ * Integration with an LLM.
+ */
+ var GoogleBaseLLM = class extends LLM {
+ static lc_name() {
+ return "GoogleLLM";
+ }
+ get lc_secrets() {
+ return { authOptions: "GOOGLE_AUTH_OPTIONS" };
+ }
+ originalFields;
+ lc_serializable = true;
+ modelName = "gemini-pro";
+ model = "gemini-pro";
+ temperature = .7;
+ maxOutputTokens = 1024;
+ topP = .8;
+ topK = 40;
+ stopSequences = [];
+ safetySettings = [];
+ safetyHandler;
+ responseMimeType = "text/plain";
+ connection;
+ streamedConnection;
+ constructor(fields) {
+ super(ensureParams(fields));
+ this.originalFields = fields;
+ copyAndValidateModelParamsInto(fields, this);
+ this.safetyHandler = fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
+ const client = this.buildClient(fields);
+ this.buildConnection(fields ?? {}, client);
+ }
+ buildApiKeyClient(apiKey) {
+ return new ApiKeyGoogleAuth(apiKey);
+ }
+ buildApiKey(fields) {
+ return fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
+ }
+ buildClient(fields) {
+ const apiKey = this.buildApiKey(fields);
+ if (apiKey) return this.buildApiKeyClient(apiKey);
+ else return this.buildAbstractedClient(fields);
+ }
+ buildConnection(fields, client) {
+ this.connection = new GoogleLLMConnection({
+ ...fields,
+ ...this
+ }, this.caller, client, false);
+ this.streamedConnection = new GoogleLLMConnection({
+ ...fields,
+ ...this
+ }, this.caller, client, true);
+ }
+ get platform() {
+ return this.connection.platform;
+ }
+ _llmType() {
+ return "googlellm";
+ }
+ formatPrompt(prompt) {
+ return prompt;
+ }
+ /**
+ * For some given input string and options, return a string output.
+ *
+ * Despite the fact that `invoke` is overridden below, we still need this
+ * in order to handle public APi calls to `generate()`.
+ */
+ async _call(prompt, options) {
+ const parameters = copyAIModelParams(this, options);
+ const result = await this.connection.request(prompt, parameters, options);
+ const ret = this.connection.api.responseToString(result);
+ return ret;
+ }
+ async *_streamIterator(input, options) {
+ const prompt = BaseLLM._convertInputToPromptValue(input);
+ const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
+ const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
+ const extra = {
+ options: callOptions,
+ invocation_params: this?.invocationParams(callOptions),
+ batch_size: 1
+ };
+ const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], void 0, void 0, extra, void 0, void 0, runnableConfig.runName);
+ let generation = new GenerationChunk({ text: "" });
+ const proxyChat = this.createProxyChat();
+ try {
+ for await (const chunk of proxyChat._streamIterator(input, options)) {
+ const stringValue = this.connection.api.chunkToString(chunk);
+ const generationChunk = new GenerationChunk({ text: stringValue });
+ generation = generation.concat(generationChunk);
+ yield stringValue;
+ }
+ } catch (err) {
+ await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+ throw err;
+ }
+ await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]] })));
+ }
+ async predictMessages(messages, options, _callbacks) {
+ const { content } = messages[0];
+ const result = await this.connection.request(content, {}, options);
+ const ret = this.connection.api.responseToBaseMessage(result);
+ return ret;
+ }
+ /**
+ * Internal implementation detail to allow Google LLMs to support
+ * multimodal input by delegating to the chat model implementation.
+ *
+ * TODO: Replace with something less hacky.
+ */
+ createProxyChat() {
+ return new ProxyChatGoogle({
+ ...this.originalFields,
+ connection: this.connection
+ });
+ }
+ async invoke(input, options) {
+ const stream = await this._streamIterator(input, options);
+ let generatedOutput = "";
+ for await (const chunk of stream) generatedOutput += chunk;
+ return generatedOutput;
+ }
+ };
+
+ //#endregion
+ export { GoogleBaseLLM };
+ //# sourceMappingURL=llms.js.map
package/dist/llms.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"llms.js","names":["input: MessageContent","_parameters: GoogleAIModelParams","contents: GeminiContent[]","fields: ProxyChatInput<AuthOptions>","fields?: GoogleBaseLLMInput<AuthOptions>","apiKey: string","fields?: GoogleAIBaseLLMInput<AuthOptions>","fields: GoogleBaseLLMInput<AuthOptions>","client: GoogleAbstractedClient","prompt: string","options: this[\"ParsedCallOptions\"]","input: BaseLanguageModelInput","options?: BaseLanguageModelCallOptions","messages: BaseMessage[]","options?: string[] | BaseLanguageModelCallOptions","_callbacks?: Callbacks"],"sources":["../src/llms.ts"],"sourcesContent":["import { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BaseLLM, LLM } from \"@langchain/core/language_models/llms\";\nimport {\n type BaseLanguageModelCallOptions,\n BaseLanguageModelInput,\n} from \"@langchain/core/language_models/base\";\nimport { BaseMessage, MessageContent } from \"@langchain/core/messages\";\nimport { GenerationChunk } from \"@langchain/core/outputs\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\n\nimport { AbstractGoogleLLMConnection } from \"./connection.js\";\nimport {\n GoogleAIBaseLLMInput,\n GoogleAIModelParams,\n GoogleAISafetySetting,\n GooglePlatformType,\n GeminiContent,\n GoogleAIResponseMimeType,\n} from \"./types.js\";\nimport {\n copyAIModelParams,\n copyAndValidateModelParamsInto,\n} from \"./utils/common.js\";\nimport { DefaultGeminiSafetyHandler } from \"./utils/gemini.js\";\nimport { ApiKeyGoogleAuth, GoogleAbstractedClient } from \"./auth.js\";\nimport { ensureParams } from \"./utils/failed_handler.js\";\nimport { ChatGoogleBase } from \"./chat_models.js\";\nimport type { GoogleBaseLLMInput, GoogleAISafetyHandler } from \"./types.js\";\n\nexport { GoogleBaseLLMInput };\n\nclass GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<\n MessageContent,\n AuthOptions\n> {\n async formatContents(\n input: MessageContent,\n _parameters: GoogleAIModelParams\n ): Promise<GeminiContent[]> {\n const parts = await this.api.messageContentToParts!(input);\n const contents: GeminiContent[] = [\n {\n role: \"user\", // Required by Vertex AI\n parts,\n },\n ];\n return contents;\n }\n}\n\ntype ProxyChatInput<AuthOptions> = GoogleAIBaseLLMInput<AuthOptions> & {\n connection: GoogleLLMConnection<AuthOptions>;\n};\n\nclass ProxyChatGoogle<AuthOptions> extends ChatGoogleBase<AuthOptions> {\n constructor(fields: ProxyChatInput<AuthOptions>) {\n super(fields);\n }\n\n buildAbstractedClient(\n fields: ProxyChatInput<AuthOptions>\n ): GoogleAbstractedClient {\n return fields.connection.client;\n }\n}\n\n/**\n * Integration with an LLM.\n */\nexport abstract class GoogleBaseLLM<AuthOptions>\n extends LLM<BaseLanguageModelCallOptions>\n implements GoogleBaseLLMInput<AuthOptions>\n{\n // Used for tracing, replace with the same name as your class\n static lc_name() {\n return \"GoogleLLM\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n authOptions: \"GOOGLE_AUTH_OPTIONS\",\n };\n }\n\n originalFields?: GoogleBaseLLMInput<AuthOptions>;\n\n lc_serializable = true;\n\n modelName = \"gemini-pro\";\n\n model = \"gemini-pro\";\n\n temperature = 0.7;\n\n maxOutputTokens = 1024;\n\n topP = 0.8;\n\n topK = 40;\n\n stopSequences: string[] = [];\n\n safetySettings: GoogleAISafetySetting[] = [];\n\n safetyHandler: GoogleAISafetyHandler;\n\n responseMimeType: GoogleAIResponseMimeType = \"text/plain\";\n\n protected connection: GoogleLLMConnection<AuthOptions>;\n\n 
protected streamedConnection: GoogleLLMConnection<AuthOptions>;\n\n constructor(fields?: GoogleBaseLLMInput<AuthOptions>) {\n super(ensureParams(fields));\n this.originalFields = fields;\n\n copyAndValidateModelParamsInto(fields, this);\n this.safetyHandler =\n fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();\n\n const client = this.buildClient(fields);\n this.buildConnection(fields ?? {}, client);\n }\n\n abstract buildAbstractedClient(\n fields?: GoogleAIBaseLLMInput<AuthOptions>\n ): GoogleAbstractedClient;\n\n buildApiKeyClient(apiKey: string): GoogleAbstractedClient {\n return new ApiKeyGoogleAuth(apiKey);\n }\n\n buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined {\n return fields?.apiKey ?? getEnvironmentVariable(\"GOOGLE_API_KEY\");\n }\n\n buildClient(\n fields?: GoogleAIBaseLLMInput<AuthOptions>\n ): GoogleAbstractedClient {\n const apiKey = this.buildApiKey(fields);\n if (apiKey) {\n return this.buildApiKeyClient(apiKey);\n } else {\n return this.buildAbstractedClient(fields);\n }\n }\n\n buildConnection(\n fields: GoogleBaseLLMInput<AuthOptions>,\n client: GoogleAbstractedClient\n ) {\n this.connection = new GoogleLLMConnection(\n { ...fields, ...this },\n this.caller,\n client,\n false\n );\n\n this.streamedConnection = new GoogleLLMConnection(\n { ...fields, ...this },\n this.caller,\n client,\n true\n );\n }\n\n get platform(): GooglePlatformType {\n return this.connection.platform;\n }\n\n // Replace\n _llmType() {\n return \"googlellm\";\n }\n\n formatPrompt(prompt: string): MessageContent {\n return prompt;\n }\n\n /**\n * For some given input string and options, return a string output.\n *\n * Despite the fact that `invoke` is overridden below, we still need this\n * in order to handle public APi calls to `generate()`.\n */\n async _call(\n prompt: string,\n options: this[\"ParsedCallOptions\"]\n ): Promise<string> {\n const parameters = copyAIModelParams(this, options);\n const result = await this.connection.request(prompt, parameters, options);\n const ret = this.connection.api.responseToString(result);\n return ret;\n }\n\n // Normally, you should not override this method and instead should override\n // _streamResponseChunks. 
We are doing so here to allow for multimodal inputs into\n // the LLM.\n async *_streamIterator(\n input: BaseLanguageModelInput,\n options?: BaseLanguageModelCallOptions\n ): AsyncGenerator<string> {\n // TODO: Refactor callback setup and teardown code into core\n const prompt = BaseLLM._convertInputToPromptValue(input);\n const [runnableConfig, callOptions] =\n this._separateRunnableConfigFromCallOptions(options);\n const callbackManager_ = await CallbackManager.configure(\n runnableConfig.callbacks,\n this.callbacks,\n runnableConfig.tags,\n this.tags,\n runnableConfig.metadata,\n this.metadata,\n { verbose: this.verbose }\n );\n const extra = {\n options: callOptions,\n invocation_params: this?.invocationParams(callOptions),\n batch_size: 1,\n };\n const runManagers = await callbackManager_?.handleLLMStart(\n this.toJSON(),\n [prompt.toString()],\n undefined,\n undefined,\n extra,\n undefined,\n undefined,\n runnableConfig.runName\n );\n let generation = new GenerationChunk({\n text: \"\",\n });\n const proxyChat = this.createProxyChat();\n try {\n for await (const chunk of proxyChat._streamIterator(input, options)) {\n const stringValue = this.connection.api.chunkToString(chunk);\n const generationChunk = new GenerationChunk({\n text: stringValue,\n });\n generation = generation.concat(generationChunk);\n yield stringValue;\n }\n } catch (err) {\n await Promise.all(\n (runManagers ?? []).map((runManager) => runManager?.handleLLMError(err))\n );\n throw err;\n }\n await Promise.all(\n (runManagers ?? []).map((runManager) =>\n runManager?.handleLLMEnd({\n generations: [[generation]],\n })\n )\n );\n }\n\n async predictMessages(\n messages: BaseMessage[],\n options?: string[] | BaseLanguageModelCallOptions,\n _callbacks?: Callbacks\n ): Promise<BaseMessage> {\n const { content } = messages[0];\n const result = await this.connection.request(\n content,\n {},\n options as BaseLanguageModelCallOptions\n );\n const ret = this.connection.api.responseToBaseMessage(result);\n return ret;\n }\n\n /**\n * Internal implementation detail to allow Google LLMs to support\n * multimodal input by delegating to the chat model implementation.\n *\n * TODO: Replace with something less hacky.\n */\n protected createProxyChat(): ChatGoogleBase<AuthOptions> {\n return new ProxyChatGoogle<AuthOptions>({\n ...this.originalFields,\n connection: this.connection,\n });\n }\n\n // TODO: Remove the need to override this - we are doing it to\n // allow the LLM to handle multimodal types of input.\n async invoke(\n input: BaseLanguageModelInput,\n options?: BaseLanguageModelCallOptions\n ): Promise<string> {\n const stream = await this._streamIterator(input, options);\n let generatedOutput = \"\";\n for await (const chunk of stream) {\n generatedOutput += chunk;\n }\n return generatedOutput;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;AA+BA,IAAM,sBAAN,cAA+C,4BAG7C;CACA,MAAM,eACJA,OACAC,aAC0B;EAC1B,MAAM,QAAQ,MAAM,KAAK,IAAI,sBAAuB,MAAM;EAC1D,MAAMC,WAA4B,CAChC;GACE,MAAM;GACN;EACD,CACF;AACD,SAAO;CACR;AACF;AAMD,IAAM,kBAAN,cAA2C,eAA4B;CACrE,YAAYC,QAAqC;EAC/C,MAAM,OAAO;CACd;CAED,sBACEA,QACwB;AACxB,SAAO,OAAO,WAAW;CAC1B;AACF;;;;AAKD,IAAsB,gBAAtB,cACU,IAEV;CAEE,OAAO,UAAU;AACf,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,aAAa,sBACd;CACF;CAED;CAEA,kBAAkB;CAElB,YAAY;CAEZ,QAAQ;CAER,cAAc;CAEd,kBAAkB;CAElB,OAAO;CAEP,OAAO;CAEP,gBAA0B,CAAE;CAE5B,iBAA0C,CAAE;CAE5C;CAEA,mBAA6C;CAE7C,AAAU;CAEV,AAAU;CAEV,YAAYC,QAA0C;EACpD,MAAM,aAAa,OAAO,CAAC;EAC3B,KAAK,iBAAiB;EAEtB,+BAA+B,QAAQ,KAAK;EAC5C,KAAK,gBACH,QAAQ,iBAAiB,IAAI;EAE/B,MAAM,SAAS,KAAK,YAAY,OAAO;EACvC,KAAK,gBAAgB,UAAU,CAAE,GAAE,OAAO;CAC3C;CAMD,kBAAkBC,QAAwC;AACxD,SAAO,IAAI,iBAAiB;CAC7B;CAED,YAAYC,QAAgE;AAC1E,SAAO,QAAQ,UAAU,uBAAuB,iBAAiB;CAClE;CAED,YACEA,QACwB;EACxB,MAAM,SAAS,KAAK,YAAY,OAAO;AACvC,MAAI,OACF,QAAO,KAAK,kBAAkB,OAAO;MAErC,QAAO,KAAK,sBAAsB,OAAO;CAE5C;CAED,gBACEC,QACAC,QACA;EACA,KAAK,aAAa,IAAI,oBACpB;GAAE,GAAG;GAAQ,GAAG;EAAM,GACtB,KAAK,QACL,QACA;EAGF,KAAK,qBAAqB,IAAI,oBAC5B;GAAE,GAAG;GAAQ,GAAG;EAAM,GACtB,KAAK,QACL,QACA;CAEH;CAED,IAAI,WAA+B;AACjC,SAAO,KAAK,WAAW;CACxB;CAGD,WAAW;AACT,SAAO;CACR;CAED,aAAaC,QAAgC;AAC3C,SAAO;CACR;;;;;;;CAQD,MAAM,MACJA,QACAC,SACiB;EACjB,MAAM,aAAa,kBAAkB,MAAM,QAAQ;EACnD,MAAM,SAAS,MAAM,KAAK,WAAW,QAAQ,QAAQ,YAAY,QAAQ;EACzE,MAAM,MAAM,KAAK,WAAW,IAAI,iBAAiB,OAAO;AACxD,SAAO;CACR;CAKD,OAAO,gBACLC,OACAC,SACwB;EAExB,MAAM,SAAS,QAAQ,2BAA2B,MAAM;EACxD,MAAM,CAAC,gBAAgB,YAAY,GACjC,KAAK,uCAAuC,QAAQ;EACtD,MAAM,mBAAmB,MAAM,gBAAgB,UAC7C,eAAe,WACf,KAAK,WACL,eAAe,MACf,KAAK,MACL,eAAe,UACf,KAAK,UACL,EAAE,SAAS,KAAK,QAAS,EAC1B;EACD,MAAM,QAAQ;GACZ,SAAS;GACT,mBAAmB,MAAM,iBAAiB,YAAY;GACtD,YAAY;EACb;EACD,MAAM,cAAc,MAAM,kBAAkB,eAC1C,KAAK,QAAQ,EACb,CAAC,OAAO,UAAU,AAAC,GACnB,QACA,QACA,OACA,QACA,QACA,eAAe,QAChB;EACD,IAAI,aAAa,IAAI,gBAAgB,EACnC,MAAM,GACP;EACD,MAAM,YAAY,KAAK,iBAAiB;AACxC,MAAI;AACF,cAAW,MAAM,SAAS,UAAU,gBAAgB,OAAO,QAAQ,EAAE;IACnE,MAAM,cAAc,KAAK,WAAW,IAAI,cAAc,MAAM;IAC5D,MAAM,kBAAkB,IAAI,gBAAgB,EAC1C,MAAM,YACP;IACD,aAAa,WAAW,OAAO,gBAAgB;IAC/C,MAAM;GACP;EACF,SAAQ,KAAK;GACZ,MAAM,QAAQ,KACX,eAAe,CAAE,GAAE,IAAI,CAAC,eAAe,YAAY,eAAe,IAAI,CAAC,CACzE;AACD,SAAM;EACP;EACD,MAAM,QAAQ,KACX,eAAe,CAAE,GAAE,IAAI,CAAC,eACvB,YAAY,aAAa,EACvB,aAAa,CAAC,CAAC,UAAW,CAAC,EAC5B,EAAC,CACH,CACF;CACF;CAED,MAAM,gBACJC,UACAC,SACAC,YACsB;EACtB,MAAM,EAAE,SAAS,GAAG,SAAS;EAC7B,MAAM,SAAS,MAAM,KAAK,WAAW,QACnC,SACA,CAAE,GACF,QACD;EACD,MAAM,MAAM,KAAK,WAAW,IAAI,sBAAsB,OAAO;AAC7D,SAAO;CACR;;;;;;;CAQD,AAAU,kBAA+C;AACvD,SAAO,IAAI,gBAA6B;GACtC,GAAG,KAAK;GACR,YAAY,KAAK;EAClB;CACF;CAID,MAAM,OACJJ,OACAC,SACiB;EACjB,MAAM,SAAS,MAAM,KAAK,gBAAgB,OAAO,QAAQ;EACzD,IAAI,kBAAkB;AACtB,aAAW,MAAM,SAAS,QACxB,mBAAmB;AAErB,SAAO;CACR;AACF"}