@langchain/google-common 0.2.18 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/CHANGELOG.md +17 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +251 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +98 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +87 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +245 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/types-anthropic.d.cts +229 -0
  73. package/dist/types-anthropic.d.cts.map +1 -0
  74. package/dist/types-anthropic.d.ts +221 -215
  75. package/dist/types-anthropic.d.ts.map +1 -0
  76. package/dist/types.cjs +51 -62
  77. package/dist/types.cjs.map +1 -0
  78. package/dist/types.d.cts +748 -0
  79. package/dist/types.d.cts.map +1 -0
  80. package/dist/types.d.ts +669 -656
  81. package/dist/types.d.ts.map +1 -0
  82. package/dist/types.js +46 -45
  83. package/dist/types.js.map +1 -0
  84. package/dist/utils/anthropic.cjs +598 -821
  85. package/dist/utils/anthropic.cjs.map +1 -0
  86. package/dist/utils/anthropic.js +597 -818
  87. package/dist/utils/anthropic.js.map +1 -0
  88. package/dist/utils/common.cjs +130 -211
  89. package/dist/utils/common.cjs.map +1 -0
  90. package/dist/utils/common.d.cts +13 -0
  91. package/dist/utils/common.d.cts.map +1 -0
  92. package/dist/utils/common.d.ts +12 -7
  93. package/dist/utils/common.d.ts.map +1 -0
  94. package/dist/utils/common.js +128 -207
  95. package/dist/utils/common.js.map +1 -0
  96. package/dist/utils/failed_handler.cjs +28 -30
  97. package/dist/utils/failed_handler.cjs.map +1 -0
  98. package/dist/utils/failed_handler.d.cts +9 -0
  99. package/dist/utils/failed_handler.d.cts.map +1 -0
  100. package/dist/utils/failed_handler.d.ts +8 -2
  101. package/dist/utils/failed_handler.d.ts.map +1 -0
  102. package/dist/utils/failed_handler.js +28 -28
  103. package/dist/utils/failed_handler.js.map +1 -0
  104. package/dist/utils/gemini.cjs +1020 -1488
  105. package/dist/utils/gemini.cjs.map +1 -0
  106. package/dist/utils/gemini.d.cts +51 -0
  107. package/dist/utils/gemini.d.cts.map +1 -0
  108. package/dist/utils/gemini.d.ts +51 -48
  109. package/dist/utils/gemini.d.ts.map +1 -0
  110. package/dist/utils/gemini.js +1015 -1479
  111. package/dist/utils/gemini.js.map +1 -0
  112. package/dist/utils/index.cjs +38 -23
  113. package/dist/utils/index.d.cts +8 -0
  114. package/dist/utils/index.d.ts +8 -7
  115. package/dist/utils/index.js +8 -7
  116. package/dist/utils/palm.d.cts +11 -0
  117. package/dist/utils/palm.d.cts.map +1 -0
  118. package/dist/utils/palm.d.ts +9 -4
  119. package/dist/utils/palm.d.ts.map +1 -0
  120. package/dist/utils/safety.cjs +13 -22
  121. package/dist/utils/safety.cjs.map +1 -0
  122. package/dist/utils/safety.d.cts +12 -0
  123. package/dist/utils/safety.d.cts.map +1 -0
  124. package/dist/utils/safety.d.ts +10 -4
  125. package/dist/utils/safety.d.ts.map +1 -0
  126. package/dist/utils/safety.js +13 -19
  127. package/dist/utils/safety.js.map +1 -0
  128. package/dist/utils/stream.cjs +296 -475
  129. package/dist/utils/stream.cjs.map +1 -0
  130. package/dist/utils/stream.d.cts +165 -0
  131. package/dist/utils/stream.d.cts.map +1 -0
  132. package/dist/utils/stream.d.ts +156 -131
  133. package/dist/utils/stream.d.ts.map +1 -0
  134. package/dist/utils/stream.js +293 -469
  135. package/dist/utils/stream.js.map +1 -0
  136. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  137. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  138. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  139. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  141. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  143. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  144. package/package.json +69 -85
  145. package/dist/types-anthropic.cjs +0 -2
  146. package/dist/types-anthropic.js +0 -1
  147. package/dist/utils/anthropic.d.ts +0 -4
  148. package/dist/utils/palm.cjs +0 -2
  149. package/dist/utils/palm.js +0 -1
  150. package/experimental/media.cjs +0 -1
  151. package/experimental/media.d.cts +0 -1
  152. package/experimental/media.d.ts +0 -1
  153. package/experimental/media.js +0 -1
  154. package/experimental/utils/media_core.cjs +0 -1
  155. package/experimental/utils/media_core.d.cts +0 -1
  156. package/experimental/utils/media_core.d.ts +0 -1
  157. package/experimental/utils/media_core.js +0 -1
  158. package/index.cjs +0 -1
  159. package/index.d.cts +0 -1
  160. package/index.d.ts +0 -1
  161. package/index.js +0 -1
  162. package/types.cjs +0 -1
  163. package/types.d.cts +0 -1
  164. package/types.d.ts +0 -1
  165. package/types.js +0 -1
  166. package/utils.cjs +0 -1
  167. package/utils.d.cts +0 -1
  168. package/utils.d.ts +0 -1
  169. package/utils.js +0 -1
package/dist/llms.cjs CHANGED
@@ -1,246 +1,159 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.GoogleBaseLLM = void 0;
- const manager_1 = require("@langchain/core/callbacks/manager");
- const llms_1 = require("@langchain/core/language_models/llms");
- const outputs_1 = require("@langchain/core/outputs");
- const env_1 = require("@langchain/core/utils/env");
- const connection_js_1 = require("./connection.cjs");
- const common_js_1 = require("./utils/common.cjs");
- const gemini_js_1 = require("./utils/gemini.cjs");
- const auth_js_1 = require("./auth.cjs");
- const failed_handler_js_1 = require("./utils/failed_handler.cjs");
- const chat_models_js_1 = require("./chat_models.cjs");
- class GoogleLLMConnection extends connection_js_1.AbstractGoogleLLMConnection {
-     async formatContents(input, _parameters) {
-         const parts = await this.api.messageContentToParts(input);
-         const contents = [
-             {
-                 role: "user", // Required by Vertex AI
-                 parts,
-             },
-         ];
-         return contents;
-     }
- }
- class ProxyChatGoogle extends chat_models_js_1.ChatGoogleBase {
-     constructor(fields) {
-         super(fields);
-     }
-     buildAbstractedClient(fields) {
-         return fields.connection.client;
-     }
- }
+ const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
+ const require_gemini = require('./utils/gemini.cjs');
+ const require_common = require('./utils/common.cjs');
+ const require_failed_handler = require('./utils/failed_handler.cjs');
+ const require_connection = require('./connection.cjs');
+ const require_auth = require('./auth.cjs');
+ const require_chat_models = require('./chat_models.cjs');
+ const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
+ const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
+ const __langchain_core_callbacks_manager = require_rolldown_runtime.__toESM(require("@langchain/core/callbacks/manager"));
+ const __langchain_core_language_models_llms = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/llms"));
+
+ //#region src/llms.ts
+ var GoogleLLMConnection = class extends require_connection.AbstractGoogleLLMConnection {
+ 	async formatContents(input, _parameters) {
+ 		const parts = await this.api.messageContentToParts(input);
+ 		const contents = [{
+ 			role: "user",
+ 			parts
+ 		}];
+ 		return contents;
+ 	}
+ };
+ var ProxyChatGoogle = class extends require_chat_models.ChatGoogleBase {
+ 	constructor(fields) {
+ 		super(fields);
+ 	}
+ 	buildAbstractedClient(fields) {
+ 		return fields.connection.client;
+ 	}
+ };
  /**
-  * Integration with an LLM.
-  */
- class GoogleBaseLLM extends llms_1.LLM {
-     // Used for tracing, replace with the same name as your class
-     static lc_name() {
-         return "GoogleLLM";
-     }
-     get lc_secrets() {
-         return {
-             authOptions: "GOOGLE_AUTH_OPTIONS",
-         };
-     }
-     constructor(fields) {
-         super((0, failed_handler_js_1.ensureParams)(fields));
-         Object.defineProperty(this, "originalFields", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "lc_serializable", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: true
-         });
-         Object.defineProperty(this, "modelName", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: "gemini-pro"
-         });
-         Object.defineProperty(this, "model", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: "gemini-pro"
-         });
-         Object.defineProperty(this, "temperature", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: 0.7
-         });
-         Object.defineProperty(this, "maxOutputTokens", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: 1024
-         });
-         Object.defineProperty(this, "topP", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: 0.8
-         });
-         Object.defineProperty(this, "topK", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: 40
-         });
-         Object.defineProperty(this, "stopSequences", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: []
-         });
-         Object.defineProperty(this, "safetySettings", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: []
-         });
-         Object.defineProperty(this, "safetyHandler", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "responseMimeType", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: "text/plain"
-         });
-         Object.defineProperty(this, "connection", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "streamedConnection", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         this.originalFields = fields;
-         (0, common_js_1.copyAndValidateModelParamsInto)(fields, this);
-         this.safetyHandler =
-             fields?.safetyHandler ?? new gemini_js_1.DefaultGeminiSafetyHandler();
-         const client = this.buildClient(fields);
-         this.buildConnection(fields ?? {}, client);
-     }
-     buildApiKeyClient(apiKey) {
-         return new auth_js_1.ApiKeyGoogleAuth(apiKey);
-     }
-     buildApiKey(fields) {
-         return fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
-     }
-     buildClient(fields) {
-         const apiKey = this.buildApiKey(fields);
-         if (apiKey) {
-             return this.buildApiKeyClient(apiKey);
-         }
-         else {
-             return this.buildAbstractedClient(fields);
-         }
-     }
-     buildConnection(fields, client) {
-         this.connection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, false);
-         this.streamedConnection = new GoogleLLMConnection({ ...fields, ...this }, this.caller, client, true);
-     }
-     get platform() {
-         return this.connection.platform;
-     }
-     // Replace
-     _llmType() {
-         return "googlellm";
-     }
-     formatPrompt(prompt) {
-         return prompt;
-     }
-     /**
-      * For some given input string and options, return a string output.
-      *
-      * Despite the fact that `invoke` is overridden below, we still need this
-      * in order to handle public APi calls to `generate()`.
-      */
-     async _call(prompt, options) {
-         const parameters = (0, common_js_1.copyAIModelParams)(this, options);
-         const result = await this.connection.request(prompt, parameters, options);
-         const ret = this.connection.api.responseToString(result);
-         return ret;
-     }
-     // Normally, you should not override this method and instead should override
-     // _streamResponseChunks. We are doing so here to allow for multimodal inputs into
-     // the LLM.
-     async *_streamIterator(input, options) {
-         // TODO: Refactor callback setup and teardown code into core
-         const prompt = llms_1.BaseLLM._convertInputToPromptValue(input);
-         const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
-         const callbackManager_ = await manager_1.CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-         const extra = {
-             options: callOptions,
-             invocation_params: this?.invocationParams(callOptions),
-             batch_size: 1,
-         };
-         const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
-         let generation = new outputs_1.GenerationChunk({
-             text: "",
-         });
-         const proxyChat = this.createProxyChat();
-         try {
-             for await (const chunk of proxyChat._streamIterator(input, options)) {
-                 const stringValue = this.connection.api.chunkToString(chunk);
-                 const generationChunk = new outputs_1.GenerationChunk({
-                     text: stringValue,
-                 });
-                 generation = generation.concat(generationChunk);
-                 yield stringValue;
-             }
-         }
-         catch (err) {
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-             throw err;
-         }
-         await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
-             generations: [[generation]],
-         })));
-     }
-     async predictMessages(messages, options, _callbacks) {
-         const { content } = messages[0];
-         const result = await this.connection.request(content, {}, options);
-         const ret = this.connection.api.responseToBaseMessage(result);
-         return ret;
-     }
-     /**
-      * Internal implementation detail to allow Google LLMs to support
-      * multimodal input by delegating to the chat model implementation.
-      *
-      * TODO: Replace with something less hacky.
-      */
-     createProxyChat() {
-         return new ProxyChatGoogle({
-             ...this.originalFields,
-             connection: this.connection,
-         });
-     }
-     // TODO: Remove the need to override this - we are doing it to
-     // allow the LLM to handle multimodal types of input.
-     async invoke(input, options) {
-         const stream = await this._streamIterator(input, options);
-         let generatedOutput = "";
-         for await (const chunk of stream) {
-             generatedOutput += chunk;
-         }
-         return generatedOutput;
-     }
- }
+ * Integration with an LLM.
+ */
+ var GoogleBaseLLM = class extends __langchain_core_language_models_llms.LLM {
+ 	static lc_name() {
+ 		return "GoogleLLM";
+ 	}
+ 	get lc_secrets() {
+ 		return { authOptions: "GOOGLE_AUTH_OPTIONS" };
+ 	}
+ 	originalFields;
+ 	lc_serializable = true;
+ 	modelName = "gemini-pro";
+ 	model = "gemini-pro";
+ 	temperature = .7;
+ 	maxOutputTokens = 1024;
+ 	topP = .8;
+ 	topK = 40;
+ 	stopSequences = [];
+ 	safetySettings = [];
+ 	safetyHandler;
+ 	responseMimeType = "text/plain";
+ 	connection;
+ 	streamedConnection;
+ 	constructor(fields) {
+ 		super(require_failed_handler.ensureParams(fields));
+ 		this.originalFields = fields;
+ 		require_common.copyAndValidateModelParamsInto(fields, this);
+ 		this.safetyHandler = fields?.safetyHandler ?? new require_gemini.DefaultGeminiSafetyHandler();
+ 		const client = this.buildClient(fields);
+ 		this.buildConnection(fields ?? {}, client);
+ 	}
+ 	buildApiKeyClient(apiKey) {
+ 		return new require_auth.ApiKeyGoogleAuth(apiKey);
+ 	}
+ 	buildApiKey(fields) {
+ 		return fields?.apiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
+ 	}
+ 	buildClient(fields) {
+ 		const apiKey = this.buildApiKey(fields);
+ 		if (apiKey) return this.buildApiKeyClient(apiKey);
+ 		else return this.buildAbstractedClient(fields);
+ 	}
+ 	buildConnection(fields, client) {
+ 		this.connection = new GoogleLLMConnection({
+ 			...fields,
+ 			...this
+ 		}, this.caller, client, false);
+ 		this.streamedConnection = new GoogleLLMConnection({
+ 			...fields,
+ 			...this
+ 		}, this.caller, client, true);
+ 	}
+ 	get platform() {
+ 		return this.connection.platform;
+ 	}
+ 	_llmType() {
+ 		return "googlellm";
+ 	}
+ 	formatPrompt(prompt) {
+ 		return prompt;
+ 	}
+ 	/**
+ 	* For some given input string and options, return a string output.
+ 	*
+ 	* Despite the fact that `invoke` is overridden below, we still need this
+ 	* in order to handle public APi calls to `generate()`.
+ 	*/
+ 	async _call(prompt, options) {
+ 		const parameters = require_common.copyAIModelParams(this, options);
+ 		const result = await this.connection.request(prompt, parameters, options);
+ 		const ret = this.connection.api.responseToString(result);
+ 		return ret;
+ 	}
+ 	async *_streamIterator(input, options) {
+ 		const prompt = __langchain_core_language_models_llms.BaseLLM._convertInputToPromptValue(input);
+ 		const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
+ 		const callbackManager_ = await __langchain_core_callbacks_manager.CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
+ 		const extra = {
+ 			options: callOptions,
+ 			invocation_params: this?.invocationParams(callOptions),
+ 			batch_size: 1
+ 		};
+ 		const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], void 0, void 0, extra, void 0, void 0, runnableConfig.runName);
+ 		let generation = new __langchain_core_outputs.GenerationChunk({ text: "" });
+ 		const proxyChat = this.createProxyChat();
+ 		try {
+ 			for await (const chunk of proxyChat._streamIterator(input, options)) {
+ 				const stringValue = this.connection.api.chunkToString(chunk);
+ 				const generationChunk = new __langchain_core_outputs.GenerationChunk({ text: stringValue });
+ 				generation = generation.concat(generationChunk);
+ 				yield stringValue;
+ 			}
+ 		} catch (err) {
+ 			await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
+ 			throw err;
+ 		}
+ 		await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]] })));
+ 	}
+ 	async predictMessages(messages, options, _callbacks) {
+ 		const { content } = messages[0];
+ 		const result = await this.connection.request(content, {}, options);
+ 		const ret = this.connection.api.responseToBaseMessage(result);
+ 		return ret;
+ 	}
+ 	/**
+ 	* Internal implementation detail to allow Google LLMs to support
+ 	* multimodal input by delegating to the chat model implementation.
+ 	*
+ 	* TODO: Replace with something less hacky.
+ 	*/
+ 	createProxyChat() {
+ 		return new ProxyChatGoogle({
+ 			...this.originalFields,
+ 			connection: this.connection
+ 		});
+ 	}
+ 	async invoke(input, options) {
+ 		const stream = await this._streamIterator(input, options);
+ 		let generatedOutput = "";
+ 		for await (const chunk of stream) generatedOutput += chunk;
+ 		return generatedOutput;
+ 	}
+ };
+
+ //#endregion
  exports.GoogleBaseLLM = GoogleBaseLLM;
+ //# sourceMappingURL=llms.cjs.map
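
The 1.0.0 bundle gets its ESM/CJS interop from the new `dist/_virtual/rolldown_runtime.cjs` module (added in this release, +25 lines, not rendered above) instead of tsc's `"use strict"`/`__esModule` preamble. As a rough sketch only — this is the conventional shape of such helpers, not the actual runtime contents — a `__toESM` wrapper behaves roughly like:

```js
// Hypothetical minimal __toESM, for illustration; not the real
// dist/_virtual/rolldown_runtime.cjs implementation.
const __toESM = (mod) => {
  // Transpiled ESM already carries an __esModule marker; pass it through.
  if (mod && mod.__esModule) return mod;
  // Otherwise wrap the CJS exports object so both `default` and named
  // accesses resolve after bundling.
  return { __esModule: true, default: mod, ...mod };
};

// Mirrors the bundled requires in the diff above:
const __langchain_core_outputs = __toESM(require("@langchain/core/outputs"));
const chunk = new __langchain_core_outputs.GenerationChunk({ text: "" });
```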
package/dist/llms.d.cts ADDED
@@ -0,0 +1,72 @@
+ import { GeminiContent, GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAIResponseMimeType, GoogleAISafetyHandler, GoogleAISafetySetting, GoogleBaseLLMInput, GooglePlatformType } from "./types.cjs";
+ import { GoogleAbstractedClient } from "./auth.cjs";
+ import { AbstractGoogleLLMConnection } from "./connection.cjs";
+ import { ChatGoogleBase } from "./chat_models.cjs";
+ import { LLM } from "@langchain/core/language_models/llms";
+ import { BaseMessage, MessageContent } from "@langchain/core/messages";
+ import { BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
+ import { Callbacks } from "@langchain/core/callbacks/manager";
+
+ //#region src/llms.d.ts
+ declare class GoogleLLMConnection<AuthOptions> extends AbstractGoogleLLMConnection<MessageContent, AuthOptions> {
+ 	formatContents(input: MessageContent, _parameters: GoogleAIModelParams): Promise<GeminiContent[]>;
+ }
+ /**
+ * Integration with an LLM.
+ */
+ declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguageModelCallOptions> implements GoogleBaseLLMInput<AuthOptions> {
+ 	// Used for tracing, replace with the same name as your class
+ 	static lc_name(): string;
+ 	get lc_secrets(): {
+ 		[key: string]: string;
+ 	} | undefined;
+ 	originalFields?: GoogleBaseLLMInput<AuthOptions>;
+ 	lc_serializable: boolean;
+ 	modelName: string;
+ 	model: string;
+ 	temperature: number;
+ 	maxOutputTokens: number;
+ 	topP: number;
+ 	topK: number;
+ 	stopSequences: string[];
+ 	safetySettings: GoogleAISafetySetting[];
+ 	safetyHandler: GoogleAISafetyHandler;
+ 	responseMimeType: GoogleAIResponseMimeType;
+ 	protected connection: GoogleLLMConnection<AuthOptions>;
+ 	protected streamedConnection: GoogleLLMConnection<AuthOptions>;
+ 	constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
+ 	abstract buildAbstractedClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+ 	buildApiKeyClient(apiKey: string): GoogleAbstractedClient;
+ 	buildApiKey(fields?: GoogleAIBaseLLMInput<AuthOptions>): string | undefined;
+ 	buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
+ 	buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
+ 	get platform(): GooglePlatformType;
+ 	// Replace
+ 	_llmType(): string;
+ 	formatPrompt(prompt: string): MessageContent;
+ 	/**
+ 	* For some given input string and options, return a string output.
+ 	*
+ 	* Despite the fact that `invoke` is overridden below, we still need this
+ 	* in order to handle public APi calls to `generate()`.
+ 	*/
+ 	_call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+ 	// Normally, you should not override this method and instead should override
+ 	// _streamResponseChunks. We are doing so here to allow for multimodal inputs into
+ 	// the LLM.
+ 	_streamIterator(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): AsyncGenerator<string>;
+ 	predictMessages(messages: BaseMessage[], options?: string[] | BaseLanguageModelCallOptions, _callbacks?: Callbacks): Promise<BaseMessage>;
+ 	/**
+ 	* Internal implementation detail to allow Google LLMs to support
+ 	* multimodal input by delegating to the chat model implementation.
+ 	*
+ 	* TODO: Replace with something less hacky.
+ 	*/
+ 	protected createProxyChat(): ChatGoogleBase<AuthOptions>;
+ 	// TODO: Remove the need to override this - we are doing it to
+ 	// allow the LLM to handle multimodal types of input.
+ 	invoke(input: BaseLanguageModelInput, options?: BaseLanguageModelCallOptions): Promise<string>;
+ }
+ //#endregion
+ export { GoogleBaseLLM };
+ //# sourceMappingURL=llms.d.cts.map
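
The declaration keeps `buildAbstractedClient` abstract, so `@langchain/google-common` never constructs a transport itself; platform packages supply one. A minimal conforming subclass might look like the sketch below (the `customClient` field is made up for illustration, and this assumes the package root still re-exports `GoogleBaseLLM` as in 0.2.x):

```js
// Sketch of a concrete subclass of the abstract GoogleBaseLLM.
// Real platform packages (e.g. @langchain/google-gauth) build an
// auth-backed client here instead.
const { GoogleBaseLLM } = require("@langchain/google-common");

class MyGoogleLLM extends GoogleBaseLLM {
  buildAbstractedClient(fields) {
    // `customClient` is a hypothetical field for this sketch; whatever is
    // returned must satisfy the GoogleAbstractedClient interface from
    // ./auth.cjs. Note buildClient() only calls this when no apiKey or
    // GOOGLE_API_KEY is available.
    return fields.customClient;
  }
}
```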