@langchain/google-vertexai-web 0.2.18 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +17 -0
  2. package/LICENSE +6 -6
  3. package/README.md +1 -1
  4. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  5. package/dist/chat_models.cjs +307 -305
  6. package/dist/chat_models.cjs.map +1 -0
  7. package/dist/chat_models.d.cts +301 -0
  8. package/dist/chat_models.d.cts.map +1 -0
  9. package/dist/chat_models.d.ts +12 -7
  10. package/dist/chat_models.d.ts.map +1 -0
  11. package/dist/chat_models.js +306 -301
  12. package/dist/chat_models.js.map +1 -0
  13. package/dist/embeddings.cjs +21 -18
  14. package/dist/embeddings.cjs.map +1 -0
  15. package/dist/embeddings.d.cts +19 -0
  16. package/dist/embeddings.d.cts.map +1 -0
  17. package/dist/embeddings.d.ts +11 -6
  18. package/dist/embeddings.d.ts.map +1 -0
  19. package/dist/embeddings.js +21 -15
  20. package/dist/embeddings.js.map +1 -0
  21. package/dist/index.cjs +7 -19
  22. package/dist/index.d.cts +4 -0
  23. package/dist/index.d.ts +4 -3
  24. package/dist/index.js +5 -3
  25. package/dist/llms.cjs +26 -24
  26. package/dist/llms.cjs.map +1 -0
  27. package/dist/llms.d.cts +20 -0
  28. package/dist/llms.d.cts.map +1 -0
  29. package/dist/llms.d.ts +12 -7
  30. package/dist/llms.d.ts.map +1 -0
  31. package/dist/llms.js +25 -20
  32. package/dist/llms.js.map +1 -0
  33. package/dist/types.cjs +9 -17
  34. package/dist/types.d.cts +1 -0
  35. package/dist/types.d.ts +1 -1
  36. package/dist/types.js +1 -1
  37. package/dist/utils.cjs +9 -17
  38. package/dist/utils.d.cts +1 -0
  39. package/dist/utils.d.ts +1 -1
  40. package/dist/utils.js +1 -1
  41. package/package.json +55 -72
  42. package/index.cjs +0 -1
  43. package/index.d.cts +0 -1
  44. package/index.d.ts +0 -1
  45. package/index.js +0 -1
  46. package/types.cjs +0 -1
  47. package/types.d.cts +0 -1
  48. package/types.d.ts +0 -1
  49. package/types.js +0 -1
  50. package/utils.cjs +0 -1
  51. package/utils.d.cts +0 -1
  52. package/utils.d.ts +0 -1
  53. package/utils.js +0 -1
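Most of the churn above is build-output reshuffling: 1.0.0 adds sourcemaps (`*.js.map`, `*.cjs.map`), CommonJS type declarations (`*.d.cts`), and a `rolldown_runtime.cjs` helper, while dropping the one-line top-level re-export stubs (`package/index.cjs` through `package/utils.js`) that 0.2.18 shipped. As a rough sketch of what the dual ESM/CJS layout means for consumers — assuming the `package.json` `exports` map (changed in this release but not expanded here) routes `import` to `dist/index.js` and `require` to `dist/index.cjs` — usage is unchanged from either module system:

```typescript
// ESM: resolved via the "import" condition, typed by dist/index.d.ts.
import { ChatVertexAI } from "@langchain/google-vertexai-web";

// CJS: resolved via the "require" condition, typed by dist/index.d.cts.
// const { ChatVertexAI } = require("@langchain/google-vertexai-web");

const llm = new ChatVertexAI({
  model: "gemini-1.5-pro",
  authOptions: {
    credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
  },
});
```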
package/dist/chat_models.js
@@ -1,303 +1,308 @@
  import { ChatGoogle } from "@langchain/google-webauth";
+
+ //#region src/chat_models.ts
  /**
- * Integration with Google Vertex AI chat models in web environments.
- *
- * Setup:
- * Install `@langchain/google-vertexai-web` and set your stringified
- * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
- *
- * ```bash
- * npm install @langchain/google-vertexai-web
- * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
- * ```
- *
- * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai_web.index.ChatVertexAI.html#constructor)
- *
- * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
- *
- * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
- * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:
- *
- * ```typescript
- * // If binding tools along with other options, chain `.bindTools` and `.withConfig`
- * const llmWithArgsBound = llm.bindTools([...]) // tools array
- *   .withConfig({
- *     stop: ["\n"], // other call options
- *   });
- *
- * // When calling `.bindTools`, call options should be passed via the second argument
- * const llmWithTools = llm.bindTools(
- *   [...],
- *   {
- *     tool_choice: "auto",
- *   }
- * );
- * ```
- *
- * ## Examples
- *
- * <details open>
- * <summary><strong>Instantiate</strong></summary>
- *
- * ```typescript
- * import { ChatVertexAI } from '@langchain/google-vertexai-web';
- *
- * const llm = new ChatVertexAI({
- *   model: "gemini-1.5-pro",
- *   temperature: 0,
- *   authOptions: {
- *     credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
- *   },
- *   // other params...
- * });
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Invoking</strong></summary>
- *
- * ```typescript
- * const input = `Translate "I love programming" into French.`;
- *
- * // Models also accept a list of chat messages or a formatted prompt
- * const result = await llm.invoke(input);
- * console.log(result);
- * ```
- *
- * ```txt
- * AIMessageChunk {
- *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
- *   "additional_kwargs": {},
- *   "response_metadata": {},
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": [],
- *   "usage_metadata": {
- *     "input_tokens": 9,
- *     "output_tokens": 63,
- *     "total_tokens": 72
- *   }
- * }
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Streaming Chunks</strong></summary>
- *
- * ```typescript
- * for await (const chunk of await llm.stream(input)) {
- *   console.log(chunk);
- * }
- * ```
- *
- * ```txt
- * AIMessageChunk {
- *   "content": "\"",
- *   "additional_kwargs": {},
- *   "response_metadata": {},
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": []
- * }
- * AIMessageChunk {
- *   "content": "J'adore programmer\" \n",
- *   "additional_kwargs": {},
- *   "response_metadata": {},
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": []
- * }
- * AIMessageChunk {
- *   "content": "",
- *   "additional_kwargs": {},
- *   "response_metadata": {},
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": []
- * }
- * AIMessageChunk {
- *   "content": "",
- *   "additional_kwargs": {},
- *   "response_metadata": {
- *     "finishReason": "stop"
- *   },
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": [],
- *   "usage_metadata": {
- *     "input_tokens": 9,
- *     "output_tokens": 8,
- *     "total_tokens": 17
- *   }
- * }
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Aggregate Streamed Chunks</strong></summary>
- *
- * ```typescript
- * import { AIMessageChunk } from '@langchain/core/messages';
- * import { concat } from '@langchain/core/utils/stream';
- *
- * const stream = await llm.stream(input);
- * let full: AIMessageChunk | undefined;
- * for await (const chunk of stream) {
- *   full = !full ? chunk : concat(full, chunk);
- * }
- * console.log(full);
- * ```
- *
- * ```txt
- * AIMessageChunk {
- *   "content": "\"J'adore programmer\" \n",
- *   "additional_kwargs": {},
- *   "response_metadata": {
- *     "finishReason": "stop"
- *   },
- *   "tool_calls": [],
- *   "tool_call_chunks": [],
- *   "invalid_tool_calls": [],
- *   "usage_metadata": {
- *     "input_tokens": 9,
- *     "output_tokens": 8,
- *     "total_tokens": 17
- *   }
- * }
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Bind tools</strong></summary>
- *
- * ```typescript
- * import { z } from 'zod';
- *
- * const GetWeather = {
- *   name: "GetWeather",
- *   description: "Get the current weather in a given location",
- *   schema: z.object({
- *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
- *   }),
- * }
- *
- * const GetPopulation = {
- *   name: "GetPopulation",
- *   description: "Get the current population in a given location",
- *   schema: z.object({
- *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
- *   }),
- * }
- *
- * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
- * const aiMsg = await llmWithTools.invoke(
- *   "Which city is hotter today and which is bigger: LA or NY?"
- * );
- * console.log(aiMsg.tool_calls);
- * ```
- *
- * ```txt
- * [
- *   {
- *     name: 'GetPopulation',
- *     args: { location: 'New York City, NY' },
- *     id: '33c1c1f47e2f492799c77d2800a43912',
- *     type: 'tool_call'
- *   }
- * ]
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Structured Output</strong></summary>
- *
- * ```typescript
- * import { z } from 'zod';
- *
- * const Joke = z.object({
- *   setup: z.string().describe("The setup of the joke"),
- *   punchline: z.string().describe("The punchline to the joke"),
- *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
- * }).describe('Joke to tell user.');
- *
- * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
- * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
- * console.log(jokeResult);
- * ```
- *
- * ```txt
- * {
- *   setup: 'What do you call a cat that loves to bowl?',
- *   punchline: 'An alley cat!'
- * }
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Usage Metadata</strong></summary>
- *
- * ```typescript
- * const aiMsgForMetadata = await llm.invoke(input);
- * console.log(aiMsgForMetadata.usage_metadata);
- * ```
- *
- * ```txt
- * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
- * ```
- * </details>
- *
- * <br />
- *
- * <details>
- * <summary><strong>Stream Usage Metadata</strong></summary>
- *
- * ```typescript
- * const streamForMetadata = await llm.stream(
- *   input,
- *   {
- *     streamUsage: true
- *   }
- * );
- * let fullForMetadata: AIMessageChunk | undefined;
- * for await (const chunk of streamForMetadata) {
- *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
- * }
- * console.log(fullForMetadata?.usage_metadata);
- * ```
- *
- * ```txt
- * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
- * ```
- * </details>
- *
- * <br />
- */
- export class ChatVertexAI extends ChatGoogle {
-   static lc_name() {
-     return "ChatVertexAI";
-   }
-   constructor(fields) {
-     super({
-       ...fields,
-       platformType: "gcp",
-     });
-     Object.defineProperty(this, "lc_namespace", {
-       enumerable: true,
-       configurable: true,
-       writable: true,
-       value: ["langchain", "chat_models", "vertexai"]
-     });
-   }
- }
+ * Integration with Google Vertex AI chat models in web environments.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai-web` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai-web
+ * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai_web.index.ChatVertexAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
+ * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:
+ *
+ * ```typescript
+ * // If binding tools along with other options, chain `.bindTools` and `.withConfig`
+ * const llmWithArgsBound = llm.bindTools([...]) // tools array
+ *   .withConfig({
+ *     stop: ["\n"], // other call options
+ *   });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai-web';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   authOptions: {
+ *     credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
+ *   },
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ */
+ var ChatVertexAI = class extends ChatGoogle {
+   lc_namespace = [
+     "langchain",
+     "chat_models",
+     "vertexai"
+   ];
+   static lc_name() {
+     return "ChatVertexAI";
+   }
+   constructor(fields) {
+     super({
+       ...fields,
+       platformType: "gcp"
+     });
+   }
+ };
+
+ //#endregion
+ export { ChatVertexAI };
+ //# sourceMappingURL=chat_models.js.map
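Setting aside the rolldown wrapper (the `//#region` markers and the `var ... = class` plus trailing `export { ChatVertexAI };` form), the one substantive change in this hunk is how `lc_namespace` is attached: 0.2.18 called `Object.defineProperty` in the constructor, while 1.0.0 declares a class field. A self-contained sketch (illustrative class names, no LangChain imports) of why the two forms are observably equivalent:

```typescript
// Pattern from the 0.2.18 output: define the property in the constructor.
class ViaDefineProperty {
  constructor() {
    Object.defineProperty(this, "lc_namespace", {
      enumerable: true,
      configurable: true,
      writable: true,
      value: ["langchain", "chat_models", "vertexai"],
    });
  }
}

// Pattern from the 1.0.0 output: a class field initializer, which likewise
// creates an own enumerable/writable/configurable property on each instance.
class ViaClassField {
  lc_namespace = ["langchain", "chat_models", "vertexai"];
}

// Identical property descriptors either way:
console.log(Object.getOwnPropertyDescriptor(new ViaDefineProperty(), "lc_namespace"));
console.log(Object.getOwnPropertyDescriptor(new ViaClassField(), "lc_namespace"));
```

Either way each instance ends up with an own, enumerable, writable `lc_namespace` array, which is what LangChain uses to build serialization identifiers.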
package/dist/chat_models.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"chat_models.js","names":["fields?: ChatVertexAIInput"],"sources":["../src/chat_models.ts"],"sourcesContent":["import { type ChatGoogleInput, ChatGoogle } from \"@langchain/google-webauth\";\n\n/**\n * Input to a Google Vertex AI chat model class.\n */\nexport interface ChatVertexAIInput extends ChatGoogleInput {}\n\n/**\n * Integration with Google Vertex AI chat models in web environments.\n *\n * Setup:\n * Install `@langchain/google-vertexai-web` and set your stringified\n * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.\n *\n * ```bash\n * npm install @langchain/google-vertexai-web\n * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n * ```\n *\n * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai_web.index.ChatVertexAI.html#constructor)\n *\n * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)\n *\n * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.\n * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:\n *\n * ```typescript\n * // If binding tools along with other options, chain `.bindTools` and `.withConfig`\n * const llmWithArgsBound = llm.bindTools([...]) // tools array\n * .withConfig({\n * stop: [\"\\n\"], // other call options\n * });\n *\n * // When calling `.bindTools`, call options should be passed via the second argument\n * const llmWithTools = llm.bindTools(\n * [...],\n * {\n * tool_choice: \"auto\",\n * }\n * );\n * ```\n *\n * ## Examples\n *\n * <details open>\n * <summary><strong>Instantiate</strong></summary>\n *\n * ```typescript\n * import { ChatVertexAI } from '@langchain/google-vertexai-web';\n *\n * const llm = new ChatVertexAI({\n * model: \"gemini-1.5-pro\",\n * temperature: 0,\n * authOptions: {\n * credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,\n * },\n * // other params...\n * });\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Invoking</strong></summary>\n *\n * ```typescript\n * const input = `Translate \"I love programming\" into French.`;\n *\n * // Models also accept a list of chat messages or a formatted prompt\n * const result = await llm.invoke(input);\n * console.log(result);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\\\"J'adore programmer\\\" \\n\\nHere's why this is the best translation:\\n\\n* **J'adore** means \\\"I love\\\" and conveys a strong passion.\\n* **Programmer** is the French verb for \\\"to program.\\\"\\n\\nThis translation is natural and idiomatic in French. 
\\n\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {},\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": [],\n * \"usage_metadata\": {\n * \"input_tokens\": 9,\n * \"output_tokens\": 63,\n * \"total_tokens\": 72\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Streaming Chunks</strong></summary>\n *\n * ```typescript\n * for await (const chunk of await llm.stream(input)) {\n * console.log(chunk);\n * }\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\\\"\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {},\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"J'adore programmer\\\" \\n\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {},\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {},\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": []\n * }\n * AIMessageChunk {\n * \"content\": \"\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": [],\n * \"usage_metadata\": {\n * \"input_tokens\": 9,\n * \"output_tokens\": 8,\n * \"total_tokens\": 17\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Aggregate Streamed Chunks</strong></summary>\n *\n * ```typescript\n * import { AIMessageChunk } from '@langchain/core/messages';\n * import { concat } from '@langchain/core/utils/stream';\n *\n * const stream = await llm.stream(input);\n * let full: AIMessageChunk | undefined;\n * for await (const chunk of stream) {\n * full = !full ? chunk : concat(full, chunk);\n * }\n * console.log(full);\n * ```\n *\n * ```txt\n * AIMessageChunk {\n * \"content\": \"\\\"J'adore programmer\\\" \\n\",\n * \"additional_kwargs\": {},\n * \"response_metadata\": {\n * \"finishReason\": \"stop\"\n * },\n * \"tool_calls\": [],\n * \"tool_call_chunks\": [],\n * \"invalid_tool_calls\": [],\n * \"usage_metadata\": {\n * \"input_tokens\": 9,\n * \"output_tokens\": 8,\n * \"total_tokens\": 17\n * }\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Bind tools</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const GetWeather = {\n * name: \"GetWeather\",\n * description: \"Get the current weather in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. San Francisco, CA\")\n * }),\n * }\n *\n * const GetPopulation = {\n * name: \"GetPopulation\",\n * description: \"Get the current population in a given location\",\n * schema: z.object({\n * location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\")\n * }),\n * }\n *\n * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);\n * const aiMsg = await llmWithTools.invoke(\n * \"Which city is hotter today and which is bigger: LA or NY?\"\n * );\n * console.log(aiMsg.tool_calls);\n * ```\n *\n * ```txt\n * [\n * {\n * name: 'GetPopulation',\n * args: { location: 'New York City, NY' },\n * id: '33c1c1f47e2f492799c77d2800a43912',\n * type: 'tool_call'\n * }\n * ]\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Structured Output</strong></summary>\n *\n * ```typescript\n * import { z } from 'zod';\n *\n * const Joke = z.object({\n * setup: z.string().describe(\"The setup of the joke\"),\n * punchline: z.string().describe(\"The punchline to the joke\"),\n * rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\")\n * }).describe('Joke to tell user.');\n *\n * const structuredLlm = llm.withStructuredOutput(Joke, { name: \"Joke\" });\n * const jokeResult = await structuredLlm.invoke(\"Tell me a joke about cats\");\n * console.log(jokeResult);\n * ```\n *\n * ```txt\n * {\n * setup: 'What do you call a cat that loves to bowl?',\n * punchline: 'An alley cat!'\n * }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Usage Metadata</strong></summary>\n *\n * ```typescript\n * const aiMsgForMetadata = await llm.invoke(input);\n * console.log(aiMsgForMetadata.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }\n * ```\n * </details>\n *\n * <br />\n *\n * <details>\n * <summary><strong>Stream Usage Metadata</strong></summary>\n *\n * ```typescript\n * const streamForMetadata = await llm.stream(\n * input,\n * {\n * streamUsage: true\n * }\n * );\n * let fullForMetadata: AIMessageChunk | undefined;\n * for await (const chunk of streamForMetadata) {\n * fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);\n * }\n * console.log(fullForMetadata?.usage_metadata);\n * ```\n *\n * ```txt\n * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }\n * ```\n * </details>\n *\n * <br />\n */\nexport class ChatVertexAI extends ChatGoogle {\n lc_namespace = [\"langchain\", \"chat_models\", \"vertexai\"];\n\n static lc_name() {\n return \"ChatVertexAI\";\n }\n\n constructor(fields?: ChatVertexAIInput) {\n super({\n ...fields,\n platformType: \"gcp\",\n });\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAoSA,IAAa,eAAb,cAAkC,WAAW;CAC3C,eAAe;EAAC;EAAa;EAAe;CAAW;CAEvD,OAAO,UAAU;AACf,SAAO;CACR;CAED,YAAYA,QAA4B;EACtC,MAAM;GACJ,GAAG;GACH,cAAc;EACf,EAAC;CACH;AACF"}
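One behavior both versions share: the constructor spreads the caller's fields and then pins `platformType: "gcp"`, so a caller-supplied `platformType` can never win — that pin is what points the underlying `ChatGoogle` class from `@langchain/google-webauth` at Vertex AI. A minimal sketch of the spread-then-pin pattern (the interface below is a simplified stand-in for `ChatVertexAIInput`, and the `"gai"` value is only an illustrative alternative):

```typescript
// Simplified stand-in for ChatVertexAIInput; the real interface extends
// ChatGoogleInput from @langchain/google-webauth.
interface FieldsSketch {
  model?: string;
  platformType?: string;
}

// Spread first, then pin: later properties win in an object spread, so
// platformType is always "gcp" regardless of what the caller passed.
function withGcpPlatform(fields?: FieldsSketch): FieldsSketch {
  return { ...fields, platformType: "gcp" };
}

console.log(withGcpPlatform({ model: "gemini-1.5-pro", platformType: "gai" }));
// -> { model: "gemini-1.5-pro", platformType: "gcp" }
```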