@langchain/google-vertexai-web 0.0.22 → 0.0.26
- package/dist/chat_models.cjs +283 -1
- package/dist/chat_models.d.ts +284 -2
- package/dist/chat_models.js +283 -1
- package/dist/embeddings.cjs +20 -0
- package/dist/embeddings.d.ts +14 -0
- package/dist/embeddings.js +16 -0
- package/dist/index.cjs +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/llms.cjs +2 -1
- package/dist/llms.d.ts +3 -2
- package/dist/llms.js +2 -1
- package/package.json +2 -2
package/dist/chat_models.cjs
CHANGED
@@ -3,7 +3,289 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.ChatVertexAI = void 0;
 const google_webauth_1 = require("@langchain/google-webauth");
 /**
- * Integration with
+ * Integration with Google Vertex AI chat models in web environments.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai-web` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai-web
+ * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_community_chat_models_googlevertexai_web.ChatGoogleVertexAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai-web';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   authOptions: {
+ *     credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
+ *   },
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
  */
 class ChatVertexAI extends google_webauth_1.ChatGoogle {
     static lc_name() {
package/dist/chat_models.d.ts
CHANGED
@@ -1,11 +1,293 @@
 import { type ChatGoogleInput, ChatGoogle } from "@langchain/google-webauth";
 /**
- * Input to chat model class.
+ * Input to a Google Vertex AI chat model class.
  */
 export interface ChatVertexAIInput extends ChatGoogleInput {
 }
 /**
- * Integration with
+ * Integration with Google Vertex AI chat models in web environments.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai-web` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai-web
+ * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_community_chat_models_googlevertexai_web.ChatGoogleVertexAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai-web';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   authOptions: {
+ *     credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
+ *   },
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
  */
 export declare class ChatVertexAI extends ChatGoogle {
     static lc_name(): string;
package/dist/chat_models.js
CHANGED
@@ -1,6 +1,288 @@
 import { ChatGoogle } from "@langchain/google-webauth";
 /**
- * Integration with
+ * Integration with Google Vertex AI chat models in web environments.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai-web` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_VERTEX_AI_WEB_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai-web
+ * export GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...}
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_community_chat_models_googlevertexai_web.ChatGoogleVertexAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai-web';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   authOptions: {
+ *     credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
+ *   },
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
  */
 export class ChatVertexAI extends ChatGoogle {
     static lc_name() {
package/dist/embeddings.cjs
ADDED
@@ -0,0 +1,20 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VertexAIEmbeddings = void 0;
+const google_webauth_1 = require("@langchain/google-webauth");
+/**
+ * Integration with a Google Vertex AI embeddings model using
+ * the "@langchain/google-webauth" package for auth.
+ */
+class VertexAIEmbeddings extends google_webauth_1.GoogleEmbeddings {
+    static lc_name() {
+        return "VertexAIEmbeddings";
+    }
+    constructor(fields) {
+        super({
+            ...fields,
+            platformType: "gcp",
+        });
+    }
+}
+exports.VertexAIEmbeddings = VertexAIEmbeddings;
package/dist/embeddings.d.ts
ADDED
@@ -0,0 +1,14 @@
+import { type GoogleEmbeddingsInput, GoogleEmbeddings } from "@langchain/google-webauth";
+/**
+ * Input to a Google Vertex AI embeddings class.
+ */
+export interface GoogleVertexAIEmbeddingsInput extends GoogleEmbeddingsInput {
+}
+/**
+ * Integration with a Google Vertex AI embeddings model using
+ * the "@langchain/google-webauth" package for auth.
+ */
+export declare class VertexAIEmbeddings extends GoogleEmbeddings {
+    static lc_name(): string;
+    constructor(fields: GoogleVertexAIEmbeddingsInput);
+}
package/dist/embeddings.js
ADDED
@@ -0,0 +1,16 @@
+import { GoogleEmbeddings, } from "@langchain/google-webauth";
+/**
+ * Integration with a Google Vertex AI embeddings model using
+ * the "@langchain/google-webauth" package for auth.
+ */
+export class VertexAIEmbeddings extends GoogleEmbeddings {
+    static lc_name() {
+        return "VertexAIEmbeddings";
+    }
+    constructor(fields) {
+        super({
+            ...fields,
+            platformType: "gcp",
+        });
+    }
+}
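The new `VertexAIEmbeddings` class above is the headline addition in this release; it is re-exported from the package index (see the `index.cjs` diff below). The following is a minimal usage sketch, not taken from the diff: the `text-embedding-004` model id is a placeholder, and the `authOptions`/credentials shape is assumed to mirror the `ChatVertexAI` example documented earlier. `embedQuery` and `embedDocuments` come from LangChain's shared `Embeddings` interface.

```typescript
import { VertexAIEmbeddings } from "@langchain/google-vertexai-web";

const embeddings = new VertexAIEmbeddings({
  // Placeholder model id -- check Vertex AI docs for supported embedding models.
  model: "text-embedding-004",
  authOptions: {
    // Assumed to accept the same stringified credentials as ChatVertexAI.
    credentials: process.env.GOOGLE_VERTEX_AI_WEB_CREDENTIALS,
  },
});

// embedQuery returns one vector; embedDocuments returns one vector per input.
const queryVector = await embeddings.embedQuery("What is Vertex AI?");
const docVectors = await embeddings.embedDocuments([
  "Vertex AI is Google Cloud's ML platform.",
  "LangChain.js provides integrations for it.",
]);
console.log(queryVector.length, docVectors.length);
```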
package/dist/index.cjs
CHANGED
@@ -16,3 +16,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
 Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./chat_models.cjs"), exports);
 __exportStar(require("./llms.cjs"), exports);
+__exportStar(require("./embeddings.cjs"), exports);
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
package/dist/llms.cjs
CHANGED
@@ -3,7 +3,8 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.VertexAI = void 0;
 const google_webauth_1 = require("@langchain/google-webauth");
 /**
- * Integration with a LLM
+ * Integration with a Google Vertex AI LLM using
+ * the "@langchain/google-webauth" package for auth.
  */
 class VertexAI extends google_webauth_1.GoogleLLM {
     static lc_name() {
package/dist/llms.d.ts
CHANGED
@@ -1,11 +1,12 @@
 import { type GoogleLLMInput, GoogleLLM } from "@langchain/google-webauth";
 /**
- * Input to LLM
+ * Input to a Google Vertex LLM class.
  */
 export interface VertexAIInput extends GoogleLLMInput {
 }
 /**
- * Integration with a LLM
+ * Integration with a Google Vertex AI LLM using
+ * the "@langchain/google-webauth" package for auth.
  */
 export declare class VertexAI extends GoogleLLM {
     static lc_name(): string;
package/dist/llms.js
CHANGED
@@ -1,6 +1,7 @@
 import { GoogleLLM } from "@langchain/google-webauth";
 /**
- * Integration with a LLM
+ * Integration with a Google Vertex AI LLM using
+ * the "@langchain/google-webauth" package for auth.
  */
 export class VertexAI extends GoogleLLM {
     static lc_name() {
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-vertexai-web",
-  "version": "0.0.22",
+  "version": "0.0.26",
   "description": "LangChain.js support for Google Vertex AI Web",
   "type": "module",
   "engines": {
@@ -41,7 +41,7 @@
   "license": "MIT",
   "dependencies": {
     "@langchain/core": ">=0.2.21 <0.3.0",
-    "@langchain/google-webauth": "~0.0.
+    "@langchain/google-webauth": "~0.0.26"
   },
   "devDependencies": {
     "@jest/globals": "^29.5.0",