@librechat/agents 2.4.43 → 2.4.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/llm/providers.cjs +3 -3
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/vertexai/index.cjs +330 -0
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
- package/dist/cjs/main.cjs +1 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/run.cjs +15 -10
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/esm/llm/providers.mjs +2 -2
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/vertexai/index.mjs +328 -0
- package/dist/esm/llm/vertexai/index.mjs.map +1 -0
- package/dist/esm/main.mjs +1 -1
- package/dist/esm/run.mjs +15 -11
- package/dist/esm/run.mjs.map +1 -1
- package/dist/types/llm/vertexai/index.d.ts +293 -0
- package/dist/types/run.d.ts +2 -1
- package/dist/types/types/llm.d.ts +5 -3
- package/dist/types/types/run.d.ts +1 -0
- package/package.json +1 -1
- package/src/llm/providers.ts +8 -7
- package/src/llm/vertexai/index.ts +360 -0
- package/src/run.ts +15 -9
- package/src/types/llm.ts +7 -5
- package/src/types/run.ts +1 -0
package/src/llm/vertexai/index.ts
ADDED

@@ -0,0 +1,360 @@
+import { ChatGoogle } from '@langchain/google-gauth';
+import { ChatConnection } from '@langchain/google-common';
+import type {
+  GeminiRequest,
+  GoogleAIModelRequestParams,
+  GoogleAbstractedClient,
+} from '@langchain/google-common';
+import type { BaseMessage } from '@langchain/core/messages';
+import type { VertexAIClientOptions } from '@/types';
+
+class CustomChatConnection extends ChatConnection<VertexAIClientOptions> {
+  async formatData(
+    input: BaseMessage[],
+    parameters: GoogleAIModelRequestParams
+  ): Promise<unknown> {
+    const formattedData = (await super.formatData(
+      input,
+      parameters
+    )) as GeminiRequest;
+    if (
+      formattedData.generationConfig?.thinkingConfig?.thinkingBudget === -1 &&
+      formattedData.generationConfig.thinkingConfig.includeThoughts === false
+    ) {
+      formattedData.generationConfig.thinkingConfig.includeThoughts = true;
+    }
+    return formattedData;
+  }
+}
+
+/**
+ * Integration with Google Vertex AI chat models.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_APPLICATION_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai
+ * export GOOGLE_APPLICATION_CREDENTIALS="path/to/credentials"
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai.index.ChatVertexAI.html#constructor.new_ChatVertexAI)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.withConfig`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.withConfig({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ */
+export class ChatVertexAI extends ChatGoogle {
+  lc_namespace = ['langchain', 'chat_models', 'vertexai'];
+  dynamicThinkingBudget = false;
+
+  static lc_name(): 'ChatVertexAI' {
+    return 'ChatVertexAI';
+  }
+
+  constructor(fields?: VertexAIClientOptions) {
+    let dynamicThinkingBudget = false;
+    if (fields?.thinkingBudget === -1) {
+      dynamicThinkingBudget = true;
+      fields.thinkingBudget = 1;
+    }
+    super({
+      ...fields,
+      platformType: 'gcp',
+    });
+    this.dynamicThinkingBudget = dynamicThinkingBudget;
+  }
+  invocationParams(
+    options?: this['ParsedCallOptions'] | undefined
+  ): GoogleAIModelRequestParams {
+    const params = super.invocationParams(options);
+    if (this.dynamicThinkingBudget) {
+      params.maxReasoningTokens = -1;
+    }
+    return params;
+  }
+
+  buildConnection(
+    fields: VertexAIClientOptions,
+    client: GoogleAbstractedClient
+  ): void {
+    this.connection = new CustomChatConnection(
+      { ...fields, ...this },
+      this.caller,
+      client,
+      false
+    );
+
+    this.streamedConnection = new CustomChatConnection(
+      { ...fields, ...this },
+      this.caller,
+      client,
+      true
+    );
+  }
+}
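Taken together, the new module lets a caller request Gemini's dynamic thinking budget (`thinkingBudget: -1`): the constructor records the `-1` and passes a placeholder `1` to `super()` (presumably because the base class does not accept `-1` directly), `invocationParams` then re-applies it as `maxReasoningTokens: -1` on every call, and `CustomChatConnection.formatData` flips `includeThoughts` to `true` for dynamic-budget requests so thought summaries are returned. A minimal usage sketch, assuming `ChatVertexAI` is re-exported from the package root (the `main.cjs`/`main.mjs` changes suggest it is) and that the model id shown is illustrative:

```typescript
import { ChatVertexAI } from '@librechat/agents';

async function main(): Promise<void> {
  // thinkingBudget: -1 requests a dynamic budget; the subclass rewrites it
  // to maxReasoningTokens: -1 per call, and formatData() forces
  // includeThoughts: true so thought summaries are not silently dropped.
  const llm = new ChatVertexAI({
    model: 'gemini-2.5-flash', // illustrative model id
    thinkingBudget: -1,
  });

  const result = await llm.invoke('In one sentence, what is a monad?');
  console.log(result.content);
}

main().catch(console.error);
```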
package/src/run.ts
CHANGED
@@ -19,6 +19,19 @@ import { StandardGraph } from '@/graphs/Graph';
 import { HandlerRegistry } from '@/events';
 import { isOpenAILike } from '@/utils/llm';
 
+export const defaultOmitOptions = new Set([
+  'stream',
+  'thinking',
+  'streaming',
+  'maxTokens',
+  'clientOptions',
+  'thinkingConfig',
+  'thinkingBudget',
+  'includeThoughts',
+  'maxOutputTokens',
+  'additionalModelRequestFields',
+]);
+
 export class Run<T extends t.BaseGraphState> {
   graphRunnable?: t.CompiledWorkflow<T, Partial<T>, string>;
   // private collab!: CollabGraph;
@@ -242,6 +255,7 @@ export class Run<T extends t.BaseGraphState> {
     clientOptions,
     chainOptions,
     skipLanguage,
+    omitOptions = defaultOmitOptions,
   }: t.RunTitleOptions): Promise<{ language: string; title: string }> {
     const convoTemplate = PromptTemplate.fromTemplate(
       'User: {input}\nAI: {output}'
@@ -257,16 +271,8 @@ export class Run<T extends t.BaseGraphState> {
     ).value;
     const model = this.Graph?.getNewModel({
       provider,
+      omitOptions,
       clientOptions,
-      omitOptions: new Set([
-        'clientOptions',
-        'streaming',
-        'stream',
-        'thinking',
-        'maxTokens',
-        'maxOutputTokens',
-        'additionalModelRequestFields',
-      ]),
     });
     if (!model) {
       return { language: '', title: '' };
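The title-generation path now falls back to a single exported `defaultOmitOptions` set, which additionally strips `thinkingConfig`, `thinkingBudget`, and `includeThoughts` so reasoning settings from the chat model never reach the lightweight title model, and callers can substitute their own set through the new `omitOptions` field on `t.RunTitleOptions`. A small sketch of extending the default, assuming `defaultOmitOptions` is re-exported from the package root (`'temperature'` is an illustrative extra key, not a package default):

```typescript
import { defaultOmitOptions } from '@librechat/agents';

// Derive a caller-specific omit set without mutating the shared default.
const customOmit: Set<string> = new Set([...defaultOmitOptions, 'temperature']);

console.log(customOmit.has('thinkingBudget')); // true (inherited default)
console.log(customOmit.has('temperature'));    // true (caller addition)
```

Passing `customOmit` as `omitOptions` to `generateTitle` replaces the default through the destructuring fallback shown in the hunk above.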
package/src/types/llm.ts
CHANGED
@@ -3,7 +3,6 @@ import { ChatOllama } from '@langchain/ollama';
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatMistralAI } from '@langchain/mistralai';
 import { ChatBedrockConverse } from '@langchain/aws';
-import { ChatVertexAI } from '@langchain/google-vertexai';
 import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
 import type {
   BindToolsInput,
@@ -31,13 +30,14 @@ import type { ChatOllamaInput } from '@langchain/ollama';
 import type { OpenAI as OpenAIClient } from 'openai';
 import type { ChatXAIInput } from '@langchain/xai';
 import {
-  ChatXAI,
-  ChatOpenAI,
-  ChatDeepSeek,
   AzureChatOpenAI,
+  ChatDeepSeek,
+  ChatOpenAI,
+  ChatXAI,
 } from '@/llm/openai';
 import { CustomChatGoogleGenerativeAI } from '@/llm/google';
 import { ChatOpenRouter } from '@/llm/openrouter';
+import { ChatVertexAI } from '@/llm/vertexai';
 import { Providers } from '@/common';
 
 export type AzureClientOptions = Partial<OpenAIChatInput> &
@@ -62,7 +62,9 @@ export type OpenAIClientOptions = ChatOpenAIFields;
 export type OllamaClientOptions = ChatOllamaInput;
 export type AnthropicClientOptions = AnthropicInput;
 export type MistralAIClientOptions = ChatMistralAIInput;
-export type VertexAIClientOptions = ChatVertexAIInput
+export type VertexAIClientOptions = ChatVertexAIInput & {
+  includeThoughts?: boolean;
+};
 export type BedrockClientOptions = BedrockChatFields;
 export type BedrockAnthropicInput = ChatBedrockConverseInput & {
   additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] &
package/src/types/run.ts
CHANGED