@librechat/agents 2.4.43 → 2.4.45

@@ -0,0 +1,293 @@
+ import { ChatGoogle } from '@langchain/google-gauth';
+ import type { GoogleAIModelRequestParams, GoogleAbstractedClient } from '@langchain/google-common';
+ import type { VertexAIClientOptions } from '@/types';
+ /**
+ * Integration with Google Vertex AI chat models.
+ *
+ * Setup:
+ * Install `@langchain/google-vertexai` and set your stringified
+ * Vertex AI credentials as an environment variable named `GOOGLE_APPLICATION_CREDENTIALS`.
+ *
+ * ```bash
+ * npm install @langchain/google-vertexai
+ * export GOOGLE_APPLICATION_CREDENTIALS="path/to/credentials"
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_google_vertexai.index.ChatVertexAI.html#constructor.new_ChatVertexAI)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_common_types.GoogleAIBaseLanguageModelCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods: `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.withConfig`, or as the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.withConfig`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.withConfig({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     tool_choice: "auto",
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * <summary><strong>Instantiate</strong></summary>
+ *
+ * ```typescript
+ * import { ChatVertexAI } from '@langchain/google-vertexai';
+ *
+ * const llm = new ChatVertexAI({
+ *   model: "gemini-1.5-pro",
+ *   temperature: 0,
+ *   // other params...
+ * });
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Invoking</strong></summary>
+ *
+ * ```typescript
+ * const input = `Translate "I love programming" into French.`;
+ *
+ * // Models also accept a list of chat messages or a formatted prompt
+ * const result = await llm.invoke(input);
+ * console.log(result);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n\nHere's why this is the best translation:\n\n* **J'adore** means \"I love\" and conveys a strong passion.\n* **Programmer** is the French verb for \"to program.\"\n\nThis translation is natural and idiomatic in French. \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 63,
+ *     "total_tokens": 72
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Streaming Chunks</strong></summary>
+ *
+ * ```typescript
+ * for await (const chunk of await llm.stream(input)) {
+ *   console.log(chunk);
+ * }
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {},
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": []
+ * }
+ * AIMessageChunk {
+ *   "content": "",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
+ *
+ * ```typescript
+ * import { AIMessageChunk } from '@langchain/core/messages';
+ * import { concat } from '@langchain/core/utils/stream';
+ *
+ * const stream = await llm.stream(input);
+ * let full: AIMessageChunk | undefined;
+ * for await (const chunk of stream) {
+ *   full = !full ? chunk : concat(full, chunk);
+ * }
+ * console.log(full);
+ * ```
+ *
+ * ```txt
+ * AIMessageChunk {
+ *   "content": "\"J'adore programmer\" \n",
+ *   "additional_kwargs": {},
+ *   "response_metadata": {
+ *     "finishReason": "stop"
+ *   },
+ *   "tool_calls": [],
+ *   "tool_call_chunks": [],
+ *   "invalid_tool_calls": [],
+ *   "usage_metadata": {
+ *     "input_tokens": 9,
+ *     "output_tokens": 8,
+ *     "total_tokens": 17
+ *   }
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Bind tools</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const GetWeather = {
+ *   name: "GetWeather",
+ *   description: "Get the current weather in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const GetPopulation = {
+ *   name: "GetPopulation",
+ *   description: "Get the current population in a given location",
+ *   schema: z.object({
+ *     location: z.string().describe("The city and state, e.g. San Francisco, CA")
+ *   }),
+ * }
+ *
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
+ * const aiMsg = await llmWithTools.invoke(
+ *   "Which city is hotter today and which is bigger: LA or NY?"
+ * );
+ * console.log(aiMsg.tool_calls);
+ * ```
+ *
+ * ```txt
+ * [
+ *   {
+ *     name: 'GetPopulation',
+ *     args: { location: 'New York City, NY' },
+ *     id: '33c1c1f47e2f492799c77d2800a43912',
+ *     type: 'tool_call'
+ *   }
+ * ]
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Structured Output</strong></summary>
+ *
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * const Joke = z.object({
+ *   setup: z.string().describe("The setup of the joke"),
+ *   punchline: z.string().describe("The punchline to the joke"),
+ *   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
+ * }).describe('Joke to tell user.');
+ *
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
+ * console.log(jokeResult);
+ * ```
+ *
+ * ```txt
+ * {
+ *   setup: 'What do you call a cat that loves to bowl?',
+ *   punchline: 'An alley cat!'
+ * }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const aiMsgForMetadata = await llm.invoke(input);
+ * console.log(aiMsgForMetadata.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ *
+ * <details>
+ * <summary><strong>Stream Usage Metadata</strong></summary>
+ *
+ * ```typescript
+ * const streamForMetadata = await llm.stream(
+ *   input,
+ *   {
+ *     streamUsage: true
+ *   }
+ * );
+ * let fullForMetadata: AIMessageChunk | undefined;
+ * for await (const chunk of streamForMetadata) {
+ *   fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
+ * }
+ * console.log(fullForMetadata?.usage_metadata);
+ * ```
+ *
+ * ```txt
+ * { input_tokens: 9, output_tokens: 8, total_tokens: 17 }
+ * ```
+ * </details>
+ *
+ * <br />
+ */
+ export declare class ChatVertexAI extends ChatGoogle {
+   lc_namespace: string[];
+   dynamicThinkingBudget: boolean;
+   static lc_name(): 'ChatVertexAI';
+   constructor(fields?: VertexAIClientOptions);
+   invocationParams(options?: this['ParsedCallOptions'] | undefined): GoogleAIModelRequestParams;
+   buildConnection(fields: VertexAIClientOptions, client: GoogleAbstractedClient): void;
+ }
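
The hunk above introduces a package-local `ChatVertexAI` that extends `ChatGoogle` from `@langchain/google-gauth` instead of re-exporting the `@langchain/google-vertexai` class. A minimal usage sketch, assuming the class is re-exported from the package root (internally it is imported from `@/llm/vertexai`) and using an illustrative model name:

```typescript
// Sketch only: the root re-export path is an assumption; inside the
// package the class lives at '@/llm/vertexai'.
import { ChatVertexAI } from '@librechat/agents';

const llm = new ChatVertexAI({
  model: 'gemini-1.5-pro', // illustrative model name
  temperature: 0,
});

// Same invocation surface as the ChatGoogle base class.
const result = await llm.invoke('Translate "I love programming" into French.');
console.log(result.content);
```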
@@ -4,6 +4,7 @@ import type { RunnableConfig } from '@langchain/core/runnables';
  import type * as t from '@/types';
  import { Providers } from '@/common';
  import { StandardGraph } from '@/graphs/Graph';
+ export declare const defaultOmitOptions: Set<string>;
  export declare class Run<T extends t.BaseGraphState> {
    graphRunnable?: t.CompiledWorkflow<T, Partial<T>, string>;
    private handlerRegistry;
@@ -21,7 +22,7 @@ export declare class Run<T extends t.BaseGraphState> {
    }, streamOptions?: t.EventStreamOptions): Promise<MessageContentComplex[] | undefined>;
    private createSystemCallback;
    getCallbacks(clientCallbacks: ClientCallbacks): SystemCallbacks;
-   generateTitle({ provider, inputText, contentParts, titlePrompt, clientOptions, chainOptions, skipLanguage, }: t.RunTitleOptions): Promise<{
+   generateTitle({ provider, inputText, contentParts, titlePrompt, clientOptions, chainOptions, skipLanguage, omitOptions, }: t.RunTitleOptions): Promise<{
      language: string;
      title: string;
    }>;
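
`generateTitle` now accepts the new `omitOptions` field alongside the existing ones, and the module exports a `defaultOmitOptions` set. A hedged sketch of the updated call, assuming `run` is an existing `Run` instance and the other values come from the caller's run state:

```typescript
// Sketch of the widened generateTitle signature; `run`, `provider`,
// `inputText`, and `contentParts` are placeholders from the caller's state.
const { language, title } = await run.generateTitle({
  provider,                        // a Providers value
  inputText,                       // text the title is derived from
  contentParts,                    // aggregated message content
  omitOptions: defaultOmitOptions, // new: option keys to strip first (assumed purpose)
});
console.log(language, title);
```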
@@ -2,7 +2,6 @@ import { ChatOllama } from '@langchain/ollama';
  import { ChatAnthropic } from '@langchain/anthropic';
  import { ChatMistralAI } from '@langchain/mistralai';
  import { ChatBedrockConverse } from '@langchain/aws';
- import { ChatVertexAI } from '@langchain/google-vertexai';
  import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
  import type { BindToolsInput, BaseChatModelParams } from '@langchain/core/language_models/chat_models';
  import type { OpenAIChatInput, ChatOpenAIFields, AzureOpenAIInput, ClientOptions as OAIClientOptions } from '@langchain/openai';
@@ -21,9 +20,10 @@ import type { Runnable } from '@langchain/core/runnables';
  import type { ChatOllamaInput } from '@langchain/ollama';
  import type { OpenAI as OpenAIClient } from 'openai';
  import type { ChatXAIInput } from '@langchain/xai';
- import { ChatXAI, ChatOpenAI, ChatDeepSeek, AzureChatOpenAI } from '@/llm/openai';
+ import { AzureChatOpenAI, ChatDeepSeek, ChatOpenAI, ChatXAI } from '@/llm/openai';
  import { CustomChatGoogleGenerativeAI } from '@/llm/google';
  import { ChatOpenRouter } from '@/llm/openrouter';
+ import { ChatVertexAI } from '@/llm/vertexai';
  import { Providers } from '@/common';
  export type AzureClientOptions = Partial<OpenAIChatInput> & Partial<AzureOpenAIInput> & {
    openAIApiKey?: string;
@@ -44,7 +44,9 @@ export type OpenAIClientOptions = ChatOpenAIFields;
  export type OpenAIClientOptions = ChatOpenAIFields;
  export type OllamaClientOptions = ChatOllamaInput;
  export type AnthropicClientOptions = AnthropicInput;
  export type MistralAIClientOptions = ChatMistralAIInput;
- export type VertexAIClientOptions = ChatVertexAIInput;
+ export type VertexAIClientOptions = ChatVertexAIInput & {
+   includeThoughts?: boolean;
+ };
  export type BedrockClientOptions = BedrockChatFields;
  export type BedrockAnthropicInput = ChatBedrockConverseInput & {
    additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] & AnthropicReasoning;
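
`VertexAIClientOptions` is widened from a plain alias of `ChatVertexAIInput` to an intersection that adds `includeThoughts`. A short sketch of constructing the new `ChatVertexAI` with it; the flag's effect is an assumption based on Gemini's thinking-model naming, and the model name is illustrative:

```typescript
// Sketch: `includeThoughts` rides along with the usual ChatVertexAIInput
// fields; surfacing the model's "thought" parts is an assumed effect.
const options: VertexAIClientOptions = {
  model: 'gemini-2.5-pro', // illustrative
  includeThoughts: true,
};
const llm = new ChatVertexAI(options);
```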
@@ -24,6 +24,7 @@ export type RunTitleOptions = {
    skipLanguage?: boolean;
    clientOptions?: l.ClientOptions;
    chainOptions?: Partial<RunnableConfig> | undefined;
+   omitOptions?: Set<string>;
  };
  export interface AgentStateChannels {
    messages: BaseMessage[];
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@librechat/agents",
-   "version": "2.4.43",
+   "version": "2.4.45",
    "main": "./dist/cjs/main.cjs",
    "module": "./dist/esm/main.mjs",
    "types": "./dist/types/index.d.ts",
@@ -3,22 +3,23 @@ import { ChatOllama } from '@langchain/ollama';
  import { ChatMistralAI } from '@langchain/mistralai';
  import { ChatBedrockConverse } from '@langchain/aws';
  // import { ChatAnthropic } from '@langchain/anthropic';
- import { ChatVertexAI } from '@langchain/google-vertexai';
+ // import { ChatVertexAI } from '@langchain/google-vertexai';
  import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
  import type {
    ChatModelConstructorMap,
    ProviderOptionsMap,
    ChatModelMap,
  } from '@/types';
- import { CustomChatGoogleGenerativeAI } from '@/llm/google';
- import { CustomAnthropic } from '@/llm/anthropic';
- import { ChatOpenRouter } from '@/llm/openrouter';
  import {
-   ChatXAI,
-   ChatOpenAI,
-   ChatDeepSeek,
    AzureChatOpenAI,
+   ChatDeepSeek,
+   ChatOpenAI,
+   ChatXAI,
  } from '@/llm/openai';
+ import { CustomChatGoogleGenerativeAI } from '@/llm/google';
+ import { CustomAnthropic } from '@/llm/anthropic';
+ import { ChatOpenRouter } from '@/llm/openrouter';
+ import { ChatVertexAI } from '@/llm/vertexai';
  import { Providers } from '@/common';

  export const llmProviders: Partial<ChatModelConstructorMap> = {