@librechat/agents-types 2.4.322 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/graph.ts CHANGED
@@ -1,77 +1,241 @@
1
1
  // src/types/graph.ts
2
- import type { StateGraphArgs, StateGraph, CompiledStateGraph } from '@langchain/langgraph';
3
- import type { BaseMessage, AIMessageChunk } from '@langchain/core/messages';
2
+ import type {
3
+ START,
4
+ StateType,
5
+ UpdateType,
6
+ StateGraph,
7
+ StateGraphArgs,
8
+ StateDefinition,
9
+ CompiledStateGraph,
10
+ BinaryOperatorAggregate,
11
+ } from '@langchain/langgraph';
12
+ import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
13
+ import type {
14
+ BaseMessage,
15
+ AIMessageChunk,
16
+ SystemMessage,
17
+ } from '@langchain/core/messages';
18
+ import type { RunnableConfig, Runnable } from '@langchain/core/runnables';
4
19
  import type { ChatGenerationChunk } from '@langchain/core/outputs';
5
- import type { RunnableConfig } from '@langchain/core/runnables';
6
- import type { ToolMap, GenericTool } from '@/types/tools';
20
+ import type { GoogleAIToolType } from '@langchain/google-common';
21
+ import type { ToolMap, ToolEndEvent, GenericTool } from '@/types/tools';
22
+ import type { Providers, Callback, GraphNodeKeys } from '@/common';
23
+ import type { StandardGraph, MultiAgentGraph } from '@/graphs';
7
24
  import type { ClientOptions } from '@/types/llm';
8
- import type { Providers } from '@/common';
9
- import type { Graph } from '@/graphs';
10
- // import type { RunnableConfig } from '@langchain/core/runnables';
25
+ import type {
26
+ RunStep,
27
+ RunStepDeltaEvent,
28
+ MessageDeltaEvent,
29
+ ReasoningDeltaEvent,
30
+ } from '@/types/stream';
31
+ import type { TokenCounter } from '@/types/run';
32
+
33
+ /** Interface for bound model with stream and invoke methods */
34
+ export interface ChatModel {
35
+ stream?: (
36
+ messages: BaseMessage[],
37
+ config?: RunnableConfig
38
+ ) => Promise<AsyncIterable<AIMessageChunk>>;
39
+ invoke: (
40
+ messages: BaseMessage[],
41
+ config?: RunnableConfig
42
+ ) => Promise<AIMessageChunk>;
43
+ }
44
+
45
+ export type GraphNode = GraphNodeKeys | typeof START;
46
+ export type ClientCallback<T extends unknown[]> = (
47
+ graph: StandardGraph,
48
+ ...args: T
49
+ ) => void;
50
+
51
+ export type ClientCallbacks = {
52
+ [Callback.TOOL_ERROR]?: ClientCallback<[Error, string]>;
53
+ [Callback.TOOL_START]?: ClientCallback<unknown[]>;
54
+ [Callback.TOOL_END]?: ClientCallback<unknown[]>;
55
+ };
56
+
57
+ export type SystemCallbacks = {
58
+ [K in keyof ClientCallbacks]: ClientCallbacks[K] extends ClientCallback<
59
+ infer Args
60
+ >
61
+ ? (...args: Args) => void
62
+ : never;
63
+ };
11
64
 
12
65
  export type BaseGraphState = {
13
66
  messages: BaseMessage[];
14
- // [key: string]: unknown;
15
67
  };
16
68
 
17
- export type IState = BaseGraphState;
69
+ export type MultiAgentGraphState = BaseGraphState & {
70
+ agentMessages?: BaseMessage[];
71
+ };
18
72
 
19
- // export interface IState extends BaseGraphState {
20
- // instructions?: string;
21
- // additional_instructions?: string;
22
- // }
73
+ export type IState = BaseGraphState;
23
74
 
24
75
  export interface EventHandler {
25
- handle(event: string, data: StreamEventData | ModelEndData, metadata?: Record<string, unknown>, graph?: Graph): void;
76
+ handle(
77
+ event: string,
78
+ data:
79
+ | StreamEventData
80
+ | ModelEndData
81
+ | RunStep
82
+ | RunStepDeltaEvent
83
+ | MessageDeltaEvent
84
+ | ReasoningDeltaEvent
85
+ | { result: ToolEndEvent },
86
+ metadata?: Record<string, unknown>,
87
+ graph?: StandardGraph | MultiAgentGraph
88
+ ): void | Promise<void>;
26
89
  }
27
90
 
28
- export type GraphStateChannels<T extends BaseGraphState> = StateGraphArgs<T>['channels'];
91
+ export type GraphStateChannels<T extends BaseGraphState> =
92
+ StateGraphArgs<T>['channels'];
93
+
94
+ export type Workflow<
95
+ T extends BaseGraphState = BaseGraphState,
96
+ U extends Partial<T> = Partial<T>,
97
+ N extends string = string,
98
+ > = StateGraph<T, U, N>;
99
+
100
+ export type CompiledWorkflow<
101
+ T extends BaseGraphState = BaseGraphState,
102
+ U extends Partial<T> = Partial<T>,
103
+ N extends string = string,
104
+ > = CompiledStateGraph<T, U, N>;
105
+
106
+ export type CompiledStateWorkflow = CompiledStateGraph<
107
+ StateType<{
108
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
109
+ }>,
110
+ UpdateType<{
111
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
112
+ }>,
113
+ string,
114
+ {
115
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
116
+ },
117
+ {
118
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
119
+ },
120
+ StateDefinition
121
+ >;
122
+
123
+ export type CompiledMultiAgentWorkflow = CompiledStateGraph<
124
+ StateType<{
125
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
126
+ agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
127
+ }>,
128
+ UpdateType<{
129
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
130
+ agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
131
+ }>,
132
+ string,
133
+ {
134
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
135
+ agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
136
+ },
137
+ {
138
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
139
+ agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
140
+ },
141
+ StateDefinition
142
+ >;
29
143
 
30
- export type Workflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = StateGraph<T, U, N>;
144
+ export type CompiledAgentWorfklow = CompiledStateGraph<
145
+ {
146
+ messages: BaseMessage[];
147
+ },
148
+ {
149
+ messages?: BaseMessage[] | undefined;
150
+ },
151
+ '__start__' | `agent=${string}` | `tools=${string}`,
152
+ {
153
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
154
+ },
155
+ {
156
+ messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
157
+ },
158
+ StateDefinition,
159
+ {
160
+ [x: `agent=${string}`]: Partial<BaseGraphState>;
161
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
162
+ [x: `tools=${string}`]: any;
163
+ }
164
+ >;
31
165
 
32
- export type CompiledWorkflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = CompiledStateGraph<T, U, N>;
166
+ export type SystemRunnable =
167
+ | Runnable<
168
+ BaseMessage[],
169
+ (BaseMessage | SystemMessage)[],
170
+ RunnableConfig<Record<string, unknown>>
171
+ >
172
+ | undefined;
33
173
 
34
- export type EventStreamCallbackHandlerInput = Parameters<CompiledWorkflow['streamEvents']>[2] extends Omit<infer T, 'autoClose'> ? T : never;
174
+ /**
175
+ * Optional compile options passed to workflow.compile().
176
+ * These are intentionally untyped to avoid coupling to library internals.
177
+ */
178
+ export type CompileOptions = {
179
+ // A checkpointer instance (e.g., MemorySaver, SQL saver)
180
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
181
+ checkpointer?: any;
182
+ interruptBefore?: string[];
183
+ interruptAfter?: string[];
184
+ };
185
+
186
+ export type EventStreamCallbackHandlerInput =
187
+ Parameters<CompiledWorkflow['streamEvents']>[2] extends Omit<
188
+ infer T,
189
+ 'autoClose'
190
+ >
191
+ ? T
192
+ : never;
35
193
 
36
- export type StreamChunk = ChatGenerationChunk & {
37
- message: AIMessageChunk;
38
- } | AIMessageChunk;
194
+ export type StreamChunk =
195
+ | (ChatGenerationChunk & {
196
+ message: AIMessageChunk;
197
+ })
198
+ | AIMessageChunk;
39
199
 
40
200
  /**
41
201
  * Data associated with a StreamEvent.
42
202
  */
43
203
  export type StreamEventData = {
44
- /**
45
- * The input passed to the runnable that generated the event.
46
- * Inputs will sometimes be available at the *START* of the runnable, and
47
- * sometimes at the *END* of the runnable.
48
- * If a runnable is able to stream its inputs, then its input by definition
49
- * won't be known until the *END* of the runnable when it has finished streaming
50
- * its inputs.
51
- */
52
- input?: unknown;
53
- /**
54
- * The output of the runnable that generated the event.
55
- * Outputs will only be available at the *END* of the runnable.
56
- * For most runnables, this field can be inferred from the `chunk` field,
57
- * though there might be some exceptions for special cased runnables (e.g., like
58
- * chat models), which may return more information.
59
- */
60
- output?: unknown;
61
- /**
62
- * A streaming chunk from the output that generated the event.
63
- * chunks support addition in general, and adding them up should result
64
- * in the output of the runnable that generated the event.
65
- */
66
- chunk?: StreamChunk;
67
- /**
68
- * Runnable config for invoking other runnables within handlers.
69
- */
70
- config?: RunnableConfig;
71
- /**
72
- * Custom result from the runnable that generated the event.
73
- */
74
- result?: unknown;
204
+ /**
205
+ * The input passed to the runnable that generated the event.
206
+ * Inputs will sometimes be available at the *START* of the runnable, and
207
+ * sometimes at the *END* of the runnable.
208
+ * If a runnable is able to stream its inputs, then its input by definition
209
+ * won't be known until the *END* of the runnable when it has finished streaming
210
+ * its inputs.
211
+ */
212
+ input?: unknown;
213
+ /**
214
+ * The output of the runnable that generated the event.
215
+ * Outputs will only be available at the *END* of the runnable.
216
+ * For most runnables, this field can be inferred from the `chunk` field,
217
+ * though there might be some exceptions for special cased runnables (e.g., like
218
+ * chat models), which may return more information.
219
+ */
220
+ output?: unknown;
221
+ /**
222
+ * A streaming chunk from the output that generated the event.
223
+ * chunks support addition in general, and adding them up should result
224
+ * in the output of the runnable that generated the event.
225
+ */
226
+ chunk?: StreamChunk;
227
+ /**
228
+ * Runnable config for invoking other runnables within handlers.
229
+ */
230
+ config?: RunnableConfig;
231
+ /**
232
+ * Custom result from the runnable that generated the event.
233
+ */
234
+ result?: unknown;
235
+ /**
236
+ * Custom field to indicate the event was manually emitted, and may have been handled already
237
+ */
238
+ emitted?: boolean;
75
239
  };
76
240
 
77
241
  /**
@@ -80,54 +244,54 @@ export type StreamEventData = {
80
244
  * Schema of a streaming event which is produced from the streamEvents method.
81
245
  */
82
246
  export type StreamEvent = {
83
- /**
84
- * Event names are of the format: on_[runnable_type]_(start|stream|end).
85
- *
86
- * Runnable types are one of:
87
- * - llm - used by non chat models
88
- * - chat_model - used by chat models
89
- * - prompt -- e.g., ChatPromptTemplate
90
- * - tool -- LangChain tools
91
- * - chain - most Runnables are of this type
92
- *
93
- * Further, the events are categorized as one of:
94
- * - start - when the runnable starts
95
- * - stream - when the runnable is streaming
96
- * - end - when the runnable ends
97
- *
98
- * start, stream and end are associated with slightly different `data` payload.
99
- *
100
- * Please see the documentation for `EventData` for more details.
101
- */
102
- event: string;
103
- /** The name of the runnable that generated the event. */
104
- name: string;
105
- /**
106
- * An randomly generated ID to keep track of the execution of the given runnable.
107
- *
108
- * Each child runnable that gets invoked as part of the execution of a parent runnable
109
- * is assigned its own unique ID.
110
- */
111
- run_id: string;
112
- /**
113
- * Tags associated with the runnable that generated this event.
114
- * Tags are always inherited from parent runnables.
115
- */
116
- tags?: string[];
117
- /** Metadata associated with the runnable that generated this event. */
118
- metadata: Record<string, unknown>;
119
- /**
120
- * Event data.
121
- *
122
- * The contents of the event data depend on the event type.
123
- */
124
- data: StreamEventData;
247
+ /**
248
+ * Event names are of the format: on_[runnable_type]_(start|stream|end).
249
+ *
250
+ * Runnable types are one of:
251
+ * - llm - used by non chat models
252
+ * - chat_model - used by chat models
253
+ * - prompt -- e.g., ChatPromptTemplate
254
+ * - tool -- LangChain tools
255
+ * - chain - most Runnables are of this type
256
+ *
257
+ * Further, the events are categorized as one of:
258
+ * - start - when the runnable starts
259
+ * - stream - when the runnable is streaming
260
+ * - end - when the runnable ends
261
+ *
262
+ * start, stream and end are associated with slightly different `data` payload.
263
+ *
264
+ * Please see the documentation for `EventData` for more details.
265
+ */
266
+ event: string;
267
+ /** The name of the runnable that generated the event. */
268
+ name: string;
269
+ /**
270
+ * An randomly generated ID to keep track of the execution of the given runnable.
271
+ *
272
+ * Each child runnable that gets invoked as part of the execution of a parent runnable
273
+ * is assigned its own unique ID.
274
+ */
275
+ run_id: string;
276
+ /**
277
+ * Tags associated with the runnable that generated this event.
278
+ * Tags are always inherited from parent runnables.
279
+ */
280
+ tags?: string[];
281
+ /** Metadata associated with the runnable that generated this event. */
282
+ metadata: Record<string, unknown>;
283
+ /**
284
+ * Event data.
285
+ *
286
+ * The contents of the event data depend on the event type.
287
+ */
288
+ data: StreamEventData;
125
289
  };
126
290
 
127
291
  export type GraphConfig = {
128
- provider: string;
129
- thread_id?: string;
130
- run_id?: string;
292
+ provider: string;
293
+ thread_id?: string;
294
+ run_id?: string;
131
295
  };
132
296
 
133
297
  export type PartMetadata = {
@@ -138,18 +302,71 @@ export type PartMetadata = {
138
302
  output?: string;
139
303
  };
140
304
 
141
- export type ModelEndData = StreamEventData & { output: AIMessageChunk | undefined } | undefined;
142
-
305
+ export type ModelEndData =
306
+ | (StreamEventData & { output: AIMessageChunk | undefined })
307
+ | undefined;
308
+ export type GraphTools = GenericTool[] | BindToolsInput[] | GoogleAIToolType[];
143
309
  export type StandardGraphInput = {
144
310
  runId?: string;
311
+ signal?: AbortSignal;
312
+ agents: AgentInputs[];
313
+ tokenCounter?: TokenCounter;
314
+ indexTokenCountMap?: Record<string, number>;
315
+ };
316
+
317
+ export type GraphEdge = {
318
+ /** Agent ID, use a list for multiple sources */
319
+ from: string | string[];
320
+ /** Agent ID, use a list for multiple destinations */
321
+ to: string | string[];
322
+ description?: string;
323
+ /** Can return boolean or specific destination(s) */
324
+ condition?: (state: BaseGraphState) => boolean | string | string[];
325
+ /** 'handoff' creates tools for dynamic routing, 'direct' creates direct edges, which also allow parallel execution */
326
+ edgeType?: 'handoff' | 'direct';
327
+ /**
328
+ * For direct edges: Optional prompt to add when transitioning through this edge.
329
+ * String prompts can include variables like {results} which will be replaced with
330
+ * messages from startIndex onwards. When {results} is used, excludeResults defaults to true.
331
+ *
332
+ * For handoff edges: Description for the input parameter that the handoff tool accepts,
333
+ * allowing the supervisor to pass specific instructions/context to the transferred agent.
334
+ */
335
+ prompt?:
336
+ | string
337
+ | ((
338
+ messages: BaseMessage[],
339
+ runStartIndex: number
340
+ ) => string | Promise<string> | undefined);
341
+ /**
342
+ * When true, excludes messages from startIndex when adding prompt.
343
+ * Automatically set to true when {results} variable is used in prompt.
344
+ */
345
+ excludeResults?: boolean;
346
+ /**
347
+ * For handoff edges: Customizes the parameter name for the handoff input.
348
+ * Defaults to "instructions" if not specified.
349
+ * Only applies when prompt is provided for handoff edges.
350
+ */
351
+ promptKey?: string;
352
+ };
353
+
354
+ export type MultiAgentGraphInput = StandardGraphInput & {
355
+ edges: GraphEdge[];
356
+ };
357
+
358
+ export interface AgentInputs {
359
+ agentId: string;
145
360
  toolEnd?: boolean;
146
361
  toolMap?: ToolMap;
362
+ tools?: GraphTools;
147
363
  provider: Providers;
148
- signal?: AbortSignal;
149
364
  instructions?: string;
150
365
  streamBuffer?: number;
151
- tools?: GenericTool[];
152
- clientOptions: ClientOptions;
366
+ maxContextTokens?: number;
367
+ clientOptions?: ClientOptions;
153
368
  additional_instructions?: string;
154
369
  reasoningKey?: 'reasoning_content' | 'reasoning';
155
- };
370
+ /** Format content blocks as strings (for legacy compatibility i.e. Ollama/Azure Serverless) */
371
+ useLegacyContent?: boolean;
372
+ }
package/llm.ts CHANGED
@@ -1,10 +1,7 @@
1
1
  // src/types/llm.ts
2
2
  import { ChatOllama } from '@langchain/ollama';
3
- import { ChatAnthropic } from '@langchain/anthropic';
4
3
  import { ChatMistralAI } from '@langchain/mistralai';
5
4
  import { ChatBedrockConverse } from '@langchain/aws';
6
- import { ChatVertexAI } from '@langchain/google-vertexai';
7
- import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
8
5
  import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
9
6
  import type {
10
7
  BindToolsInput,
@@ -18,11 +15,13 @@ import type {
18
15
  } from '@langchain/openai';
19
16
  import type { BedrockChatFields } from '@langchain/community/chat_models/bedrock/web';
20
17
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
18
+ import type { GeminiGenerationConfig } from '@langchain/google-common';
21
19
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
22
20
  import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
23
21
  import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
24
22
  import type { ChatBedrockConverseInput } from '@langchain/aws';
25
23
  import type { ChatMistralAIInput } from '@langchain/mistralai';
24
+ import type { RequestOptions } from '@google/generative-ai';
26
25
  import type { StructuredTool } from '@langchain/core/tools';
27
26
  import type { AnthropicInput } from '@langchain/anthropic';
28
27
  import type { Runnable } from '@langchain/core/runnables';
@@ -30,12 +29,15 @@ import type { ChatOllamaInput } from '@langchain/ollama';
30
29
  import type { OpenAI as OpenAIClient } from 'openai';
31
30
  import type { ChatXAIInput } from '@langchain/xai';
32
31
  import {
33
- ChatXAI,
34
- ChatOpenAI,
35
- ChatDeepSeek,
36
32
  AzureChatOpenAI,
33
+ ChatDeepSeek,
34
+ ChatOpenAI,
35
+ ChatXAI,
37
36
  } from '@/llm/openai';
37
+ import { CustomChatGoogleGenerativeAI } from '@/llm/google';
38
+ import { CustomAnthropic } from '@/llm/anthropic';
38
39
  import { ChatOpenRouter } from '@/llm/openrouter';
40
+ import { ChatVertexAI } from '@/llm/vertexai';
39
41
  import { Providers } from '@/common';
40
42
 
41
43
  export type AzureClientOptions = Partial<OpenAIChatInput> &
@@ -60,14 +62,19 @@ export type OpenAIClientOptions = ChatOpenAIFields;
60
62
  export type OllamaClientOptions = ChatOllamaInput;
61
63
  export type AnthropicClientOptions = AnthropicInput;
62
64
  export type MistralAIClientOptions = ChatMistralAIInput;
63
- export type VertexAIClientOptions = ChatVertexAIInput;
65
+ export type VertexAIClientOptions = ChatVertexAIInput & {
66
+ includeThoughts?: boolean;
67
+ };
64
68
  export type BedrockClientOptions = BedrockChatFields;
65
69
  export type BedrockAnthropicInput = ChatBedrockConverseInput & {
66
70
  additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] &
67
71
  AnthropicReasoning;
68
72
  };
69
73
  export type BedrockConverseClientOptions = ChatBedrockConverseInput;
70
- export type GoogleClientOptions = GoogleGenerativeAIChatInput;
74
+ export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
75
+ customHeaders?: RequestOptions['customHeaders'];
76
+ thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
77
+ };
71
78
  export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
72
79
  export type XAIClientOptions = ChatXAIInput;
73
80
 
@@ -84,9 +91,16 @@ export type ClientOptions =
84
91
  | DeepSeekClientOptions
85
92
  | XAIClientOptions;
86
93
 
87
- export type LLMConfig = {
94
+ export type SharedLLMConfig = {
88
95
  provider: Providers;
89
- } & ClientOptions;
96
+ _lc_stream_delay?: number;
97
+ };
98
+
99
+ export type LLMConfig = SharedLLMConfig &
100
+ ClientOptions & {
101
+ /** Optional provider fallbacks in order of attempt */
102
+ fallbacks?: Array<{ provider: Providers; clientOptions?: ClientOptions }>;
103
+ };
90
104
 
91
105
  export type ProviderOptionsMap = {
92
106
  [Providers.AZURE]: AzureClientOptions;
@@ -111,13 +125,13 @@ export type ChatModelMap = {
111
125
  [Providers.AZURE]: AzureChatOpenAI;
112
126
  [Providers.DEEPSEEK]: ChatDeepSeek;
113
127
  [Providers.VERTEXAI]: ChatVertexAI;
114
- [Providers.ANTHROPIC]: ChatAnthropic;
128
+ [Providers.ANTHROPIC]: CustomAnthropic;
115
129
  [Providers.MISTRALAI]: ChatMistralAI;
116
130
  [Providers.MISTRAL]: ChatMistralAI;
117
131
  [Providers.OPENROUTER]: ChatOpenRouter;
118
132
  [Providers.BEDROCK_LEGACY]: BedrockChat;
119
133
  [Providers.BEDROCK]: ChatBedrockConverse;
120
- [Providers.GOOGLE]: ChatGoogleGenerativeAI;
134
+ [Providers.GOOGLE]: CustomChatGoogleGenerativeAI;
121
135
  };
122
136
 
123
137
  export type ChatModelConstructorMap = {
package/messages.ts ADDED
@@ -0,0 +1,4 @@
1
+ import type Anthropic from '@anthropic-ai/sdk';
2
+ import type { BaseMessage } from '@langchain/core/messages';
3
+ export type AnthropicMessages = Array<AnthropicMessage | BaseMessage>;
4
+ export type AnthropicMessage = Anthropic.MessageParam;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@librechat/agents-types",
3
- "version": "2.4.322",
3
+ "version": "3.0.0",
4
4
  "description": "Type definitions for @librechat/agents",
5
5
  "types": "index.d.ts",
6
6
  "scripts": {
package/run.ts CHANGED
@@ -7,7 +7,6 @@ import type {
7
7
  BaseCallbackHandler,
8
8
  CallbackHandlerMethods,
9
9
  } from '@langchain/core/callbacks/base';
10
- import type * as graph from '@/graphs/Graph';
11
10
  import type * as s from '@/types/stream';
12
11
  import type * as e from '@/common/enum';
13
12
  import type * as g from '@/types/graph';
@@ -16,21 +15,58 @@ import type * as l from '@/types/llm';
16
15
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
17
16
  export type ZodObjectAny = z.ZodObject<any, any, any, any>;
18
17
  export type BaseGraphConfig = {
19
- type?: 'standard';
20
18
  llmConfig: l.LLMConfig;
21
19
  provider?: e.Providers;
22
20
  clientOptions?: l.ClientOptions;
21
+ /** Optional compile options for workflow.compile() */
22
+ compileOptions?: g.CompileOptions;
23
23
  };
24
- export type StandardGraphConfig = BaseGraphConfig &
25
- Omit<g.StandardGraphInput, 'provider' | 'clientOptions'>;
24
+ export type LegacyGraphConfig = BaseGraphConfig & {
25
+ type?: 'standard';
26
+ } & Omit<g.StandardGraphInput, 'provider' | 'clientOptions' | 'agents'> &
27
+ Omit<g.AgentInputs, 'provider' | 'clientOptions' | 'agentId'>;
28
+
29
+ /* Supervised graph (opt-in) */
30
+ export type SupervisedGraphConfig = BaseGraphConfig & {
31
+ type: 'supervised';
32
+ /** Enable supervised router; when false, fall back to standard loop */
33
+ routerEnabled?: boolean;
34
+ /** Table-driven routing policy per stage */
35
+ routingPolicies?: Array<{
36
+ stage: string;
37
+ agents?: string[];
38
+ model?: e.Providers;
39
+ parallel?: boolean;
40
+ /** Optional simple condition on content/tools */
41
+ when?:
42
+ | 'always'
43
+ | 'has_tools'
44
+ | 'no_tools'
45
+ | { includes?: string[]; excludes?: string[] };
46
+ }>;
47
+ /** Opt-in feature flags */
48
+ featureFlags?: {
49
+ multi_model_routing?: boolean;
50
+ fan_out?: boolean;
51
+ fan_out_retries?: number;
52
+ fan_out_backoff_ms?: number;
53
+ fan_out_concurrency?: number;
54
+ };
55
+ /** Optional per-stage model configs */
56
+ models?: Record<string, l.LLMConfig>;
57
+ } & Omit<g.StandardGraphInput, 'provider' | 'clientOptions'>;
26
58
 
27
59
  export type RunTitleOptions = {
28
60
  inputText: string;
61
+ provider: e.Providers;
29
62
  contentParts: (s.MessageContentComplex | undefined)[];
30
63
  titlePrompt?: string;
31
64
  skipLanguage?: boolean;
32
65
  clientOptions?: l.ClientOptions;
33
66
  chainOptions?: Partial<RunnableConfig> | undefined;
67
+ omitOptions?: Set<string>;
68
+ titleMethod?: e.TitleMethod;
69
+ titlePromptTemplate?: string;
34
70
  };
35
71
 
36
72
  export interface AgentStateChannels {
@@ -60,14 +96,25 @@ export type TaskManagerGraphConfig = {
60
96
  supervisorConfig: { systemPrompt?: string; llmConfig: l.LLMConfig };
61
97
  };
62
98
 
99
+ export type MultiAgentGraphConfig = {
100
+ type: 'multi-agent';
101
+ compileOptions?: g.CompileOptions;
102
+ agents: g.AgentInputs[];
103
+ edges: g.GraphEdge[];
104
+ };
105
+
106
+ export type StandardGraphConfig = Omit<
107
+ MultiAgentGraphConfig,
108
+ 'edges' | 'type'
109
+ > & { type?: 'standard'; signal?: AbortSignal };
110
+
63
111
  export type RunConfig = {
64
112
  runId: string;
65
- graphConfig:
66
- | StandardGraphConfig
67
- | CollaborativeGraphConfig
68
- | TaskManagerGraphConfig;
113
+ graphConfig: LegacyGraphConfig | StandardGraphConfig | MultiAgentGraphConfig;
69
114
  customHandlers?: Record<string, g.EventHandler>;
70
115
  returnContent?: boolean;
116
+ tokenCounter?: TokenCounter;
117
+ indexTokenCountMap?: Record<string, number>;
71
118
  };
72
119
 
73
120
  export type ProvidedCallbacks =
@@ -76,10 +123,6 @@ export type ProvidedCallbacks =
76
123
 
77
124
  export type TokenCounter = (message: BaseMessage) => number;
78
125
  export type EventStreamOptions = {
79
- callbacks?: graph.ClientCallbacks;
126
+ callbacks?: g.ClientCallbacks;
80
127
  keepContent?: boolean;
81
- /* Context Management */
82
- maxContextTokens?: number;
83
- tokenCounter?: TokenCounter;
84
- indexTokenCountMap?: Record<string, number>;
85
128
  };
package/stream.ts CHANGED
@@ -8,6 +8,8 @@ import type {
8
8
  } from '@langchain/core/messages';
9
9
  import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
10
10
  import type { LLMResult, Generation } from '@langchain/core/outputs';
11
+ import type { AnthropicContentBlock } from '@/llm/anthropic/types';
12
+ import type { Command } from '@langchain/langgraph';
11
13
  import type { ToolEndEvent } from '@/types/tools';
12
14
  import { StepTypes, ContentTypes, GraphEvents } from '@/common/enum';
13
15
 
@@ -103,7 +105,7 @@ export type MessageCreationDetails = {
103
105
 
104
106
  export type ToolEndData = {
105
107
  input: string | Record<string, unknown>;
106
- output?: ToolMessage;
108
+ output?: ToolMessage | Command;
107
109
  };
108
110
  export type ToolErrorData = {
109
111
  id: string;
@@ -251,6 +253,12 @@ export type ReasoningContentText = {
251
253
  think: string;
252
254
  };
253
255
 
256
+ /** Vertex AI / Google Common - Reasoning Content Block Format */
257
+ export type GoogleReasoningContentText = {
258
+ type: ContentTypes.REASONING;
259
+ reasoning: string;
260
+ };
261
+
254
262
  /** Anthropic's Reasoning Content Block Format */
255
263
  export type ThinkingContentText = {
256
264
  type: ContentTypes.THINKING;
@@ -293,7 +301,20 @@ export type ToolCallContent = {
293
301
  tool_call?: ToolCallPart;
294
302
  };
295
303
 
304
+ export type ToolResultContent = {
305
+ content:
306
+ | string
307
+ | Record<string, unknown>
308
+ | Array<string | Record<string, unknown>>
309
+ | AnthropicContentBlock[];
310
+ type: 'tool_result' | 'web_search_result' | 'web_search_tool_result';
311
+ tool_use_id?: string;
312
+ input?: string | Record<string, unknown>;
313
+ index?: number;
314
+ };
315
+
296
316
  export type MessageContentComplex = (
317
+ | ToolResultContent
297
318
  | ThinkingContentText
298
319
  | AgentUpdate
299
320
  | ToolCallContent
package/tools.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  // src/types/tools.ts
2
- import type { RunnableToolLike } from '@langchain/core/runnables';
3
2
  import type { StructuredToolInterface } from '@langchain/core/tools';
3
+ import type { RunnableToolLike } from '@langchain/core/runnables';
4
4
  import type { ToolCall } from '@langchain/core/messages/tool';
5
5
  import type { ToolErrorData } from './stream';
6
6
  import { EnvVar } from '@/common';
@@ -13,9 +13,10 @@ export type CustomToolCall = {
13
13
  id?: string;
14
14
  type?: 'tool_call';
15
15
  output?: string;
16
- }
16
+ };
17
17
 
18
18
  export type GenericTool = StructuredToolInterface | RunnableToolLike;
19
+
19
20
  export type ToolMap = Map<string, GenericTool>;
20
21
  export type ToolRefs = {
21
22
  tools: GenericTool[];
@@ -30,7 +31,10 @@ export type ToolNodeOptions = {
30
31
  handleToolErrors?: boolean;
31
32
  loadRuntimeTools?: ToolRefGenerator;
32
33
  toolCallStepIds?: Map<string, string>;
33
- errorHandler?: (data: ToolErrorData, metadata?: Record<string, unknown>) => void
34
+ errorHandler?: (
35
+ data: ToolErrorData,
36
+ metadata?: Record<string, unknown>
37
+ ) => void;
34
38
  };
35
39
 
36
40
  export type ToolNodeConstructorParams = ToolRefs & ToolNodeOptions;
@@ -50,13 +54,15 @@ export type CodeEnvFile = {
50
54
  session_id: string;
51
55
  };
52
56
 
53
- export type CodeExecutionToolParams = undefined | {
54
- session_id?: string;
55
- user_id?: string;
56
- apiKey?: string;
57
- files?: CodeEnvFile[];
58
- [EnvVar.CODE_API_KEY]?: string;
59
- }
57
+ export type CodeExecutionToolParams =
58
+ | undefined
59
+ | {
60
+ session_id?: string;
61
+ user_id?: string;
62
+ apiKey?: string;
63
+ files?: CodeEnvFile[];
64
+ [EnvVar.CODE_API_KEY]?: string;
65
+ };
60
66
 
61
67
  export type FileRef = {
62
68
  id: string;