@librechat/agents 2.4.40 → 2.4.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -417,21 +417,22 @@ export class StandardGraph extends Graph<t.BaseGraphState, GraphNode> {
   }
 
   getNewModel({
-    clientOptions = {},
-    omitOriginalOptions,
+    provider,
+    clientOptions,
+    omitOptions,
   }: {
+    provider: Providers;
     clientOptions?: t.ClientOptions;
-    omitOriginalOptions?: Set<string>;
+    omitOptions?: Set<string>;
   }): t.ChatModelInstance {
-    const ChatModelClass = getChatModelClass(this.provider);
-    const _options = omitOriginalOptions
+    const ChatModelClass = getChatModelClass(provider);
+    const options = omitOptions
       ? Object.fromEntries(
-          Object.entries(this.clientOptions).filter(
-            ([key]) => !omitOriginalOptions.has(key)
+          Object.entries(clientOptions ?? this.clientOptions).filter(
+            ([key]) => !omitOptions.has(key)
           )
         )
-      : this.clientOptions;
-    const options = Object.assign(_options, clientOptions);
+      : (clientOptions ?? this.clientOptions);
     return new ChatModelClass(options);
   }
 
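The reworked `getNewModel` takes the provider explicitly instead of reading `this.provider`, and `omitOptions` now filters whichever options object is actually in effect. Note the subtle semantics change: previously a passed `clientOptions` was merged over the graph's filtered stored options via `Object.assign`; now a supplied `clientOptions` replaces them outright. A minimal sketch of the new call shape (the `graph` variable is illustrative):

```ts
// Sketch, assuming a StandardGraph instance named `graph`.
// omitOptions filters the supplied clientOptions (or, if none are
// supplied, the graph's own this.clientOptions) before construction.
const model = graph.getNewModel({
  provider: Providers.OPENAI,
  clientOptions: { temperature: 0.2, streaming: true },
  omitOptions: new Set(['streaming', 'stream']),
});
// Resolves to roughly: new ChatModelClass({ temperature: 0.2 })
```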
@@ -277,6 +277,25 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
   }
 
 export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
+  override bindTools(
+    tools: ChatOpenAIToolType[],
+    kwargs?: Partial<t.ChatOpenAICallOptions>
+  ): Runnable<BaseLanguageModelInput, AIMessageChunk, t.ChatOpenAICallOptions> {
+    let strict: boolean | undefined;
+    if (kwargs?.strict !== undefined) {
+      strict = kwargs.strict;
+    } else if (this.supportsStrictToolCalling !== undefined) {
+      strict = this.supportsStrictToolCalling;
+    }
+    return this.withConfig({
+      tools: tools.map((tool) =>
+        isBuiltInTool(tool)
+          ? tool
+          : _convertChatOpenAIToolTypeToOpenAITool(tool, { strict })
+      ),
+      ...kwargs,
+    } as Partial<t.ChatOpenAICallOptions>);
+  }
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
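The new `bindTools` override on `AzureChatOpenAI` resolves `strict` from an explicit `kwargs.strict` first, falling back to the instance's `supportsStrictToolCalling`, and converts anything that is not a built-in tool to the OpenAI tool format. A hedged usage sketch, where the weather tool is invented for illustration and `model` is assumed to be an `AzureChatOpenAI` instance from this package:

```ts
import { tool } from '@langchain/core/tools';
import { z } from 'zod';

// Hypothetical tool, purely for illustration.
const getWeather = tool(async ({ city }) => `Sunny in ${city}`, {
  name: 'get_weather',
  description: 'Look up current weather for a city',
  schema: z.object({ city: z.string() }),
});

// An explicit strict flag takes precedence over supportsStrictToolCalling.
const boundModel = model.bindTools([getWeather], { strict: true });
```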
package/src/run.ts CHANGED
@@ -235,6 +235,7 @@ export class Run<T extends t.BaseGraphState> {
   }
 
   async generateTitle({
+    provider,
     inputText,
     contentParts,
     titlePrompt,
@@ -255,8 +256,9 @@ export class Run<T extends t.BaseGraphState> {
       await convoTemplate.invoke({ input: inputText, output: response })
     ).value;
     const model = this.Graph?.getNewModel({
+      provider,
       clientOptions,
-      omitOriginalOptions: new Set([
+      omitOptions: new Set([
         'clientOptions',
         'streaming',
         'stream',
@@ -270,7 +272,7 @@ export class Run<T extends t.BaseGraphState> {
       return { language: '', title: '' };
     }
     if (
-      isOpenAILike(this.provider) &&
+      isOpenAILike(provider) &&
       (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)
     ) {
       model.temperature = (clientOptions as t.OpenAIClientOptions | undefined)
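Since the graph no longer infers the provider itself, `generateTitle` now requires a `provider` argument and threads it into `getNewModel`. A sketch of the updated call, mirroring the example scripts later in this diff (`inputText`, `contentParts`, and `handleLLMEnd` are assumed to come from the surrounding code):

```ts
// generateTitle resolves to { language, title }, per the early return above.
const { language, title } = await run.generateTitle({
  provider: Providers.OPENAI, // now required; no longer read from the run
  inputText,
  contentParts,
  chainOptions: {
    callbacks: [{ handleLLMEnd }],
  },
});
```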
@@ -4,41 +4,45 @@ import { hideBin } from 'yargs/helpers';
 import { llmConfigs } from '@/utils/llmConfig';
 import { Providers } from '@/common';
 
-export async function getArgs(): Promise<{ userName: string; location: string; provider: string; currentDate: string; }> {
+export async function getArgs(): Promise<{
+  userName: string;
+  location: string;
+  provider: Providers;
+  currentDate: string;
+}> {
   const argv = yargs(hideBin(process.argv))
     .option('name', {
       alias: 'n',
       type: 'string',
       description: 'User name',
-      default: 'Jo'
+      default: 'Jo',
     })
     .option('location', {
       alias: 'l',
       type: 'string',
       description: 'User location',
-      default: 'New York'
+      default: 'New York',
     })
     .option('provider', {
       alias: 'p',
       type: 'string',
       description: 'LLM provider',
       choices: Object.keys(llmConfigs),
-      default: Providers.OPENAI
+      default: Providers.OPENAI,
     })
     .help()
-    .alias('help', 'h')
-    .argv;
+    .alias('help', 'h').argv;
 
   const args = await argv;
   const userName = args.name as string;
   const location = args.location as string;
-  const provider = args.provider as string;
+  const provider = args.provider as Providers;
   const currentDate = new Date().toLocaleString();
 
   return {
     userName,
     location,
     provider,
-    currentDate
+    currentDate,
   };
 }
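With `provider` typed as `Providers` rather than `string`, consumers of `getArgs` can pass the value into provider-keyed helpers without a cast. A brief consumption sketch (the `getLLMConfig` signature is assumed from the imports in the example scripts, not shown in this diff):

```ts
import { getArgs } from '@/scripts/args';
import { getLLMConfig } from '@/utils/llmConfig';

const { userName, location, provider, currentDate } = await getArgs();
// provider is a Providers enum member here, not a bare string.
const llmConfig = getLLMConfig(provider); // assumed signature
```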
@@ -6,7 +6,11 @@ import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
 import { GraphEvents } from '@/common';
@@ -23,38 +27,57 @@ async function testCodeExecution(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         console.dir(data, { depth: null });
-        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
-      }
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (_event: string, data: t.StreamEventData, metadata?: Record<string, unknown>): void => {
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
@@ -66,14 +89,19 @@ async function testCodeExecution(): Promise<void> {
       type: 'standard',
       llmConfig,
       tools: [new TavilySearchResults(), createCodeExecutionTool()],
-      instructions: 'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
+      instructions:
+        'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
     },
     returnContent: true,
     customHandlers,
   });
 
-  const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+  const config: Partial<RunnableConfig> & {
+    version: 'v1' | 'v2';
+    run_id?: string;
+    streamMode: string;
+  } = {
     configurable: {
       provider,
       thread_id: 'conversation-num-1',
@@ -152,12 +180,15 @@ async function testCodeExecution(): Promise<void> {
 
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleResult = await run.generateTitle({
+    provider,
     inputText: userMessage2,
     contentParts,
     chainOptions: {
-      callbacks: [{
-        handleLLMEnd,
-      }],
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
     },
   });
   console.log('Generated Title:', titleResult);
@@ -180,4 +211,4 @@ testCodeExecution().catch((err) => {
   console.log('Conversation history:');
   console.dir(conversationHistory, { depth: null });
   process.exit(1);
-});
+});
@@ -5,7 +5,11 @@ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
 import { GraphEvents } from '@/common';
@@ -22,38 +26,57 @@ async function testCodeExecution(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         console.dir(data, { depth: null });
-        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
-      }
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (_event: string, data: t.StreamEventData, metadata?: Record<string, unknown>): void => {
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
@@ -65,14 +88,19 @@ async function testCodeExecution(): Promise<void> {
       type: 'standard',
       llmConfig,
       tools: [createCodeExecutionTool()],
-      instructions: 'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
+      instructions:
+        'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}. The current date is ${currentDate}.`,
     },
     returnContent: true,
     customHandlers,
   });
 
-  const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+  const config: Partial<RunnableConfig> & {
+    version: 'v1' | 'v2';
+    run_id?: string;
+    streamMode: string;
+  } = {
     configurable: {
       provider,
       thread_id: 'conversation-num-1',
@@ -131,12 +159,15 @@ async function testCodeExecution(): Promise<void> {
 
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleResult = await run.generateTitle({
+    provider,
     inputText: userMessage2,
     contentParts,
     chainOptions: {
-      callbacks: [{
-        handleLLMEnd,
-      }],
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
     },
   });
   console.log('Generated Title:', titleResult);
@@ -5,14 +5,17 @@ import { HumanMessage, AIMessage, BaseMessage } from '@langchain/core/messages';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { fetchRandomImageTool, fetchRandomImageURL } from '@/tools/example';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
 import { GraphEvents } from '@/common';
 import { Run } from '@/run';
 
-
 const conversationHistory: BaseMessage[] = [];
 
 async function testCodeExecution(): Promise<void> {
@@ -23,38 +26,57 @@ async function testCodeExecution(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         console.dir(data, { depth: null });
-        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
-      }
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (_event: string, data: t.StreamEventData, metadata?: Record<string, unknown>): void => {
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
@@ -67,14 +89,19 @@ async function testCodeExecution(): Promise<void> {
       llmConfig,
       tools: [fetchRandomImageTool],
       // tools: [fetchRandomImageURL],
-      instructions: 'You are a friendly AI assistant with internet capabilities. Always address the user by their name.',
+      instructions:
+        'You are a friendly AI assistant with internet capabilities. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
     },
     returnContent: true,
     customHandlers,
   });
 
-  const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+  const config: Partial<RunnableConfig> & {
+    version: 'v1' | 'v2';
+    run_id?: string;
+    streamMode: string;
+  } = {
     configurable: {
       provider,
       thread_id: 'conversation-num-1',
@@ -109,7 +136,9 @@ async function testCodeExecution(): Promise<void> {
   inputs = {
     messages: conversationHistory,
   };
-  const finalContentParts2 = await run.processStream(inputs, config, { keepContent: true });
+  const finalContentParts2 = await run.processStream(inputs, config, {
+    keepContent: true,
+  });
   const finalMessages2 = run.getRunMessages();
   if (finalMessages2) {
     conversationHistory.push(...finalMessages2);
@@ -119,12 +148,15 @@ async function testCodeExecution(): Promise<void> {
 
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleResult = await run.generateTitle({
+    provider,
     inputText: userMessage2,
     contentParts,
     chainOptions: {
-      callbacks: [{
-        handleLLMEnd,
-      }],
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
     },
   });
   console.log('Generated Title:', titleResult);
@@ -143,4 +175,4 @@ testCodeExecution().catch((err) => {
   console.log('Conversation history:');
   console.dir(conversationHistory, { depth: null });
   process.exit(1);
-});
+});
@@ -160,6 +160,7 @@ async function testStandardStreaming(): Promise<void> {
   console.dir(contentParts, { depth: null });
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleOptions: t.RunTitleOptions = {
+    provider,
     inputText: userMessage,
     contentParts,
     chainOptions: {