@librechat/agents 3.0.0-rc4 → 3.0.0-rc6
- package/dist/cjs/common/enum.cjs +1 -0
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +2 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
- package/dist/cjs/llm/openai/index.cjs +33 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/esm/common/enum.mjs +1 -0
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +2 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
- package/dist/esm/llm/openai/index.mjs +33 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/types/common/enum.d.ts +2 -1
- package/dist/types/llm/openai/index.d.ts +10 -0
- package/dist/types/types/llm.d.ts +1 -0
- package/package.json +1 -1
- package/src/common/enum.ts +1 -0
- package/src/graphs/MultiAgentGraph.ts +2 -1
- package/src/llm/openai/index.ts +41 -0
- package/src/scripts/multi-agent-supervisor.ts +26 -25
- package/src/scripts/test-multi-agent-list-handoff.ts +6 -3
- package/src/types/llm.ts +1 -0
package/src/llm/openai/index.ts
CHANGED
@@ -30,6 +30,7 @@ import {
   _convertOpenAIResponsesDeltaToBaseMessageChunk,
   type ResponseReturnStreamEvents,
 } from './utils';
+import { sleep } from '@/utils';
 
 // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
 const iife = <T>(fn: () => T) => fn();
@@ -195,6 +196,17 @@ export class CustomAzureOpenAIClient extends AzureOpenAIClient {
 
 /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
+  _lc_stream_delay?: number;
+
+  constructor(
+    fields?: t.ChatOpenAICallOptions & {
+      _lc_stream_delay?: number;
+    } & t.OpenAIChatInput['modelKwargs']
+  ) {
+    super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
+  }
+
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
@@ -288,6 +300,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
       );
       if (chunk == null) continue;
       yield chunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
       await runManager?.handleLLMNewToken(
         chunk.text || '',
         undefined,
@@ -376,6 +391,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
         generationInfo,
       });
       yield generationChunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
       await runManager?.handleLLMNewToken(
         generationChunk.text || '',
         newTokenIndices,
@@ -423,6 +441,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
         text: '',
       });
       yield generationChunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
     }
     if (options.signal?.aborted === true) {
       throw new Error('AbortError');
@@ -432,6 +453,13 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
 
 /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
+  _lc_stream_delay?: number;
+
+  constructor(fields?: t.AzureOpenAIInput & { _lc_stream_delay?: number }) {
+    super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
+  }
+
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
@@ -559,6 +587,9 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
       );
       if (chunk == null) continue;
       yield chunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
       await runManager?.handleLLMNewToken(
         chunk.text || '',
         undefined,
@@ -624,13 +655,17 @@ export interface XAIUsageMetadata
 }
 
 export class ChatXAI extends OriginalChatXAI {
+  _lc_stream_delay?: number;
+
   constructor(
     fields?: Partial<ChatXAIInput> & {
       configuration?: { baseURL?: string };
       clientConfig?: { baseURL?: string };
+      _lc_stream_delay?: number;
     }
   ) {
     super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
     const customBaseURL =
       fields?.configuration?.baseURL ?? fields?.clientConfig?.baseURL;
     if (customBaseURL != null && customBaseURL) {
@@ -759,6 +794,9 @@ export class ChatXAI extends OriginalChatXAI {
         generationInfo,
       });
       yield generationChunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
       await runManager?.handleLLMNewToken(
         generationChunk.text || '',
         newTokenIndices,
@@ -832,6 +870,9 @@ export class ChatXAI extends OriginalChatXAI {
         text: '',
       });
       yield generationChunk;
+      if (this._lc_stream_delay != null) {
+        await sleep(this._lc_stream_delay);
+      }
     }
     if (options.signal?.aborted === true) {
       throw new Error('AbortError');
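The thread running through all of these hunks is a new optional `_lc_stream_delay` field (in milliseconds) on `ChatOpenAI`, `AzureChatOpenAI`, and `ChatXAI`: each overridden streaming generator now calls `await sleep(this._lc_stream_delay)` after yielding a chunk, throttling how fast tokens are emitted. A minimal usage sketch, assuming the class is exported from the package root; the model name and key are placeholders:

// Sketch only: '_lc_stream_delay' pauses ~25 ms after each streamed chunk.
import { ChatOpenAI } from '@librechat/agents';

const model = new ChatOpenAI({
  modelName: 'gpt-4.1', // placeholder model
  apiKey: process.env.OPENAI_API_KEY,
  _lc_stream_delay: 25,
});

for await (const chunk of await model.stream('Say hello')) {
  process.stdout.write(String(chunk.content)); // chunks now arrive at most every ~25 ms
}

Note that the sleep runs before `runManager?.handleLLMNewToken(...)`, so callback consumers are throttled along with the yielded chunks.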
package/src/scripts/multi-agent-supervisor.ts
CHANGED

@@ -36,10 +36,10 @@ async function testSupervisorMultiAgent() {
   // Define configurations for all possible specialists
   const specialistConfigs = {
     data_analyst: {
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a Data Analyst specialist. Your expertise includes:
 - Statistical analysis and data visualization
@@ -52,10 +52,10 @@ async function testSupervisorMultiAgent() {
       maxContextTokens: 8000,
     },
     security_expert: {
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a Security Expert. Your expertise includes:
 - Cybersecurity best practices
@@ -68,10 +68,10 @@ async function testSupervisorMultiAgent() {
       maxContextTokens: 8000,
     },
     product_designer: {
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a Product Designer. Your expertise includes:
 - User experience (UX) design principles
@@ -84,10 +84,10 @@ async function testSupervisorMultiAgent() {
       maxContextTokens: 8000,
     },
     devops_engineer: {
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a DevOps Engineer. Your expertise includes:
 - CI/CD pipeline design and optimization
@@ -100,10 +100,10 @@ async function testSupervisorMultiAgent() {
       maxContextTokens: 8000,
     },
     legal_advisor: {
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a Legal Advisor specializing in technology. Your expertise includes:
 - Software licensing and open source compliance
@@ -154,6 +154,7 @@ async function testSupervisorMultiAgent() {
       event: GraphEvents.ON_MESSAGE_DELTA,
       data: t.StreamEventData
     ): void => {
+      console.dir(data, { depth: null });
       aggregateContent({ event, data: data as t.MessageDeltaEvent });
     },
   },
@@ -180,10 +181,10 @@ async function testSupervisorMultiAgent() {
 
   // Define the adaptive specialist configuration that will be reused
   const specialistConfig = {
-    provider: Providers.
+    provider: Providers.OPENAI,
    clientOptions: {
-      modelName: '
-      apiKey: process.env.
+      modelName: 'gpt-4.1',
+      apiKey: process.env.OPENAI_API_KEY,
    },
    instructions: `You are an Adaptive Specialist. Your agent ID indicates your role:
 
@@ -202,10 +203,10 @@ async function testSupervisorMultiAgent() {
   const agents: t.AgentInputs[] = [
     {
       agentId: 'supervisor',
-      provider: Providers.
+      provider: Providers.OPENAI,
       clientOptions: {
-        modelName: '
-        apiKey: process.env.
+        modelName: 'gpt-4.1-mini',
+        apiKey: process.env.OPENAI_API_KEY,
       },
       instructions: `You are a Task Supervisor with access to 5 specialist agents:
 1. transfer_to_data_analyst - For statistical analysis and metrics
@@ -298,10 +299,10 @@ async function testSupervisorMultiAgent() {
   // Test with different queries
   const testQueries = [
     'How can we analyze user engagement metrics to improve our product?',
-    'What security measures should we implement for our new API?',
-    'Can you help design a better onboarding flow for our mobile app?',
-    'We need to set up a CI/CD pipeline for our microservices.',
-    'What are the legal implications of using GPL-licensed code in our product?',
+    // 'What security measures should we implement for our new API?',
+    // 'Can you help design a better onboarding flow for our mobile app?',
+    // 'We need to set up a CI/CD pipeline for our microservices.',
+    // 'What are the legal implications of using GPL-licensed code in our product?',
   ];
 
   const config = {
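After these edits every agent in the script runs on the same provider. A condensed sketch of the shape each specialist entry now takes (values copied from the hunks above; the per-specialist instruction text is elided to one line):

// Sketch: the post-change shape of one specialistConfigs entry.
const dataAnalyst = {
  provider: Providers.OPENAI,
  clientOptions: {
    modelName: 'gpt-4.1',
    apiKey: process.env.OPENAI_API_KEY, // read from the environment
  },
  instructions: 'You are a Data Analyst specialist. ...', // full prompt in the script
  maxContextTokens: 8000,
};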
package/src/scripts/test-multi-agent-list-handoff.ts
CHANGED

@@ -5,8 +5,8 @@ config();
 
 import { HumanMessage, BaseMessage } from '@langchain/core/messages';
 import { Run } from '@/run';
-import { Providers, GraphEvents } from '@/common';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+import { Providers, GraphEvents, Constants } from '@/common';
 import { ToolEndHandler, ModelEndHandler } from '@/events';
 import type * as t from '@/types';
 
@@ -70,8 +70,11 @@ async function testSupervisorListHandoff() {
       metadata?: Record<string, unknown>
     ): void => {
       const toolData = data as any;
-      if (toolData?.name?.
-      const specialist = toolData.name.replace(
+      if (toolData?.name?.startsWith(Constants.LC_TRANSFER_TO_)) {
+        const specialist = toolData.name.replace(
+          Constants.LC_TRANSFER_TO_,
+          ''
+        );
         console.log(`\nš Transferring to ${specialist}...`);
         selectedRole = specialist;
       }