@librechat/agents 3.0.0 → 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/dist/cjs/common/enum.cjs +0 -1
  2. package/dist/cjs/common/enum.cjs.map +1 -1
  3. package/dist/cjs/llm/providers.cjs +0 -2
  4. package/dist/cjs/llm/providers.cjs.map +1 -1
  5. package/dist/cjs/main.cjs +2 -0
  6. package/dist/cjs/main.cjs.map +1 -1
  7. package/dist/cjs/tools/Calculator.cjs +45 -0
  8. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  9. package/dist/esm/common/enum.mjs +0 -1
  10. package/dist/esm/common/enum.mjs.map +1 -1
  11. package/dist/esm/llm/providers.mjs +0 -2
  12. package/dist/esm/llm/providers.mjs.map +1 -1
  13. package/dist/esm/main.mjs +1 -0
  14. package/dist/esm/main.mjs.map +1 -1
  15. package/dist/esm/tools/Calculator.mjs +24 -0
  16. package/dist/esm/tools/Calculator.mjs.map +1 -0
  17. package/dist/types/common/enum.d.ts +0 -1
  18. package/dist/types/index.d.ts +1 -0
  19. package/dist/types/tools/Calculator.d.ts +8 -0
  20. package/dist/types/types/llm.d.ts +1 -6
  21. package/package.json +5 -3
  22. package/src/common/enum.ts +0 -1
  23. package/src/index.ts +1 -0
  24. package/src/llm/providers.ts +0 -2
  25. package/src/scripts/abort.ts +34 -15
  26. package/src/scripts/cli.ts +25 -20
  27. package/src/scripts/cli2.ts +23 -15
  28. package/src/scripts/cli3.ts +35 -29
  29. package/src/scripts/cli4.ts +1 -2
  30. package/src/scripts/cli5.ts +1 -2
  31. package/src/scripts/code_exec.ts +1 -2
  32. package/src/scripts/code_exec_simple.ts +1 -2
  33. package/src/scripts/content.ts +33 -15
  34. package/src/scripts/simple.ts +1 -2
  35. package/src/scripts/stream.ts +33 -15
  36. package/src/scripts/test-tools-before-handoff.ts +17 -28
  37. package/src/scripts/tools.ts +4 -6
  38. package/src/specs/anthropic.simple.test.ts +1 -1
  39. package/src/specs/azure.simple.test.ts +1 -1
  40. package/src/specs/openai.simple.test.ts +1 -1
  41. package/src/specs/openrouter.simple.test.ts +1 -1
  42. package/src/tools/Calculator.ts +25 -0
  43. package/src/types/llm.ts +0 -6
  44. package/dist/types/tools/example.d.ts +0 -78
  45. package/src/proto/CollabGraph.ts +0 -269
  46. package/src/proto/TaskManager.ts +0 -243
  47. package/src/proto/collab.ts +0 -200
  48. package/src/proto/collab_design.ts +0 -184
  49. package/src/proto/collab_design_v2.ts +0 -224
  50. package/src/proto/collab_design_v3.ts +0 -255
  51. package/src/proto/collab_design_v4.ts +0 -220
  52. package/src/proto/collab_design_v5.ts +0 -251
  53. package/src/proto/collab_graph.ts +0 -181
  54. package/src/proto/collab_original.ts +0 -123
  55. package/src/proto/example.ts +0 -93
  56. package/src/proto/example_new.ts +0 -68
  57. package/src/proto/example_old.ts +0 -201
  58. package/src/proto/example_test.ts +0 -152
  59. package/src/proto/example_test_anthropic.ts +0 -100
  60. package/src/proto/log_stream.ts +0 -202
  61. package/src/proto/main_collab_community_event.ts +0 -133
  62. package/src/proto/main_collab_design_v2.ts +0 -96
  63. package/src/proto/main_collab_design_v4.ts +0 -100
  64. package/src/proto/main_collab_design_v5.ts +0 -135
  65. package/src/proto/main_collab_global_analysis.ts +0 -122
  66. package/src/proto/main_collab_hackathon_event.ts +0 -153
  67. package/src/proto/main_collab_space_mission.ts +0 -153
  68. package/src/proto/main_philosophy.ts +0 -210
  69. package/src/proto/original_script.ts +0 -126
  70. package/src/proto/standard.ts +0 -100
  71. package/src/proto/stream.ts +0 -56
  72. package/src/proto/tasks.ts +0 -118
  73. package/src/proto/tools/global_analysis_tools.ts +0 -86
  74. package/src/proto/tools/space_mission_tools.ts +0 -60
  75. package/src/proto/vertexai.ts +0 -54
  76. package/src/tools/example.ts +0 -129
@@ -1,7 +1,6 @@
1
1
  import { config } from 'dotenv';
2
2
  config();
3
3
 
4
- import { TavilySearch } from '@langchain/tavily';
5
4
  import { HumanMessage, BaseMessage } from '@langchain/core/messages';
6
5
  import { Run } from '@/run';
7
6
  import { Providers, GraphEvents } from '@/common';
@@ -78,10 +77,7 @@ async function testToolsBeforeHandoff() {
78
77
  console.log(`\nšŸ”§ Tool started:`);
79
78
  console.dir({ toolData, metadata }, { depth: null });
80
79
 
81
- if (toolData?.output?.name === 'tavily_search_results_json') {
82
- toolCallCount++;
83
- console.log(`šŸ“Š Search #${toolCallCount} initiated`);
84
- } else if (toolData?.output?.name?.includes('transfer_to_')) {
80
+ if (toolData?.output?.name?.includes('transfer_to_')) {
85
81
  handoffOccurred = true;
86
82
  const specialist = toolData.name.replace('transfer_to_', '');
87
83
  console.log(`\nšŸ”€ Handoff initiated to: ${specialist}`);
@@ -100,21 +96,17 @@ async function testToolsBeforeHandoff() {
100
96
  modelName: 'gpt-4.1-mini',
101
97
  apiKey: process.env.OPENAI_API_KEY,
102
98
  },
103
- tools: [new TavilySearch({ maxResults: 3 })],
104
- instructions: `You are a Research Coordinator with access to web search and a report writer specialist.
99
+ tools: [],
100
+ instructions: `You are a Research Coordinator with access to a report writer specialist.
105
101
 
106
102
  Your workflow MUST follow these steps IN ORDER:
107
- 1. FIRST: Write an initial response acknowledging the request and outlining your research plan
108
- - Explain what aspects you'll investigate
109
- - Describe your search strategy
110
- 2. SECOND: Conduct exactly 2 web searches to gather comprehensive information
111
- - Search 1: Get general information about the topic
112
- - Search 2: Get specific details, recent updates, or complementary data
113
- - Note: Even if your searches are unsuccessful, you MUST still proceed to handoff after EXACTLY 2 searches
114
- 3. FINALLY: After completing both searches, transfer to the report writer
115
- - Provide the report writer with a summary of your findings
103
+ 1. FIRST: Write an initial response acknowledging the request
104
+ - Explain what you understand about the topic
105
+ - Provide any general knowledge you have
106
+ 2. FINALLY: Transfer to the report writer
107
+ - Provide the report writer with a summary of the information
116
108
 
117
- CRITICAL: You MUST write your initial response before ANY tool use. Then complete both searches before handoff.`,
109
+ CRITICAL: You MUST write your initial response before transferring to the report writer.`,
118
110
  maxContextTokens: 8000,
119
111
  },
120
112
  {
@@ -159,10 +151,10 @@ async function testToolsBeforeHandoff() {
159
151
  }
160
152
 
161
153
  try {
162
- // Single test query that requires research before report writing
163
- const query = `Research the latest developments in quantum computing from 2025,
154
+ // Single test query that requires handoff to report writer
155
+ const query = `Tell me about quantum computing developments,
164
156
  including major breakthroughs and commercial applications.
165
- I need a comprehensive report with recent findings.`;
157
+ I need a comprehensive report.`;
166
158
 
167
159
  console.log('='.repeat(60));
168
160
  console.log(`USER QUERY: "${query}"`);
@@ -173,10 +165,9 @@ async function testToolsBeforeHandoff() {
173
165
  const run = await Run.create(runConfig);
174
166
 
175
167
  console.log('\nExpected behavior:');
176
- console.log('1. Research Coordinator writes initial response/plan');
177
- console.log('2. Research Coordinator performs 2 web searches');
178
- console.log('3. Research Coordinator hands off to Report Writer');
179
- console.log('4. Report Writer creates final report\n');
168
+ console.log('1. Research Coordinator writes initial response');
169
+ console.log('2. Research Coordinator hands off to Report Writer');
170
+ console.log('3. Report Writer creates final report\n');
180
171
 
181
172
  // Process with streaming
182
173
  conversationHistory.push(new HumanMessage(query));
@@ -204,11 +195,9 @@ async function testToolsBeforeHandoff() {
204
195
  console.log('EDGE CASE TEST RESULTS:');
205
196
  console.log('─'.repeat(60));
206
197
  console.log(`Tool calls before handoff: ${toolCallCount}`);
207
- console.log(`Expected tool calls: 2`);
198
+ console.log(`Expected tool calls: 0 (no web search available)`);
208
199
  console.log(`Handoff occurred: ${handoffOccurred ? 'Yes āœ…' : 'No āŒ'}`);
209
- console.log(
210
- `Test status: ${toolCallCount === 2 && handoffOccurred ? 'PASSED āœ…' : 'FAILED āŒ'}`
211
- );
200
+ console.log(`Test status: ${handoffOccurred ? 'PASSED āœ…' : 'FAILED āŒ'}`);
212
201
  console.log('─'.repeat(60));
213
202
 
214
203
  // Display conversation history
@@ -3,7 +3,6 @@
3
3
  import { config } from 'dotenv';
4
4
  config();
5
5
  import { HumanMessage, BaseMessage } from '@langchain/core/messages';
6
- import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
7
6
  import type * as t from '@/types';
8
7
  import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
9
8
  import { ToolEndHandler, ModelEndHandler } from '@/events';
@@ -95,11 +94,13 @@ async function testStandardStreaming(): Promise<void> {
95
94
  graphConfig: {
96
95
  type: 'standard',
97
96
  llmConfig,
98
- tools: [new TavilySearchResults()],
97
+ tools: [],
99
98
  instructions:
100
99
  'You are a friendly AI assistant. Always address the user by their name.',
101
100
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
101
+ maxContextTokens: 89000,
102
102
  },
103
+ indexTokenCountMap: { 0: 35 },
103
104
  returnContent: true,
104
105
  customHandlers,
105
106
  });
@@ -126,10 +127,7 @@ async function testStandardStreaming(): Promise<void> {
126
127
  const inputs = {
127
128
  messages: conversationHistory,
128
129
  };
129
- const finalContentParts = await run.processStream(inputs, config, {
130
- indexTokenCountMap: { 0: 35 },
131
- maxContextTokens: 89000,
132
- });
130
+ const finalContentParts = await run.processStream(inputs, config);
133
131
  const finalMessages = run.getRunMessages();
134
132
  if (finalMessages) {
135
133
  conversationHistory.push(...finalMessages);
@@ -3,7 +3,7 @@
3
3
  // src/scripts/cli.test.ts
4
4
  import { config } from 'dotenv';
5
5
  config();
6
- import { Calculator } from '@langchain/community/tools/calculator';
6
+ import { Calculator } from '@/tools/Calculator';
7
7
  import {
8
8
  HumanMessage,
9
9
  BaseMessage,
@@ -3,7 +3,7 @@
3
3
  // src/scripts/cli.test.ts
4
4
  import { config } from 'dotenv';
5
5
  config();
6
- import { Calculator } from '@langchain/community/tools/calculator';
6
+ import { Calculator } from '@/tools/Calculator';
7
7
  import {
8
8
  HumanMessage,
9
9
  BaseMessage,
@@ -3,7 +3,7 @@
3
3
  // src/scripts/cli.test.ts
4
4
  import { config } from 'dotenv';
5
5
  config();
6
- import { Calculator } from '@langchain/community/tools/calculator';
6
+ import { Calculator } from '@/tools/Calculator';
7
7
  import {
8
8
  HumanMessage,
9
9
  BaseMessage,
@@ -1,6 +1,6 @@
1
1
  import { config } from 'dotenv';
2
2
  config();
3
- import { Calculator } from '@langchain/community/tools/calculator';
3
+ import { Calculator } from '@/tools/Calculator';
4
4
  import {
5
5
  HumanMessage,
6
6
  BaseMessage,
@@ -0,0 +1,25 @@
1
+ import { Tool } from '@langchain/core/tools';
2
+ import * as math from 'mathjs';
3
+
4
+ export class Calculator extends Tool {
5
+ static lc_name(): string {
6
+ return 'Calculator';
7
+ }
8
+
9
+ get lc_namespace(): string[] {
10
+ return [...super.lc_namespace, 'calculator'];
11
+ }
12
+
13
+ name = 'calculator';
14
+
15
+ async _call(input: string): Promise<string> {
16
+ try {
17
+ return math.evaluate(input).toString();
18
+ } catch {
19
+ return 'I don\'t know how to do that.';
20
+ }
21
+ }
22
+
23
+ description =
24
+ 'Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.';
25
+ }
package/src/types/llm.ts CHANGED
@@ -2,7 +2,6 @@
2
2
  import { ChatOllama } from '@langchain/ollama';
3
3
  import { ChatMistralAI } from '@langchain/mistralai';
4
4
  import { ChatBedrockConverse } from '@langchain/aws';
5
- import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
6
5
  import type {
7
6
  BindToolsInput,
8
7
  BaseChatModelParams,
@@ -13,7 +12,6 @@ import type {
13
12
  AzureOpenAIInput,
14
13
  ClientOptions as OAIClientOptions,
15
14
  } from '@langchain/openai';
16
- import type { BedrockChatFields } from '@langchain/community/chat_models/bedrock/web';
17
15
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
18
16
  import type { GeminiGenerationConfig } from '@langchain/google-common';
19
17
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
@@ -65,7 +63,6 @@ export type MistralAIClientOptions = ChatMistralAIInput;
65
63
  export type VertexAIClientOptions = ChatVertexAIInput & {
66
64
  includeThoughts?: boolean;
67
65
  };
68
- export type BedrockClientOptions = BedrockChatFields;
69
66
  export type BedrockAnthropicInput = ChatBedrockConverseInput & {
70
67
  additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] &
71
68
  AnthropicReasoning;
@@ -85,7 +82,6 @@ export type ClientOptions =
85
82
  | AnthropicClientOptions
86
83
  | MistralAIClientOptions
87
84
  | VertexAIClientOptions
88
- | BedrockClientOptions
89
85
  | BedrockConverseClientOptions
90
86
  | GoogleClientOptions
91
87
  | DeepSeekClientOptions
@@ -113,7 +109,6 @@ export type ProviderOptionsMap = {
113
109
  [Providers.MISTRALAI]: MistralAIClientOptions;
114
110
  [Providers.MISTRAL]: MistralAIClientOptions;
115
111
  [Providers.OPENROUTER]: ChatOpenRouterCallOptions;
116
- [Providers.BEDROCK_LEGACY]: BedrockClientOptions;
117
112
  [Providers.BEDROCK]: BedrockConverseClientOptions;
118
113
  [Providers.XAI]: XAIClientOptions;
119
114
  };
@@ -129,7 +124,6 @@ export type ChatModelMap = {
129
124
  [Providers.MISTRALAI]: ChatMistralAI;
130
125
  [Providers.MISTRAL]: ChatMistralAI;
131
126
  [Providers.OPENROUTER]: ChatOpenRouter;
132
- [Providers.BEDROCK_LEGACY]: BedrockChat;
133
127
  [Providers.BEDROCK]: ChatBedrockConverse;
134
128
  [Providers.GOOGLE]: CustomChatGoogleGenerativeAI;
135
129
  };
@@ -1,78 +0,0 @@
1
- import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
2
- import { DynamicStructuredTool } from '@langchain/core/tools';
3
- import { z } from 'zod';
4
- export declare const fetchRandomImageTool: DynamicStructuredTool<z.ZodObject<{
5
- input: z.ZodOptional<z.ZodString>;
6
- }, "strip", z.ZodTypeAny, {
7
- input?: string | undefined;
8
- }, {
9
- input?: string | undefined;
10
- }>, {
11
- input?: string | undefined;
12
- }, {
13
- input?: string | undefined;
14
- }, (string | undefined)[] | ({
15
- type: string;
16
- text: string;
17
- }[] | {
18
- content: {
19
- type: string;
20
- image_url: {
21
- url: string;
22
- };
23
- }[];
24
- })[]>;
25
- export declare const fetchRandomImageURL: DynamicStructuredTool<z.ZodObject<{
26
- input: z.ZodOptional<z.ZodString>;
27
- }, "strip", z.ZodTypeAny, {
28
- input?: string | undefined;
29
- }, {
30
- input?: string | undefined;
31
- }>, {
32
- input?: string | undefined;
33
- }, {
34
- input?: string | undefined;
35
- }, (string | undefined)[] | ({
36
- type: string;
37
- text: string;
38
- }[] | {
39
- content: {
40
- type: string;
41
- image_url: {
42
- url: string;
43
- };
44
- }[];
45
- })[]>;
46
- export declare const chartTool: DynamicStructuredTool<z.ZodObject<{
47
- data: z.ZodArray<z.ZodObject<{
48
- label: z.ZodString;
49
- value: z.ZodNumber;
50
- }, "strip", z.ZodTypeAny, {
51
- value: number;
52
- label: string;
53
- }, {
54
- value: number;
55
- label: string;
56
- }>, "many">;
57
- }, "strip", z.ZodTypeAny, {
58
- data: {
59
- value: number;
60
- label: string;
61
- }[];
62
- }, {
63
- data: {
64
- value: number;
65
- label: string;
66
- }[];
67
- }>, {
68
- data: {
69
- value: number;
70
- label: string;
71
- }[];
72
- }, {
73
- data: {
74
- value: number;
75
- label: string;
76
- }[];
77
- }, string>;
78
- export declare const tavilyTool: TavilySearchResults;
@@ -1,269 +0,0 @@
1
- // src/graphs/CollabGraph.ts
2
- import { AIMessageChunk, BaseMessage, HumanMessage } from '@langchain/core/messages';
3
- import { END, StateGraphArgs, START, StateGraph, MemorySaver } from '@langchain/langgraph';
4
- import { AgentExecutor, createOpenAIToolsAgent } from 'langchain/agents';
5
- import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
6
- import type { StructuredTool } from '@langchain/core/tools';
7
- import { Runnable, RunnableConfig } from '@langchain/core/runnables';
8
- import { JsonOutputToolsParser } from 'langchain/output_parsers';
9
- import { Providers } from '@/common';
10
- import { getChatModelClass } from '@/llm/providers';
11
- import { Graph } from '../graphs/Graph';
12
- import type * as t from '@/types';
13
- import { supervisorPrompt } from '@/prompts/collab';
14
-
15
- export interface CollabAgentStateChannels {
16
- messages: BaseMessage[];
17
- next: string;
18
- [key: string]: any;
19
- }
20
-
21
- export interface CollabMember {
22
- name: string;
23
- systemPrompt: string;
24
- tools: any[];
25
- llmConfig: t.LLMConfig;
26
- }
27
-
28
- interface SupervisorConfig {
29
- systemPrompt?: string;
30
- llmConfig: t.LLMConfig;
31
- }
32
-
33
- export class CollabGraph extends Graph<CollabAgentStateChannels, string> {
34
- resetValues(): void {
35
- throw new Error('Method not implemented.');
36
- }
37
- getFinalMessage(): AIMessageChunk | undefined {
38
- throw new Error('Method not implemented.');
39
- }
40
- generateStepId(stepKey: string): [string, number] {
41
- throw new Error('Method not implemented.');
42
- }
43
- getKeyList(metadata: Record<string, unknown> | undefined): (string | number | undefined)[] {
44
- throw new Error('Method not implemented.');
45
- }
46
- getStepKey(metadata: Record<string, unknown> | undefined): string {
47
- throw new Error('Method not implemented.');
48
- }
49
- checkKeyList(keyList: (string | number | undefined)[]): boolean {
50
- throw new Error('Method not implemented.');
51
- }
52
- getStepIdByKey(stepKey: string, index?: number): string {
53
- throw new Error('Method not implemented.');
54
- }
55
- getRunStep(stepId: string): t.RunStep | undefined {
56
- throw new Error('Method not implemented.');
57
- }
58
- dispatchRunStep(stepKey: string, stepDetails: t.StepDetails): void {
59
- throw new Error('Method not implemented.');
60
- }
61
- dispatchRunStepDelta(id: string, delta: t.ToolCallDelta): void {
62
- throw new Error('Method not implemented.');
63
- }
64
- dispatchMessageDelta(id: string, delta: t.MessageDelta): void {
65
- throw new Error('Method not implemented.');
66
- }
67
- private graph: t.CompiledWorkflow<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string> | null = null;
68
- private members: t.Member[];
69
- private supervisorConfig: SupervisorConfig;
70
- private supervisorChain: Runnable | null = null;
71
-
72
- constructor(members: t.Member[], supervisorConfig: SupervisorConfig) {
73
- super();
74
- this.members = members;
75
- this.supervisorConfig = supervisorConfig;
76
- }
77
-
78
- async initialize(): Promise<void> {
79
- const memberNames = this.members.map(member => member.name);
80
- const systemPrompt = this.supervisorConfig.systemPrompt || supervisorPrompt;
81
- const options = [END, ...memberNames];
82
- this.supervisorChain = await this.createSupervisorChain(systemPrompt, options);
83
- }
84
-
85
- createGraphState(): StateGraphArgs<CollabAgentStateChannels>['channels'] {
86
- return {
87
- messages: {
88
- value: (x?: BaseMessage[], y?: BaseMessage[]) => (x ?? []).concat(y ?? []),
89
- default: () => [],
90
- },
91
- next: {
92
- value: (x?: string, y?: string) => y ?? x ?? END,
93
- default: () => END,
94
- },
95
- };
96
- }
97
-
98
- initializeTools(tools: StructuredTool[]): any {
99
- // This method is not used in the collaborative graph
100
- return null;
101
- }
102
-
103
- initializeModel(provider: Providers, clientOptions: Record<string, unknown>, tools: any[]) {
104
- const LLMClass = getChatModelClass(provider);
105
- if (!LLMClass) {
106
- throw new Error(`Unsupported LLM provider: ${provider}`);
107
- }
108
- return new LLMClass(clientOptions);
109
- }
110
-
111
- createCallModel(boundModel: any) {
112
- // This method is not directly used in the collaborative graph
113
- return async (state: CollabAgentStateChannels, config?: RunnableConfig) => {
114
- return { messages: [] };
115
- };
116
- }
117
-
118
- private async createAgent(
119
- llmConfig: t.LLMConfig,
120
- tools: any[],
121
- systemPrompt: string
122
- ): Promise<AgentExecutor> {
123
- const { provider, ...clientOptions } = llmConfig;
124
- const LLMClass = getChatModelClass(provider);
125
- if (!LLMClass) {
126
- throw new Error(`Unsupported LLM provider: ${provider}`);
127
- }
128
- const llm = new LLMClass(clientOptions);
129
-
130
- const prompt = await ChatPromptTemplate.fromMessages([
131
- ['system', systemPrompt],
132
- new MessagesPlaceholder('messages'),
133
- new MessagesPlaceholder('agent_scratchpad'),
134
- ]);
135
- const agent = await createOpenAIToolsAgent({ llm, tools, prompt });
136
- return new AgentExecutor({ agent, tools });
137
- }
138
-
139
- createWorkflow(
140
- graphState: StateGraphArgs<CollabAgentStateChannels>['channels'],
141
- callModel?: any,
142
- toolNode?: any
143
- ): t.CompiledWorkflow<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string> {
144
- if (!this.supervisorChain) {
145
- throw new Error('CollabGraph not initialized. Call initialize() first.');
146
- }
147
-
148
- const workflow = new StateGraph<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string>({
149
- channels: graphState,
150
- });
151
-
152
- // Dynamically create agents and add nodes for each member
153
- for (const member of this.members) {
154
- const node = async (
155
- state: CollabAgentStateChannels,
156
- config?: RunnableConfig,
157
- ) => {
158
- const agent = await this.createAgent(member.llmConfig, member.tools, member.systemPrompt);
159
- const agentPromise = agent.invoke(state, config);
160
-
161
- // Store the promise in the state
162
- await this.graph?.updateState(config ?? {}, {
163
- [`${member.name}Promise`]: agentPromise,
164
- });
165
-
166
- const result = await agentPromise;
167
- return {
168
- messages: [
169
- new HumanMessage({ content: result.output, name: member.name }),
170
- ],
171
- };
172
- };
173
- workflow.addNode(member.name, node);
174
- workflow.addEdge(member.name, 'supervisor');
175
- }
176
-
177
- const supervisorNode = async (
178
- state: CollabAgentStateChannels,
179
- config?: RunnableConfig,
180
- ) => {
181
- // Get the current state
182
- const currentState = await this.graph?.getState(config ?? {});
183
-
184
- // Wait for all member promises to resolve
185
- const memberPromises = this.members.map(member => currentState?.[`${member.name}Promise` as keyof typeof currentState]);
186
- await Promise.all(memberPromises);
187
-
188
- // Clear the promises for the next iteration
189
- for (const member of this.members) {
190
- await this.graph?.updateState(config ?? {}, {
191
- [`${member.name}Promise`]: undefined,
192
- });
193
- }
194
-
195
- const result = await this.supervisorChain?.invoke(state, config);
196
- return result;
197
- };
198
-
199
- workflow.addNode('supervisor', supervisorNode);
200
-
201
- workflow.addConditionalEdges(
202
- 'supervisor',
203
- (x: CollabAgentStateChannels) => x.next,
204
- );
205
-
206
- workflow.addEdge(START, 'supervisor');
207
-
208
- const memory = new MemorySaver();
209
- this.graph = workflow.compile({ checkpointer: memory });
210
- return this.graph;
211
- }
212
-
213
- private async createSupervisorChain(systemPrompt: string, options: string[]): Promise<Runnable> {
214
- const functionDef = {
215
- name: 'route',
216
- description: 'Select the next role.',
217
- parameters: {
218
- title: 'routeSchema',
219
- type: 'object',
220
- properties: {
221
- next: {
222
- title: 'Next',
223
- anyOf: [
224
- { enum: options },
225
- ],
226
- },
227
- },
228
- required: ['next'],
229
- },
230
- };
231
-
232
- const toolDef = {
233
- type: 'function',
234
- function: functionDef,
235
- } as const;
236
-
237
- const prompt = ChatPromptTemplate.fromMessages([
238
- ['system', systemPrompt],
239
- new MessagesPlaceholder('messages'),
240
- [
241
- 'system',
242
- 'Given the conversation above, who should act next?' +
243
- ' Or should we FINISH? Select one of: {options}',
244
- ],
245
- ]);
246
-
247
- const formattedPrompt = await prompt.partial({
248
- options: options.join(', '),
249
- members: this.members.map(m => m.name).join(', '),
250
- });
251
-
252
- const { provider, ...clientOptions } = this.supervisorConfig.llmConfig;
253
- const LLMClass = getChatModelClass(provider);
254
- if (!LLMClass) {
255
- throw new Error(`Unsupported LLM provider for supervisor: ${provider}`);
256
- }
257
- const llm = new LLMClass(clientOptions);
258
-
259
- return formattedPrompt
260
- .pipe(llm.bindTools(
261
- [toolDef],
262
- {
263
- tool_choice: { type: 'function', function: { name: 'route' } } as any,
264
- },
265
- ))
266
- .pipe(new JsonOutputToolsParser())
267
- .pipe((x: any) => (x[0].args));
268
- }
269
- }