@agentforge/patterns 0.5.3 → 0.6.0
This diff shows the published contents of these package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/dist/index.cjs +86 -32
- package/dist/index.d.cts +214 -1
- package/dist/index.d.ts +214 -1
- package/dist/index.js +87 -33
- package/package.json +1 -1
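The functional change behind these numbers is optional checkpointer support in all four agent factories (ReAct, Plan-Execute, Reflection, Multi-Agent), plus supervisor-side tool calling for LLM-based routing. The JSDoc added to the typings below spells out the intended usage; as a condensed sketch (the tools list and model choice are placeholders, not part of the diff):

```typescript
import { createReActAgent } from '@agentforge/patterns';
import { MemorySaver } from '@langchain/langgraph';
import { ChatOpenAI } from '@langchain/openai';

const tools: any[] = []; // placeholder: supply your @agentforge/core Tool instances

const agent = createReActAgent({
  model: new ChatOpenAI({ model: 'gpt-4' }),
  tools,
  checkpointer: new MemorySaver(), // new in 0.6.0; omit to keep the old stateless behaviour
});

// With a checkpointer configured, runs are keyed by thread_id and can be resumed.
const result = await agent.invoke(
  { messages: [{ role: 'user', content: 'What is the weather?' }] },
  { configurable: { thread_id: 'conversation-123' } }
);
```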
package/dist/index.cjs
CHANGED
@@ -394,7 +394,8 @@ function createReActAgent(config, options) {
     systemPrompt = DEFAULT_REACT_SYSTEM_PROMPT,
     maxIterations = 10,
     returnIntermediateSteps = false,
-    stopCondition
+    stopCondition,
+    checkpointer
   } = config;
   const {
     verbose = false,
@@ -429,7 +430,7 @@ function createReActAgent(config, options) {
     return ACTION_NODE;
   };
   const workflow = new import_langgraph.StateGraph(ReActState).addNode(REASONING_NODE, reasoningNode).addNode(ACTION_NODE, actionNode).addNode(OBSERVATION_NODE, observationNode).addEdge("__start__", REASONING_NODE).addConditionalEdges(REASONING_NODE, shouldContinue).addEdge(ACTION_NODE, OBSERVATION_NODE).addEdge(OBSERVATION_NODE, REASONING_NODE);
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/react/builder.ts
@@ -970,7 +971,8 @@ function createPlanExecuteAgent(config) {
     executor,
     replanner,
     maxIterations = 5,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const plannerNode = createPlannerNode(planner);
   const executorNode = createExecutorNode(executor);
@@ -1030,7 +1032,7 @@ function createPlanExecuteAgent(config) {
       finish: import_langgraph2.END
     }
   );
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/reflection/state.ts
@@ -1507,7 +1509,8 @@ function createReflectionAgent(config) {
     reviser,
     maxIterations = 3,
     qualityCriteria,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const generatorNode = createGeneratorNode({ ...generator, verbose });
   const reflectorNode = createReflectorNode({ ...reflector, qualityCriteria, verbose });
@@ -1565,7 +1568,7 @@ function createReflectionAgent(config) {
       error: import_langgraph3.END
     }
   ).addEdge("finisher", import_langgraph3.END);
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/multi-agent/state.ts
@@ -1888,6 +1891,33 @@ var MultiAgentState = (0, import_core6.createStateAnnotation)(MultiAgentStateCon

 // src/multi-agent/routing.ts
 var import_messages4 = require("@langchain/core/messages");
+async function executeTools(toolCalls, tools) {
+  const results = [];
+  for (const toolCall of toolCalls) {
+    const tool = tools.find((t) => t.metadata.name === toolCall.name);
+    if (!tool) {
+      results.push(new import_messages4.ToolMessage({
+        content: `Error: Tool '${toolCall.name}' not found`,
+        tool_call_id: toolCall.id
+      }));
+      continue;
+    }
+    try {
+      const result = await tool.execute(toolCall.args);
+      const content = typeof result === "string" ? result : JSON.stringify(result);
+      results.push(new import_messages4.ToolMessage({
+        content,
+        tool_call_id: toolCall.id
+      }));
+    } catch (error) {
+      results.push(new import_messages4.ToolMessage({
+        content: `Error executing tool: ${error.message}`,
+        tool_call_id: toolCall.id
+      }));
+    }
+  }
+  return results;
+}
 var DEFAULT_SUPERVISOR_SYSTEM_PROMPT = `You are a supervisor agent responsible for routing tasks to specialized worker agents.

 Your job is to:
@@ -1910,11 +1940,13 @@ var llmBasedRouting = {
       throw new Error("LLM-based routing requires a model to be configured");
     }
     const systemPrompt = config.systemPrompt || DEFAULT_SUPERVISOR_SYSTEM_PROMPT;
+    const maxRetries = config.maxToolRetries || 3;
+    const tools = config.tools || [];
     const workerInfo = Object.entries(state.workers).map(([id, caps]) => {
       const skills = caps.skills.join(", ");
-      const
+      const tools2 = caps.tools.join(", ");
       const available = caps.available ? "available" : "busy";
-      return `- ${id}: Skills: [${skills}], Tools: [${
+      return `- ${id}: Skills: [${skills}], Tools: [${tools2}], Status: ${available}, Workload: ${caps.currentWorkload}`;
     }).join("\n");
     const lastMessage = state.messages[state.messages.length - 1];
     const taskContext = lastMessage?.content || state.input;
@@ -1924,24 +1956,42 @@ Available workers:
 ${workerInfo}

 Select the best worker for this task and explain your reasoning.`;
-    const
-    … (16 further removed lines are collapsed in the published diff view)
+    const conversationHistory = [];
+    let attempt = 0;
+    while (attempt < maxRetries) {
+      const messages = [
+        new import_messages4.SystemMessage(systemPrompt),
+        new import_messages4.HumanMessage(userPrompt),
+        ...conversationHistory
+      ];
+      const response = await config.model.invoke(messages);
+      if (response.tool_calls && response.tool_calls.length > 0) {
+        if (tools.length === 0) {
+          throw new Error("LLM requested tool calls but no tools are configured");
+        }
+        const toolResults = await executeTools(response.tool_calls, tools);
+        conversationHistory.push(
+          new import_messages4.AIMessage({ content: response.content || "", tool_calls: response.tool_calls }),
+          ...toolResults
+        );
+        attempt++;
+        continue;
+      }
+      const content = typeof response.content === "string" ? response.content : JSON.stringify(response.content);
+      try {
+        const decision = JSON.parse(content);
+        return {
+          targetAgent: decision.targetAgent,
+          reasoning: decision.reasoning,
+          confidence: decision.confidence,
+          strategy: "llm-based",
+          timestamp: Date.now()
+        };
+      } catch (error) {
+        throw new Error(`Failed to parse routing decision from LLM: ${error}`);
+      }
     }
+    throw new Error(`Max tool retries (${maxRetries}) exceeded without routing decision`);
   }
 };
 var roundRobinRouting = {
@@ -2416,20 +2466,24 @@ Please synthesize these results into a comprehensive response that addresses the

 // src/multi-agent/agent.ts
 var import_langgraph4 = require("@langchain/langgraph");
+var import_core8 = require("@agentforge/core");
 function createMultiAgentSystem(config) {
   const {
     supervisor,
     workers,
     aggregator,
     maxIterations = 10,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const workflow = new import_langgraph4.StateGraph(MultiAgentState);
-  … (5 removed lines are collapsed in the published diff view)
+  let supervisorConfig = { ...supervisor, maxIterations, verbose };
+  if (supervisor.model && supervisor.tools && supervisor.tools.length > 0) {
+    const langchainTools = (0, import_core8.toLangChainTools)(supervisor.tools);
+    const modelWithTools = supervisor.model.bindTools(langchainTools);
+    supervisorConfig.model = modelWithTools;
+  }
+  const supervisorNode = createSupervisorNode(supervisorConfig);
   workflow.addNode("supervisor", supervisorNode);
   const workerIds = [];
   const workerCapabilities = {};
@@ -2475,7 +2529,7 @@ function createMultiAgentSystem(config) {
     workflow.addConditionalEdges(workerId, workerRouter, ["supervisor"]);
   }
   workflow.addConditionalEdges("aggregator", aggregatorRouter, [import_langgraph4.END]);
-  const compiled = workflow.compile();
+  const compiled = workflow.compile(checkpointer ? { checkpointer } : void 0);
   const originalInvoke = compiled.invoke.bind(compiled);
   compiled.invoke = async function(input, config2) {
     const mergedInput = {
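Note how the new routing loop ends: a supervisor reply without tool calls is JSON.parsed and its targetAgent, reasoning, and confidence fields are returned. A sketch of the shape the model's final message therefore has to carry (the field types are assumptions; only the property names appear in the bundle):

```typescript
// Shape implied by the parsing branch of llmBasedRouting above (types assumed).
interface RoutingDecision {
  targetAgent: string; // id of the worker to hand the task to
  reasoning: string;   // free-text justification
  confidence: number;  // passed through unchanged, e.g. 0-1
}

// A final supervisor reply the parser would accept:
const decision: RoutingDecision = {
  targetAgent: 'researcher',
  reasoning: 'The request needs web research skills.',
  confidence: 0.9,
};
// llmBasedRouting then adds strategy: "llm-based" and a timestamp itself.
```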
package/dist/index.d.cts
CHANGED
@@ -1,6 +1,6 @@
 import { z } from 'zod';
 import * as _langchain_langgraph from '@langchain/langgraph';
-import { CompiledStateGraph } from '@langchain/langgraph';
+import { BaseCheckpointSaver, CompiledStateGraph } from '@langchain/langgraph';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { ToolRegistry, Tool } from '@agentforge/core';

@@ -164,6 +164,23 @@ interface ReActAgentConfig {
      * Return true to stop the ReAct loop
      */
     stopCondition?: (state: ReActStateType) => boolean;
+    /**
+     * Optional checkpointer for state persistence
+     * Required for human-in-the-loop workflows (askHuman tool), interrupts, and conversation continuity
+     *
+     * @example
+     * ```typescript
+     * import { MemorySaver } from '@langchain/langgraph';
+     *
+     * const checkpointer = new MemorySaver();
+     * const agent = createReActAgent({
+     *   model,
+     *   tools,
+     *   checkpointer
+     * });
+     * ```
+     */
+    checkpointer?: BaseCheckpointSaver;
 }
 /**
  * Options for the ReAct agent builder
@@ -221,6 +238,7 @@ declare const DEFAULT_REACT_SYSTEM_PROMPT = "You are a helpful assistant that us
  * @returns A compiled LangGraph StateGraph
  *
  * @example
+ * Basic usage:
  * ```typescript
  * import { createReActAgent } from '@agentforge/patterns';
  * import { ChatOpenAI } from '@langchain/openai';
@@ -236,6 +254,30 @@ declare const DEFAULT_REACT_SYSTEM_PROMPT = "You are a helpful assistant that us
  *   messages: [{ role: 'user', content: 'What is the weather?' }]
  * });
  * ```
+ *
+ * @example
+ * With checkpointer for human-in-the-loop workflows:
+ * ```typescript
+ * import { createReActAgent } from '@agentforge/patterns';
+ * import { createAskHumanTool } from '@agentforge/tools';
+ * import { MemorySaver } from '@langchain/langgraph';
+ * import { ChatOpenAI } from '@langchain/openai';
+ *
+ * const checkpointer = new MemorySaver();
+ * const askHuman = createAskHumanTool();
+ *
+ * const agent = createReActAgent({
+ *   model: new ChatOpenAI({ model: 'gpt-4' }),
+ *   tools: [askHuman, ...otherTools],
+ *   checkpointer // Required for askHuman tool
+ * });
+ *
+ * // Invoke with thread_id for conversation continuity
+ * const result = await agent.invoke(
+ *   { messages: [{ role: 'user', content: 'Help me with this task' }] },
+ *   { configurable: { thread_id: 'conversation-123' } }
+ * );
+ * ```
  */
 declare function createReActAgent(config: ReActAgentConfig, options?: ReActBuilderOptions): CompiledStateGraph<any, any>;

@@ -887,6 +929,23 @@ interface PlanExecuteAgentConfig {
      * Verbose logging
      */
     verbose?: boolean;
+    /**
+     * Optional checkpointer for state persistence
+     * Required for human-in-the-loop workflows (askHuman tool), interrupts, and conversation continuity
+     *
+     * @example
+     * ```typescript
+     * import { MemorySaver } from '@langchain/langgraph';
+     *
+     * const checkpointer = new MemorySaver();
+     * const agent = createPlanExecuteAgent({
+     *   planner: { model },
+     *   executor: { tools },
+     *   checkpointer
+     * });
+     * ```
+     */
+    checkpointer?: BaseCheckpointSaver;
 }
 /**
  * Node function type for Plan-Execute pattern
@@ -919,6 +978,7 @@ type PlanExecuteRouter = (state: PlanExecuteStateType) => PlanExecuteRoute;
  * @returns A compiled LangGraph StateGraph
  *
  * @example
+ * Basic usage:
  * ```typescript
  * import { createPlanExecuteAgent } from '@agentforge/patterns';
  * import { ChatOpenAI } from '@langchain/openai';
@@ -942,6 +1002,30 @@ type PlanExecuteRouter = (state: PlanExecuteStateType) => PlanExecuteRoute;
  *   input: 'Research the latest AI developments and summarize them'
  * });
  * ```
+ *
+ * @example
+ * With checkpointer for human-in-the-loop workflows:
+ * ```typescript
+ * import { createPlanExecuteAgent } from '@agentforge/patterns';
+ * import { createAskHumanTool } from '@agentforge/tools';
+ * import { MemorySaver } from '@langchain/langgraph';
+ * import { ChatOpenAI } from '@langchain/openai';
+ *
+ * const checkpointer = new MemorySaver();
+ * const askHuman = createAskHumanTool();
+ *
+ * const agent = createPlanExecuteAgent({
+ *   planner: { model: new ChatOpenAI({ model: 'gpt-4' }), maxSteps: 5 },
+ *   executor: { tools: [askHuman, ...otherTools] },
+ *   checkpointer // Required for askHuman tool
+ * });
+ *
+ * // Invoke with thread_id for conversation continuity
+ * const result = await agent.invoke(
+ *   { input: 'Help me plan this project' },
+ *   { configurable: { thread_id: 'conversation-123' } }
+ * );
+ * ```
  */
 declare function createPlanExecuteAgent(config: PlanExecuteAgentConfig): any;

@@ -1525,6 +1609,24 @@ interface ReflectionAgentConfig {
      * Whether to include verbose logging
      */
     verbose?: boolean;
+    /**
+     * Optional checkpointer for state persistence
+     * Required for human-in-the-loop workflows (askHuman tool), interrupts, and conversation continuity
+     *
+     * @example
+     * ```typescript
+     * import { MemorySaver } from '@langchain/langgraph';
+     *
+     * const checkpointer = new MemorySaver();
+     * const agent = createReflectionAgent({
+     *   generator: { model },
+     *   reflector: { model },
+     *   reviser: { model },
+     *   checkpointer
+     * });
+     * ```
+     */
+    checkpointer?: BaseCheckpointSaver;
 }
 /**
  * Node function type for reflection pattern
@@ -1630,6 +1732,7 @@ declare function createFinisherNode(): (state: ReflectionStateType) => Promise<P
  * @returns A compiled LangGraph StateGraph
  *
  * @example
+ * Basic usage:
  * ```typescript
  * import { createReflectionAgent } from '@agentforge/patterns';
  * import { ChatOpenAI } from '@langchain/openai';
@@ -1651,6 +1754,32 @@ declare function createFinisherNode(): (state: ReflectionStateType) => Promise<P
  *   input: 'Write an essay about AI safety'
  * });
  * ```
+ *
+ * @example
+ * With checkpointer for human-in-the-loop workflows:
+ * ```typescript
+ * import { createReflectionAgent } from '@agentforge/patterns';
+ * import { createAskHumanTool } from '@agentforge/tools';
+ * import { MemorySaver } from '@langchain/langgraph';
+ * import { ChatOpenAI } from '@langchain/openai';
+ *
+ * const checkpointer = new MemorySaver();
+ * const model = new ChatOpenAI({ model: 'gpt-4' });
+ *
+ * const agent = createReflectionAgent({
+ *   generator: { model },
+ *   reflector: { model },
+ *   reviser: { model },
+ *   maxIterations: 3,
+ *   checkpointer // Required for askHuman tool
+ * });
+ *
+ * // Invoke with thread_id for conversation continuity
+ * const result = await agent.invoke(
+ *   { input: 'Write a report on this topic' },
+ *   { configurable: { thread_id: 'conversation-123' } }
+ * );
+ * ```
  */
 declare function createReflectionAgent(config: ReflectionAgentConfig): any;

@@ -2282,6 +2411,37 @@ interface SupervisorConfig {
      * Maximum number of routing iterations
      */
     maxIterations?: number;
+    /**
+     * Optional tools the supervisor can use during routing
+     *
+     * Enables the supervisor to gather additional information before making routing decisions.
+     * Common use case: askHuman tool for clarifying ambiguous queries.
+     *
+     * Note: Only works with LLM-based routing strategy.
+     *
+     * @example
+     * ```typescript
+     * import { createAskHumanTool } from '@agentforge/tools';
+     *
+     * const system = createMultiAgentSystem({
+     *   supervisor: {
+     *     strategy: 'llm-based',
+     *     model: chatModel,
+     *     tools: [createAskHumanTool()],
+     *   },
+     *   // ...
+     * });
+     * ```
+     */
+    tools?: Tool<any, any>[];
+    /**
+     * Maximum number of tool call retries before requiring routing decision
+     *
+     * Prevents infinite loops where the supervisor keeps calling tools without making a routing decision.
+     *
+     * @default 3
+     */
+    maxToolRetries?: number;
 }
 /**
  * Configuration for a worker agent node
@@ -2387,6 +2547,23 @@ interface MultiAgentSystemConfig {
      * Whether to include verbose logging
      */
     verbose?: boolean;
+    /**
+     * Optional checkpointer for state persistence
+     * Required for human-in-the-loop workflows (askHuman tool), interrupts, and conversation continuity
+     *
+     * @example
+     * ```typescript
+     * import { MemorySaver } from '@langchain/langgraph';
+     *
+     * const checkpointer = new MemorySaver();
+     * const system = createMultiAgentSystem({
+     *   supervisor: { strategy: 'skill-based', model },
+     *   workers: [...],
+     *   checkpointer
+     * });
+     * ```
+     */
+    checkpointer?: BaseCheckpointSaver;
 }
 /**
  * Node type for multi-agent graph
@@ -2430,6 +2607,8 @@ declare const DEFAULT_SUPERVISOR_SYSTEM_PROMPT = "You are a supervisor agent res
 /**
  * LLM-based routing strategy
  * Uses an LLM to intelligently route tasks based on worker capabilities
+ *
+ * Supports tool calls (e.g., askHuman) for gathering additional information before routing.
  */
 declare const llmBasedRouting: RoutingStrategyImpl;
 /**
@@ -2494,6 +2673,7 @@ declare function createAggregatorNode(config?: AggregatorConfig): (state: MultiA
  * @returns Compiled LangGraph workflow
  *
  * @example
+ * Basic usage:
 * ```typescript
 * const system = createMultiAgentSystem({
 *   supervisor: {
@@ -2531,6 +2711,39 @@ declare function createAggregatorNode(config?: AggregatorConfig): (state: MultiA
 *   input: 'Research AI trends and write a summary',
 * });
 * ```
+ *
+ * @example
+ * With checkpointer for human-in-the-loop workflows:
+ * ```typescript
+ * import { createMultiAgentSystem } from '@agentforge/patterns';
+ * import { createAskHumanTool } from '@agentforge/tools';
+ * import { MemorySaver } from '@langchain/langgraph';
+ * import { ChatOpenAI } from '@langchain/openai';
+ *
+ * const checkpointer = new MemorySaver();
+ * const askHuman = createAskHumanTool();
+ * const model = new ChatOpenAI({ model: 'gpt-4' });
+ *
+ * const system = createMultiAgentSystem({
+ *   supervisor: { strategy: 'skill-based', model },
+ *   workers: [
+ *     {
+ *       id: 'hr',
+ *       capabilities: { skills: ['hr'], tools: ['askHuman'], available: true, currentWorkload: 0 },
+ *       tools: [askHuman],
+ *       model,
+ *     },
+ *   ],
+ *   aggregator: { model },
+ *   checkpointer // Required for askHuman tool
+ * });
+ *
+ * // Invoke with thread_id for conversation continuity
+ * const result = await system.invoke(
+ *   { input: 'Help me with HR policy question' },
+ *   { configurable: { thread_id: 'conversation-123' } }
+ * );
+ * ```
 */
 declare function createMultiAgentSystem(config: MultiAgentSystemConfig): CompiledStateGraph<{
     [x: string]: unknown;
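Pulling the new SupervisorConfig fields together: tools is only consulted by the LLM-based routing strategy, maxToolRetries caps the tool-call loop (documented default 3), and a checkpointer is what lets an askHuman interrupt resume. A sketch based on the declarations above; the worker and aggregator details are illustrative, not prescribed by the diff:

```typescript
import { createMultiAgentSystem } from '@agentforge/patterns';
import { createAskHumanTool } from '@agentforge/tools';
import { MemorySaver } from '@langchain/langgraph';
import { ChatOpenAI } from '@langchain/openai';

const model = new ChatOpenAI({ model: 'gpt-4' });

const system = createMultiAgentSystem({
  supervisor: {
    strategy: 'llm-based',          // supervisor tools are ignored by the other strategies
    model,
    tools: [createAskHumanTool()],  // lets the supervisor ask for clarification before routing
    maxToolRetries: 3,              // matches the documented default
  },
  workers: [/* worker configs as in the basic example above */],
  aggregator: { model },
  checkpointer: new MemorySaver(),  // required for askHuman interrupts to resume
});
```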
package/dist/index.d.ts
CHANGED
This diff is identical, hunk for hunk, to the package/dist/index.d.cts diff above (+214 −1), so it is not repeated here.
package/dist/index.js
CHANGED
@@ -295,7 +295,8 @@ function createReActAgent(config, options) {
     systemPrompt = DEFAULT_REACT_SYSTEM_PROMPT,
     maxIterations = 10,
     returnIntermediateSteps = false,
-    stopCondition
+    stopCondition,
+    checkpointer
   } = config;
   const {
     verbose = false,
@@ -330,7 +331,7 @@ function createReActAgent(config, options) {
     return ACTION_NODE;
   };
   const workflow = new StateGraph(ReActState).addNode(REASONING_NODE, reasoningNode).addNode(ACTION_NODE, actionNode).addNode(OBSERVATION_NODE, observationNode).addEdge("__start__", REASONING_NODE).addConditionalEdges(REASONING_NODE, shouldContinue).addEdge(ACTION_NODE, OBSERVATION_NODE).addEdge(OBSERVATION_NODE, REASONING_NODE);
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/react/builder.ts
@@ -871,7 +872,8 @@ function createPlanExecuteAgent(config) {
     executor,
     replanner,
     maxIterations = 5,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const plannerNode = createPlannerNode(planner);
   const executorNode = createExecutorNode(executor);
@@ -931,7 +933,7 @@ function createPlanExecuteAgent(config) {
       finish: END2
     }
   );
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/reflection/state.ts
@@ -1408,7 +1410,8 @@ function createReflectionAgent(config) {
     reviser,
     maxIterations = 3,
     qualityCriteria,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const generatorNode = createGeneratorNode({ ...generator, verbose });
   const reflectorNode = createReflectorNode({ ...reflector, qualityCriteria, verbose });
@@ -1466,7 +1469,7 @@ function createReflectionAgent(config) {
       error: END3
     }
   ).addEdge("finisher", END3);
-  return workflow.compile();
+  return workflow.compile(checkpointer ? { checkpointer } : void 0);
 }

 // src/multi-agent/state.ts
@@ -1788,7 +1791,34 @@ var MultiAgentStateConfig = {
 var MultiAgentState = createStateAnnotation4(MultiAgentStateConfig);

 // src/multi-agent/routing.ts
-import { HumanMessage as HumanMessage4, SystemMessage as SystemMessage4 } from "@langchain/core/messages";
+import { HumanMessage as HumanMessage4, SystemMessage as SystemMessage4, AIMessage as AIMessage2, ToolMessage as ToolMessage2 } from "@langchain/core/messages";
+async function executeTools(toolCalls, tools) {
+  const results = [];
+  for (const toolCall of toolCalls) {
+    const tool = tools.find((t) => t.metadata.name === toolCall.name);
+    if (!tool) {
+      results.push(new ToolMessage2({
+        content: `Error: Tool '${toolCall.name}' not found`,
+        tool_call_id: toolCall.id
+      }));
+      continue;
+    }
+    try {
+      const result = await tool.execute(toolCall.args);
+      const content = typeof result === "string" ? result : JSON.stringify(result);
+      results.push(new ToolMessage2({
+        content,
+        tool_call_id: toolCall.id
+      }));
+    } catch (error) {
+      results.push(new ToolMessage2({
+        content: `Error executing tool: ${error.message}`,
+        tool_call_id: toolCall.id
+      }));
+    }
+  }
+  return results;
+}
 var DEFAULT_SUPERVISOR_SYSTEM_PROMPT = `You are a supervisor agent responsible for routing tasks to specialized worker agents.

 Your job is to:
@@ -1811,11 +1841,13 @@ var llmBasedRouting = {
       throw new Error("LLM-based routing requires a model to be configured");
     }
     const systemPrompt = config.systemPrompt || DEFAULT_SUPERVISOR_SYSTEM_PROMPT;
+    const maxRetries = config.maxToolRetries || 3;
+    const tools = config.tools || [];
     const workerInfo = Object.entries(state.workers).map(([id, caps]) => {
       const skills = caps.skills.join(", ");
-      const
+      const tools2 = caps.tools.join(", ");
       const available = caps.available ? "available" : "busy";
-      return `- ${id}: Skills: [${skills}], Tools: [${
+      return `- ${id}: Skills: [${skills}], Tools: [${tools2}], Status: ${available}, Workload: ${caps.currentWorkload}`;
     }).join("\n");
     const lastMessage = state.messages[state.messages.length - 1];
     const taskContext = lastMessage?.content || state.input;
@@ -1825,24 +1857,42 @@ Available workers:
 ${workerInfo}

 Select the best worker for this task and explain your reasoning.`;
-    const
-    … (16 further removed lines are collapsed in the published diff view)
+    const conversationHistory = [];
+    let attempt = 0;
+    while (attempt < maxRetries) {
+      const messages = [
+        new SystemMessage4(systemPrompt),
+        new HumanMessage4(userPrompt),
+        ...conversationHistory
+      ];
+      const response = await config.model.invoke(messages);
+      if (response.tool_calls && response.tool_calls.length > 0) {
+        if (tools.length === 0) {
+          throw new Error("LLM requested tool calls but no tools are configured");
+        }
+        const toolResults = await executeTools(response.tool_calls, tools);
+        conversationHistory.push(
+          new AIMessage2({ content: response.content || "", tool_calls: response.tool_calls }),
+          ...toolResults
+        );
+        attempt++;
+        continue;
+      }
+      const content = typeof response.content === "string" ? response.content : JSON.stringify(response.content);
+      try {
+        const decision = JSON.parse(content);
+        return {
+          targetAgent: decision.targetAgent,
+          reasoning: decision.reasoning,
+          confidence: decision.confidence,
+          strategy: "llm-based",
+          timestamp: Date.now()
+        };
+      } catch (error) {
+        throw new Error(`Failed to parse routing decision from LLM: ${error}`);
+      }
     }
+    throw new Error(`Max tool retries (${maxRetries}) exceeded without routing decision`);
   }
 };
 var roundRobinRouting = {
@@ -2317,20 +2367,24 @@ Please synthesize these results into a comprehensive response that addresses the

 // src/multi-agent/agent.ts
 import { StateGraph as StateGraph4, END as END4 } from "@langchain/langgraph";
+import { toLangChainTools as toLangChainTools3 } from "@agentforge/core";
 function createMultiAgentSystem(config) {
   const {
     supervisor,
     workers,
     aggregator,
     maxIterations = 10,
-    verbose = false
+    verbose = false,
+    checkpointer
   } = config;
   const workflow = new StateGraph4(MultiAgentState);
-  … (5 removed lines are collapsed in the published diff view)
+  let supervisorConfig = { ...supervisor, maxIterations, verbose };
+  if (supervisor.model && supervisor.tools && supervisor.tools.length > 0) {
+    const langchainTools = toLangChainTools3(supervisor.tools);
+    const modelWithTools = supervisor.model.bindTools(langchainTools);
+    supervisorConfig.model = modelWithTools;
+  }
+  const supervisorNode = createSupervisorNode(supervisorConfig);
   workflow.addNode("supervisor", supervisorNode);
   const workerIds = [];
   const workerCapabilities = {};
@@ -2376,7 +2430,7 @@ function createMultiAgentSystem(config) {
     workflow.addConditionalEdges(workerId, workerRouter, ["supervisor"]);
   }
   workflow.addConditionalEdges("aggregator", aggregatorRouter, [END4]);
-  const compiled = workflow.compile();
+  const compiled = workflow.compile(checkpointer ? { checkpointer } : void 0);
   const originalInvoke = compiled.invoke.bind(compiled);
   compiled.invoke = async function(input, config2) {
     const mergedInput = {
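One compatibility note: every factory now calls workflow.compile(checkpointer ? { checkpointer } : void 0), so when no checkpointer is supplied compile() still receives undefined and the graph behaves as it did in 0.5.x. Roughly (model and tools are placeholders):

```typescript
import { createReActAgent } from '@agentforge/patterns';
import { MemorySaver } from '@langchain/langgraph';
import { ChatOpenAI } from '@langchain/openai';

const model = new ChatOpenAI({ model: 'gpt-4' });
const tools: any[] = []; // placeholder Tool list

// Same behaviour as 0.5.3: no persistence, no thread_id needed at invoke time.
const stateless = createReActAgent({ model, tools });

// Opted in: LangGraph persists state per the thread_id supplied in the run config.
const persistent = createReActAgent({ model, tools, checkpointer: new MemorySaver() });
```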