@librechat/agents 2.4.322 → 3.0.0-rc2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/cjs/agents/AgentContext.cjs +218 -0
- package/dist/cjs/agents/AgentContext.cjs.map +1 -0
- package/dist/cjs/common/enum.cjs +14 -5
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +10 -6
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +309 -212
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +422 -0
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/index.cjs +54 -9
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
- package/dist/cjs/llm/google/index.cjs +144 -0
- package/dist/cjs/llm/google/index.cjs.map +1 -0
- package/dist/cjs/llm/google/utils/common.cjs +477 -0
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
- package/dist/cjs/llm/ollama/index.cjs +67 -0
- package/dist/cjs/llm/ollama/index.cjs.map +1 -0
- package/dist/cjs/llm/ollama/utils.cjs +158 -0
- package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +389 -3
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/utils/index.cjs +672 -0
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
- package/dist/cjs/llm/providers.cjs +15 -15
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/text.cjs +14 -3
- package/dist/cjs/llm/text.cjs.map +1 -1
- package/dist/cjs/llm/vertexai/index.cjs +330 -0
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
- package/dist/cjs/main.cjs +11 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/run.cjs +120 -81
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +85 -51
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +10 -4
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +119 -13
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/tools/search/anthropic.cjs +40 -0
- package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
- package/dist/cjs/tools/search/firecrawl.cjs +55 -9
- package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
- package/dist/cjs/tools/search/format.cjs +6 -6
- package/dist/cjs/tools/search/format.cjs.map +1 -1
- package/dist/cjs/tools/search/rerankers.cjs +7 -29
- package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
- package/dist/cjs/tools/search/search.cjs +86 -16
- package/dist/cjs/tools/search/search.cjs.map +1 -1
- package/dist/cjs/tools/search/tool.cjs +4 -2
- package/dist/cjs/tools/search/tool.cjs.map +1 -1
- package/dist/cjs/tools/search/utils.cjs +1 -1
- package/dist/cjs/tools/search/utils.cjs.map +1 -1
- package/dist/cjs/utils/events.cjs +31 -0
- package/dist/cjs/utils/events.cjs.map +1 -0
- package/dist/cjs/utils/title.cjs +57 -21
- package/dist/cjs/utils/title.cjs.map +1 -1
- package/dist/cjs/utils/tokens.cjs +54 -7
- package/dist/cjs/utils/tokens.cjs.map +1 -1
- package/dist/esm/agents/AgentContext.mjs +216 -0
- package/dist/esm/agents/AgentContext.mjs.map +1 -0
- package/dist/esm/common/enum.mjs +15 -6
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +10 -6
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +311 -214
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +420 -0
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
- package/dist/esm/llm/anthropic/index.mjs +54 -9
- package/dist/esm/llm/anthropic/index.mjs.map +1 -1
- package/dist/esm/llm/anthropic/types.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
- package/dist/esm/llm/google/index.mjs +142 -0
- package/dist/esm/llm/google/index.mjs.map +1 -0
- package/dist/esm/llm/google/utils/common.mjs +471 -0
- package/dist/esm/llm/google/utils/common.mjs.map +1 -0
- package/dist/esm/llm/ollama/index.mjs +65 -0
- package/dist/esm/llm/ollama/index.mjs.map +1 -0
- package/dist/esm/llm/ollama/utils.mjs +155 -0
- package/dist/esm/llm/ollama/utils.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +388 -4
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/openai/utils/index.mjs +666 -0
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
- package/dist/esm/llm/providers.mjs +5 -5
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/text.mjs +14 -3
- package/dist/esm/llm/text.mjs.map +1 -1
- package/dist/esm/llm/vertexai/index.mjs +328 -0
- package/dist/esm/llm/vertexai/index.mjs.map +1 -0
- package/dist/esm/main.mjs +6 -5
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/run.mjs +121 -83
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +87 -54
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +10 -4
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +119 -15
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/tools/search/anthropic.mjs +37 -0
- package/dist/esm/tools/search/anthropic.mjs.map +1 -0
- package/dist/esm/tools/search/firecrawl.mjs +55 -9
- package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
- package/dist/esm/tools/search/format.mjs +7 -7
- package/dist/esm/tools/search/format.mjs.map +1 -1
- package/dist/esm/tools/search/rerankers.mjs +7 -29
- package/dist/esm/tools/search/rerankers.mjs.map +1 -1
- package/dist/esm/tools/search/search.mjs +86 -16
- package/dist/esm/tools/search/search.mjs.map +1 -1
- package/dist/esm/tools/search/tool.mjs +4 -2
- package/dist/esm/tools/search/tool.mjs.map +1 -1
- package/dist/esm/tools/search/utils.mjs +1 -1
- package/dist/esm/tools/search/utils.mjs.map +1 -1
- package/dist/esm/utils/events.mjs +29 -0
- package/dist/esm/utils/events.mjs.map +1 -0
- package/dist/esm/utils/title.mjs +57 -22
- package/dist/esm/utils/title.mjs.map +1 -1
- package/dist/esm/utils/tokens.mjs +54 -8
- package/dist/esm/utils/tokens.mjs.map +1 -1
- package/dist/types/agents/AgentContext.d.ts +91 -0
- package/dist/types/common/enum.d.ts +15 -6
- package/dist/types/events.d.ts +5 -4
- package/dist/types/graphs/Graph.d.ts +64 -67
- package/dist/types/graphs/MultiAgentGraph.d.ts +37 -0
- package/dist/types/graphs/index.d.ts +1 -0
- package/dist/types/llm/anthropic/index.d.ts +11 -0
- package/dist/types/llm/anthropic/types.d.ts +9 -3
- package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
- package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
- package/dist/types/llm/google/index.d.ts +13 -0
- package/dist/types/llm/google/types.d.ts +32 -0
- package/dist/types/llm/google/utils/common.d.ts +19 -0
- package/dist/types/llm/google/utils/tools.d.ts +10 -0
- package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
- package/dist/types/llm/ollama/index.d.ts +7 -0
- package/dist/types/llm/ollama/utils.d.ts +7 -0
- package/dist/types/llm/openai/index.d.ts +72 -3
- package/dist/types/llm/openai/types.d.ts +10 -0
- package/dist/types/llm/openai/utils/index.d.ts +20 -0
- package/dist/types/llm/text.d.ts +1 -1
- package/dist/types/llm/vertexai/index.d.ts +293 -0
- package/dist/types/messages/reducer.d.ts +9 -0
- package/dist/types/run.d.ts +19 -12
- package/dist/types/scripts/ant_web_search.d.ts +1 -0
- package/dist/types/scripts/args.d.ts +2 -1
- package/dist/types/scripts/handoff-test.d.ts +1 -0
- package/dist/types/scripts/multi-agent-conditional.d.ts +1 -0
- package/dist/types/scripts/multi-agent-parallel.d.ts +1 -0
- package/dist/types/scripts/multi-agent-sequence.d.ts +1 -0
- package/dist/types/scripts/multi-agent-supervisor.d.ts +1 -0
- package/dist/types/scripts/multi-agent-test.d.ts +1 -0
- package/dist/types/scripts/test-custom-prompt-key.d.ts +2 -0
- package/dist/types/scripts/test-handoff-input.d.ts +2 -0
- package/dist/types/scripts/test-multi-agent-list-handoff.d.ts +2 -0
- package/dist/types/stream.d.ts +10 -3
- package/dist/types/tools/CodeExecutor.d.ts +2 -2
- package/dist/types/tools/ToolNode.d.ts +1 -1
- package/dist/types/tools/handlers.d.ts +17 -4
- package/dist/types/tools/search/anthropic.d.ts +16 -0
- package/dist/types/tools/search/firecrawl.d.ts +15 -0
- package/dist/types/tools/search/rerankers.d.ts +0 -1
- package/dist/types/tools/search/types.d.ts +30 -9
- package/dist/types/types/graph.d.ts +129 -15
- package/dist/types/types/llm.d.ts +24 -10
- package/dist/types/types/run.d.ts +46 -8
- package/dist/types/types/stream.d.ts +16 -2
- package/dist/types/types/tools.d.ts +1 -1
- package/dist/types/utils/events.d.ts +6 -0
- package/dist/types/utils/title.d.ts +2 -1
- package/dist/types/utils/tokens.d.ts +24 -0
- package/package.json +37 -17
- package/src/agents/AgentContext.ts +315 -0
- package/src/common/enum.ts +14 -5
- package/src/events.ts +24 -13
- package/src/graphs/Graph.ts +495 -312
- package/src/graphs/MultiAgentGraph.ts +498 -0
- package/src/graphs/index.ts +2 -1
- package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
- package/src/llm/anthropic/index.ts +78 -13
- package/src/llm/anthropic/llm.spec.ts +491 -115
- package/src/llm/anthropic/types.ts +39 -3
- package/src/llm/anthropic/utils/message_inputs.ts +67 -11
- package/src/llm/anthropic/utils/message_outputs.ts +21 -2
- package/src/llm/anthropic/utils/output_parsers.ts +25 -6
- package/src/llm/anthropic/utils/tools.ts +29 -0
- package/src/llm/google/index.ts +218 -0
- package/src/llm/google/types.ts +43 -0
- package/src/llm/google/utils/common.ts +646 -0
- package/src/llm/google/utils/tools.ts +160 -0
- package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
- package/src/llm/ollama/index.ts +89 -0
- package/src/llm/ollama/utils.ts +193 -0
- package/src/llm/openai/index.ts +600 -14
- package/src/llm/openai/types.ts +24 -0
- package/src/llm/openai/utils/index.ts +912 -0
- package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
- package/src/llm/providers.ts +10 -9
- package/src/llm/text.ts +26 -7
- package/src/llm/vertexai/index.ts +360 -0
- package/src/messages/reducer.ts +80 -0
- package/src/run.ts +181 -112
- package/src/scripts/ant_web_search.ts +158 -0
- package/src/scripts/args.ts +12 -8
- package/src/scripts/cli4.ts +29 -21
- package/src/scripts/cli5.ts +29 -21
- package/src/scripts/code_exec.ts +54 -23
- package/src/scripts/code_exec_files.ts +48 -17
- package/src/scripts/code_exec_simple.ts +46 -27
- package/src/scripts/handoff-test.ts +135 -0
- package/src/scripts/image.ts +52 -20
- package/src/scripts/multi-agent-conditional.ts +220 -0
- package/src/scripts/multi-agent-example-output.md +110 -0
- package/src/scripts/multi-agent-parallel.ts +341 -0
- package/src/scripts/multi-agent-sequence.ts +212 -0
- package/src/scripts/multi-agent-supervisor.ts +361 -0
- package/src/scripts/multi-agent-test.ts +186 -0
- package/src/scripts/search.ts +1 -9
- package/src/scripts/simple.ts +25 -10
- package/src/scripts/test-custom-prompt-key.ts +145 -0
- package/src/scripts/test-handoff-input.ts +110 -0
- package/src/scripts/test-multi-agent-list-handoff.ts +258 -0
- package/src/scripts/tools.ts +48 -18
- package/src/specs/anthropic.simple.test.ts +150 -34
- package/src/specs/azure.simple.test.ts +325 -0
- package/src/specs/openai.simple.test.ts +140 -33
- package/src/specs/openrouter.simple.test.ts +107 -0
- package/src/specs/prune.test.ts +4 -9
- package/src/specs/reasoning.test.ts +80 -44
- package/src/specs/token-memoization.test.ts +39 -0
- package/src/stream.test.ts +94 -0
- package/src/stream.ts +139 -60
- package/src/tools/ToolNode.ts +21 -7
- package/src/tools/handlers.ts +192 -18
- package/src/tools/search/anthropic.ts +51 -0
- package/src/tools/search/firecrawl.ts +69 -20
- package/src/tools/search/format.ts +6 -8
- package/src/tools/search/rerankers.ts +7 -40
- package/src/tools/search/search.ts +97 -16
- package/src/tools/search/tool.ts +5 -2
- package/src/tools/search/types.ts +30 -10
- package/src/tools/search/utils.ts +1 -1
- package/src/types/graph.ts +315 -103
- package/src/types/llm.ts +25 -12
- package/src/types/run.ts +51 -13
- package/src/types/stream.ts +22 -1
- package/src/types/tools.ts +16 -10
- package/src/utils/events.ts +32 -0
- package/src/utils/llmConfig.ts +19 -7
- package/src/utils/title.ts +104 -30
- package/src/utils/tokens.ts +69 -10
package/dist/cjs/graphs/Graph.cjs
@@ -4,28 +4,26 @@ var nanoid = require('nanoid');
 var stream = require('@langchain/core/utils/stream');
 var googleVertexai = require('@langchain/google-vertexai');
 var langgraph = require('@langchain/langgraph');
-var
+var runnables = require('@langchain/core/runnables');
 var messages = require('@langchain/core/messages');
 var _enum = require('../common/enum.cjs');
-var providers = require('../llm/providers.cjs');
-var ToolNode = require('../tools/ToolNode.cjs');
 var core = require('../messages/core.cjs');
 var prune = require('../messages/prune.cjs');
 var graph = require('../utils/graph.cjs');
 var llm = require('../utils/llm.cjs');
 var run = require('../utils/run.cjs');
 require('js-tiktoken/lite');
+var providers = require('../llm/providers.cjs');
+var ToolNode = require('../tools/ToolNode.cjs');
 var index = require('../llm/openai/index.cjs');
+var events = require('../utils/events.cjs');
+var AgentContext = require('../agents/AgentContext.cjs');
 var fake = require('../llm/fake.cjs');

 /* eslint-disable no-console */
 // src/graphs/Graph.ts
 const { AGENT, TOOLS } = _enum.GraphNodeKeys;
 class Graph {
-lastToken;
-tokenTypeSwitch;
-reasoningKey = 'reasoning_content';
-currentTokenType = _enum.ContentTypes.TEXT;
 messageStepHasToolCalls = new Map();
 messageIdsByStepKey = new Map();
 prelimMessageIdsByStepKey = new Map();
@@ -34,71 +32,37 @@ class Graph {
 stepKeyIds = new Map();
 contentIndexMap = new Map();
 toolCallStepIds = new Map();
-currentUsage;
-indexTokenCountMap = {};
-maxContextTokens;
-pruneMessages;
-/** The amount of time that should pass before another consecutive API call */
-streamBuffer;
-tokenCounter;
 signal;
+/** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
+invokedToolIds;
+handlerRegistry;
 }
 class StandardGraph extends Graph {
-
-
-
-/** The last recorded timestamp that a stream API call was invoked */
-lastStreamCall;
-handlerRegistry;
-systemMessage;
+overrideModel;
+/** Optional compile options passed into workflow.compile() */
+compileOptions;
 messages = [];
 runId;
-tools;
-toolMap;
 startIndex = 0;
-provider;
-toolEnd;
 signal;
-
+/** Map of agent contexts by agent ID */
+agentContexts = new Map();
+/** Default agent ID to use */
+defaultAgentId;
+constructor({
+// parent-level graph inputs
+runId, signal, agents, tokenCounter, indexTokenCountMap, }) {
 super();
 this.runId = runId;
-this.tools = tools;
 this.signal = signal;
-
-
-this.provider = provider;
-this.streamBuffer = streamBuffer;
-this.clientOptions = clientOptions;
-this.graphState = this.createGraphState();
-this.boundModel = this.initializeModel();
-if (reasoningKey) {
-this.reasoningKey = reasoningKey;
+if (agents.length === 0) {
+throw new Error('At least one agent configuration is required');
 }
-
-
-
-finalInstructions != null && finalInstructions
-? `${finalInstructions}\n\n${additional_instructions}`
-: additional_instructions;
-}
-if (finalInstructions != null &&
-finalInstructions &&
-provider === _enum.Providers.ANTHROPIC &&
-(clientOptions.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
-false)) {
-finalInstructions = {
-content: [
-{
-type: 'text',
-text: instructions,
-cache_control: { type: 'ephemeral' },
-},
-],
-};
-}
-if (finalInstructions != null && finalInstructions !== '') {
-this.systemMessage = new messages.SystemMessage(finalInstructions);
+for (const agentConfig of agents) {
+const agentContext = AgentContext.AgentContext.fromConfig(agentConfig, tokenCounter, indexTokenCountMap);
+this.agentContexts.set(agentConfig.agentId, agentContext);
 }
+this.defaultAgentId = agents[0].agentId;
 }
 /* Init */
 resetValues(keepContent) {
@@ -111,15 +75,12 @@ class StandardGraph extends Graph {
 this.stepKeyIds = graph.resetIfNotEmpty(this.stepKeyIds, new Map());
 this.toolCallStepIds = graph.resetIfNotEmpty(this.toolCallStepIds, new Map());
 this.messageIdsByStepKey = graph.resetIfNotEmpty(this.messageIdsByStepKey, new Map());
-this.messageStepHasToolCalls = graph.resetIfNotEmpty(this.
+this.messageStepHasToolCalls = graph.resetIfNotEmpty(this.messageStepHasToolCalls, new Map());
 this.prelimMessageIdsByStepKey = graph.resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
-this.
-
-
-
-this.currentUsage = graph.resetIfNotEmpty(this.currentUsage, undefined);
-this.tokenCounter = graph.resetIfNotEmpty(this.tokenCounter, undefined);
-this.maxContextTokens = graph.resetIfNotEmpty(this.maxContextTokens, undefined);
+this.invokedToolIds = graph.resetIfNotEmpty(this.invokedToolIds, undefined);
+for (const context of this.agentContexts.values()) {
+context.reset();
+}
 }
 /* Run Step Processing */
 getRunStep(stepId) {
@@ -129,6 +90,27 @@ class StandardGraph extends Graph {
 }
 return undefined;
 }
+getAgentContext(metadata) {
+if (!metadata) {
+throw new Error('No metadata provided to retrieve agent context');
+}
+const currentNode = metadata.langgraph_node;
+if (!currentNode) {
+throw new Error('No langgraph_node in metadata to retrieve agent context');
+}
+let agentId;
+if (currentNode.startsWith(AGENT)) {
+agentId = currentNode.substring(AGENT.length);
+}
+else if (currentNode.startsWith(TOOLS)) {
+agentId = currentNode.substring(TOOLS.length);
+}
+const agentContext = this.agentContexts.get(agentId ?? '');
+if (!agentContext) {
+throw new Error(`No agent context found for agent ID ${agentId}`);
+}
+return agentContext;
+}
 getStepKey(metadata) {
 if (!metadata)
 return '';
@@ -174,10 +156,14 @@ class StandardGraph extends Graph {
 metadata.langgraph_step,
 metadata.checkpoint_ns,
 ];
-
-
+const agentContext = this.getAgentContext(metadata);
+if (agentContext.currentTokenType === _enum.ContentTypes.THINK ||
+agentContext.currentTokenType === 'think_and_text') {
 keyList.push('reasoning');
 }
+if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
+keyList.push(this.invokedToolIds.size + '');
+}
 return keyList;
 }
 checkKeyList(keyList) {
@@ -191,113 +177,159 @@ class StandardGraph extends Graph {
 return core.convertMessagesToContent(this.messages.slice(this.startIndex));
 }
 /* Graph */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+createSystemRunnable({ provider, clientOptions, instructions, additional_instructions, }) {
+let finalInstructions = instructions;
+if (additional_instructions != null && additional_instructions !== '') {
+finalInstructions =
+finalInstructions != null && finalInstructions
+? `${finalInstructions}\n\n${additional_instructions}`
+: additional_instructions;
+}
+if (finalInstructions != null &&
+finalInstructions &&
+provider === _enum.Providers.ANTHROPIC &&
+(clientOptions.clientOptions
+?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
+false)) {
+finalInstructions = {
+content: [
+{
+type: 'text',
+text: instructions,
+cache_control: { type: 'ephemeral' },
+},
+],
+};
+}
+if (finalInstructions != null && finalInstructions !== '') {
+const systemMessage = new messages.SystemMessage(finalInstructions);
+return runnables.RunnableLambda.from((messages) => {
+return [systemMessage, ...messages];
+}).withConfig({ runName: 'prompt' });
+}
 }
-initializeTools() {
+initializeTools({ currentTools, currentToolMap, }) {
 // return new ToolNode<t.BaseGraphState>(this.tools);
 return new ToolNode.ToolNode({
-tools:
-toolMap:
+tools: currentTools ?? [],
+toolMap: currentToolMap,
 toolCallStepIds: this.toolCallStepIds,
 errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
 });
 }
-initializeModel() {
-const ChatModelClass = providers.getChatModelClass(
-const model = new ChatModelClass(
-if (llm.isOpenAILike(
+initializeModel({ provider, tools, clientOptions, }) {
+const ChatModelClass = providers.getChatModelClass(provider);
+const model = new ChatModelClass(clientOptions ?? {});
+if (llm.isOpenAILike(provider) &&
 (model instanceof index.ChatOpenAI || model instanceof index.AzureChatOpenAI)) {
-model.temperature =
+model.temperature = clientOptions
 .temperature;
-model.topP =
-model.frequencyPenalty =
+model.topP = clientOptions.topP;
+model.frequencyPenalty = clientOptions
 .frequencyPenalty;
-model.presencePenalty =
+model.presencePenalty = clientOptions
 .presencePenalty;
-model.n =
+model.n = clientOptions.n;
 }
-else if (
+else if (provider === _enum.Providers.VERTEXAI &&
 model instanceof googleVertexai.ChatVertexAI) {
-model.temperature =
+model.temperature = clientOptions
 .temperature;
-model.topP =
-
-model.
-.topK;
-model.topLogprobs = this.clientOptions
+model.topP = clientOptions.topP;
+model.topK = clientOptions.topK;
+model.topLogprobs = clientOptions
 .topLogprobs;
-model.frequencyPenalty =
+model.frequencyPenalty = clientOptions
 .frequencyPenalty;
-model.presencePenalty =
+model.presencePenalty = clientOptions
 .presencePenalty;
-model.maxOutputTokens =
+model.maxOutputTokens = clientOptions
 .maxOutputTokens;
 }
-if (!
+if (!tools || tools.length === 0) {
 return model;
 }
-return model.bindTools(
+return model.bindTools(tools);
 }
 overrideTestModel(responses, sleep, toolCalls) {
-this.
+this.overrideModel = fake.createFakeStreamingLLM({
 responses,
 sleep,
 toolCalls,
 });
 }
-getNewModel({
-const ChatModelClass = providers.getChatModelClass(
-
-? Object.fromEntries(Object.entries(this.clientOptions).filter(([key]) => !omitOriginalOptions.has(key)))
-: this.clientOptions;
-const options = Object.assign(_options, clientOptions);
-return new ChatModelClass(options);
+getNewModel({ provider, clientOptions, }) {
+const ChatModelClass = providers.getChatModelClass(provider);
+return new ChatModelClass(clientOptions ?? {});
 }
-
+getUsageMetadata(finalMessage) {
 if (finalMessage &&
 'usage_metadata' in finalMessage &&
 finalMessage.usage_metadata != null) {
-
+return finalMessage.usage_metadata;
+}
+}
+/** Execute model invocation with streaming support */
+async attemptInvoke({ currentModel, finalMessages, provider, tools, }, config) {
+const model = this.overrideModel ?? currentModel;
+if (!model) {
+throw new Error('No model found');
+}
+if ((tools?.length ?? 0) > 0 && providers.manualToolStreamProviders.has(provider)) {
+if (!model.stream) {
+throw new Error('Model does not support stream');
+}
+const stream$1 = await model.stream(finalMessages, config);
+let finalChunk;
+for await (const chunk of stream$1) {
+await events.safeDispatchCustomEvent(_enum.GraphEvents.CHAT_MODEL_STREAM, { chunk, emitted: true }, config);
+finalChunk = finalChunk ? stream.concat(finalChunk, chunk) : chunk;
+}
+finalChunk = core.modifyDeltaProperties(provider, finalChunk);
+return { messages: [finalChunk] };
+}
+else {
+const finalMessage = await model.invoke(finalMessages, config);
+if ((finalMessage.tool_calls?.length ?? 0) > 0) {
+finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => !!tool_call.name);
+}
+return { messages: [finalMessage] };
 }
 }
-cleanupSignalListener() {
+cleanupSignalListener(currentModel) {
 if (!this.signal) {
 return;
 }
-
+const model = this.overrideModel ?? currentModel;
+if (!model) {
 return;
 }
-const client =
+const client = model?.exposedClient;
 if (!client?.abortHandler) {
 return;
 }
 this.signal.removeEventListener('abort', client.abortHandler);
 client.abortHandler = undefined;
 }
-createCallModel() {
+createCallModel(agentId = 'default', currentModel) {
 return async (state, config) => {
-
-
+/**
+* Get agent context - it must exist by this point
+*/
+const agentContext = this.agentContexts.get(agentId);
+if (!agentContext) {
+throw new Error(`Agent context not found for agentId: ${agentId}`);
+}
+const model = this.overrideModel ?? currentModel;
+if (!model) {
 throw new Error('No Graph model found');
 }
-if (!config
-throw new Error(
+if (!config) {
+throw new Error('No config provided');
+}
+// Ensure token calculations are complete before proceeding
+if (agentContext.tokenCalculationPromise) {
+await agentContext.tokenCalculationPromise;
 }
 if (!config.signal) {
 config.signal = this.signal;
@@ -305,32 +337,32 @@ class StandardGraph extends Graph {
 this.config = config;
 const { messages: messages$1 } = state;
 let messagesToUse = messages$1;
-if (!
-
-
-
-const isAnthropicWithThinking = (
-
+if (!agentContext.pruneMessages &&
+agentContext.tokenCounter &&
+agentContext.maxContextTokens != null &&
+agentContext.indexTokenCountMap[0] != null) {
+const isAnthropicWithThinking = (agentContext.provider === _enum.Providers.ANTHROPIC &&
+agentContext.clientOptions.thinking !=
 null) ||
-(
-
+(agentContext.provider === _enum.Providers.BEDROCK &&
+agentContext.clientOptions
 .additionalModelRequestFields?.['thinking'] != null);
-
-provider: this.provider,
-indexTokenCountMap: this.indexTokenCountMap,
-maxTokens: this.maxContextTokens,
-tokenCounter: this.tokenCounter,
+agentContext.pruneMessages = prune.createPruneMessages({
 startIndex: this.startIndex,
+provider: agentContext.provider,
+tokenCounter: agentContext.tokenCounter,
+maxTokens: agentContext.maxContextTokens,
 thinkingEnabled: isAnthropicWithThinking,
+indexTokenCountMap: agentContext.indexTokenCountMap,
 });
 }
-if (
-const { context, indexTokenCountMap } =
+if (agentContext.pruneMessages) {
+const { context, indexTokenCountMap } = agentContext.pruneMessages({
 messages: messages$1,
-usageMetadata:
+usageMetadata: agentContext.currentUsage,
 // startOnMessageType: 'human',
 });
-
+agentContext.indexTokenCountMap = indexTokenCountMap;
 messagesToUse = context;
 }
 const finalMessages = messagesToUse;
@@ -340,87 +372,145 @@ class StandardGraph extends Graph {
 const lastMessageY = finalMessages.length >= 1
 ? finalMessages[finalMessages.length - 1]
 : null;
-if (provider === _enum.Providers.BEDROCK &&
+if (agentContext.provider === _enum.Providers.BEDROCK &&
 lastMessageX instanceof messages.AIMessageChunk &&
 lastMessageY instanceof messages.ToolMessage &&
 typeof lastMessageX.content === 'string') {
 finalMessages[finalMessages.length - 2].content = '';
 }
 const isLatestToolMessage = lastMessageY instanceof messages.ToolMessage;
-if (isLatestToolMessage &&
+if (isLatestToolMessage &&
+agentContext.provider === _enum.Providers.ANTHROPIC) {
 core.formatAnthropicArtifactContent(finalMessages);
 }
 else if (isLatestToolMessage &&
-(llm.isOpenAILike(provider) ||
+(llm.isOpenAILike(agentContext.provider) ||
+llm.isGoogleLike(agentContext.provider))) {
 core.formatArtifactPayload(finalMessages);
 }
-if (
-
-
-
+if (agentContext.lastStreamCall != null &&
+agentContext.streamBuffer != null) {
+const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
+if (timeSinceLastCall < agentContext.streamBuffer) {
+const timeToWait = Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
+1000;
 await run.sleep(timeToWait);
 }
 }
-
+agentContext.lastStreamCall = Date.now();
 let result;
-
-
-
-
-
-
-
-
+const fallbacks = agentContext.clientOptions?.fallbacks ??
+[];
+try {
+result = await this.attemptInvoke({
+currentModel: model,
+finalMessages,
+provider: agentContext.provider,
+tools: agentContext.tools,
+}, config);
+}
+catch (primaryError) {
+let lastError = primaryError;
+for (const fb of fallbacks) {
+try {
+let model = this.getNewModel({
+provider: fb.provider,
+clientOptions: fb.clientOptions,
+});
+const bindableTools = agentContext.tools;
+model = (!bindableTools || bindableTools.length === 0
+? model
+: model.bindTools(bindableTools));
+result = await this.attemptInvoke({
+currentModel: model,
+finalMessages,
+provider: fb.provider,
+tools: agentContext.tools,
+}, config);
+lastError = undefined;
+break;
 }
-
-
+catch (e) {
+lastError = e;
+continue;
 }
 }
-
-
-}
-else {
-const finalMessage = (await this.boundModel.invoke(finalMessages, config));
-if ((finalMessage.tool_calls?.length ?? 0) > 0) {
-finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => {
-if (!tool_call.name) {
-return false;
-}
-return true;
-});
+if (lastError !== undefined) {
+throw lastError;
 }
-result = { messages: [finalMessage] };
 }
-
+if (!result) {
+throw new Error('No result after model invocation');
+}
+agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
 this.cleanupSignalListener();
 return result;
 };
 }
-
+createAgentNode(agentId) {
+const agentContext = this.agentContexts.get(agentId);
+if (!agentContext) {
+throw new Error(`Agent context not found for agentId: ${agentId}`);
+}
+let currentModel = this.initializeModel({
+tools: agentContext.tools,
+provider: agentContext.provider,
+clientOptions: agentContext.clientOptions,
+});
+if (agentContext.systemRunnable) {
+currentModel = agentContext.systemRunnable.pipe(currentModel);
+}
+const agentNode = `${AGENT}${agentId}`;
+const toolNode = `${TOOLS}${agentId}`;
 const routeMessage = (state, config) => {
 this.config = config;
-
-// if (!lastMessage?.tool_calls?.length) {
-// return END;
-// }
-// return TOOLS;
-return ToolNode.toolsCondition(state);
+return ToolNode.toolsCondition(state, toolNode, this.invokedToolIds);
 };
-const
-
-
-
-
-
-
-.
-
+const StateAnnotation = langgraph.Annotation.Root({
+messages: langgraph.Annotation({
+reducer: langgraph.messagesStateReducer,
+default: () => [],
+}),
+});
+const workflow = new langgraph.StateGraph(StateAnnotation)
+.addNode(agentNode, this.createCallModel(agentId, currentModel))
+.addNode(toolNode, this.initializeTools({
+currentTools: agentContext.tools,
+currentToolMap: agentContext.toolMap,
+}))
+.addEdge(langgraph.START, agentNode)
+.addConditionalEdges(agentNode, routeMessage)
+.addEdge(toolNode, agentContext.toolEnd ? langgraph.END : agentNode);
+// Cast to unknown to avoid tight coupling to external types; options are opt-in
+return workflow.compile(this.compileOptions);
+}
+createWorkflow() {
+/** Use the default (first) agent for now */
+const agentNode = this.createAgentNode(this.defaultAgentId);
+const StateAnnotation = langgraph.Annotation.Root({
+messages: langgraph.Annotation({
+reducer: (a, b) => {
+if (!a.length) {
+this.startIndex = a.length + b.length;
+}
+const result = langgraph.messagesStateReducer(a, b);
+this.messages = result;
+return result;
+},
+default: () => [],
+}),
+});
+const workflow = new langgraph.StateGraph(StateAnnotation)
+.addNode(this.defaultAgentId, agentNode, { ends: [langgraph.END] })
+.addEdge(langgraph.START, this.defaultAgentId)
+.compile();
+return workflow;
 }
 /* Dispatchers */
 /**
 * Dispatches a run step to the client, returns the step ID
 */
-dispatchRunStep(stepKey, stepDetails) {
+async dispatchRunStep(stepKey, stepDetails) {
 if (!this.config) {
 throw new Error('No config provided');
 }
@@ -448,17 +538,21 @@ class StandardGraph extends Graph {
 }
 this.contentData.push(runStep);
 this.contentIndexMap.set(stepId, runStep.index);
-
+await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP, runStep, this.config);
 return stepId;
 }
-handleToolCallCompleted(data, metadata) {
+async handleToolCallCompleted(data, metadata, omitOutput) {
 if (!this.config) {
 throw new Error('No config provided');
 }
 if (!data.output) {
 return;
 }
-const { input, output } = data;
+const { input, output: _output } = data;
+if (_output?.lg_name === 'Command') {
+return;
+}
+const output = _output;
 const { tool_call_id } = output;
 const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
 if (!stepId) {
@@ -468,17 +562,20 @@ class StandardGraph extends Graph {
 if (!runStep) {
 throw new Error(`No run step found for stepId ${stepId}`);
 }
+const dispatchedOutput = typeof output.content === 'string'
+? output.content
+: JSON.stringify(output.content);
 const args = typeof input === 'string' ? input : input.input;
 const tool_call = {
 args: typeof args === 'string' ? args : JSON.stringify(args),
 name: output.name ?? '',
 id: output.tool_call_id,
-output:
-? output.content
-: JSON.stringify(output.content),
+output: omitOutput === true ? '' : dispatchedOutput,
 progress: 1,
 };
-this.handlerRegistry
+await this.handlerRegistry
+?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
+?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
 result: {
 id: stepId,
 index: runStep.index,
@@ -491,7 +588,7 @@ class StandardGraph extends Graph {
 * Static version of handleToolCallError to avoid creating strong references
 * that prevent garbage collection
 */
-static handleToolCallErrorStatic(graph, data, metadata) {
+static async handleToolCallErrorStatic(graph, data, metadata) {
 if (!graph.config) {
 throw new Error('No config provided');
 }
@@ -515,7 +612,7 @@ class StandardGraph extends Graph {
 output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
 progress: 1,
 };
-graph.handlerRegistry
+await graph.handlerRegistry
 ?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
 ?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
 result: {
@@ -530,10 +627,10 @@ class StandardGraph extends Graph {
 * Instance method that delegates to the static method
 * Kept for backward compatibility
 */
-handleToolCallError(data, metadata) {
-StandardGraph.handleToolCallErrorStatic(this, data, metadata);
+async handleToolCallError(data, metadata) {
+await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
 }
-dispatchRunStepDelta(id, delta) {
+async dispatchRunStepDelta(id, delta) {
 if (!this.config) {
 throw new Error('No config provided');
 }
@@ -544,9 +641,9 @@ class StandardGraph extends Graph {
 id,
 delta,
 };
-
+await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
 }
-dispatchMessageDelta(id, delta) {
+async dispatchMessageDelta(id, delta) {
 if (!this.config) {
 throw new Error('No config provided');
 }
@@ -554,9 +651,9 @@ class StandardGraph extends Graph {
 id,
 delta,
 };
-
+await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
 }
-dispatchReasoningDelta = (stepId, delta) => {
+dispatchReasoningDelta = async (stepId, delta) => {
 if (!this.config) {
 throw new Error('No config provided');
 }
@@ -564,7 +661,7 @@ class StandardGraph extends Graph {
 id: stepId,
 delta,
 };
-
+await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_REASONING_DELTA, reasoningDelta, this.config);
 };
 }