@librechat/agents 2.4.322 → 3.0.0-rc2
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- package/dist/cjs/agents/AgentContext.cjs +218 -0
- package/dist/cjs/agents/AgentContext.cjs.map +1 -0
- package/dist/cjs/common/enum.cjs +14 -5
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +10 -6
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +309 -212
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +422 -0
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/index.cjs +54 -9
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
- package/dist/cjs/llm/google/index.cjs +144 -0
- package/dist/cjs/llm/google/index.cjs.map +1 -0
- package/dist/cjs/llm/google/utils/common.cjs +477 -0
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
- package/dist/cjs/llm/ollama/index.cjs +67 -0
- package/dist/cjs/llm/ollama/index.cjs.map +1 -0
- package/dist/cjs/llm/ollama/utils.cjs +158 -0
- package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +389 -3
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/utils/index.cjs +672 -0
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
- package/dist/cjs/llm/providers.cjs +15 -15
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/text.cjs +14 -3
- package/dist/cjs/llm/text.cjs.map +1 -1
- package/dist/cjs/llm/vertexai/index.cjs +330 -0
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
- package/dist/cjs/main.cjs +11 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/run.cjs +120 -81
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +85 -51
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +10 -4
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +119 -13
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/tools/search/anthropic.cjs +40 -0
- package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
- package/dist/cjs/tools/search/firecrawl.cjs +55 -9
- package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
- package/dist/cjs/tools/search/format.cjs +6 -6
- package/dist/cjs/tools/search/format.cjs.map +1 -1
- package/dist/cjs/tools/search/rerankers.cjs +7 -29
- package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
- package/dist/cjs/tools/search/search.cjs +86 -16
- package/dist/cjs/tools/search/search.cjs.map +1 -1
- package/dist/cjs/tools/search/tool.cjs +4 -2
- package/dist/cjs/tools/search/tool.cjs.map +1 -1
- package/dist/cjs/tools/search/utils.cjs +1 -1
- package/dist/cjs/tools/search/utils.cjs.map +1 -1
- package/dist/cjs/utils/events.cjs +31 -0
- package/dist/cjs/utils/events.cjs.map +1 -0
- package/dist/cjs/utils/title.cjs +57 -21
- package/dist/cjs/utils/title.cjs.map +1 -1
- package/dist/cjs/utils/tokens.cjs +54 -7
- package/dist/cjs/utils/tokens.cjs.map +1 -1
- package/dist/esm/agents/AgentContext.mjs +216 -0
- package/dist/esm/agents/AgentContext.mjs.map +1 -0
- package/dist/esm/common/enum.mjs +15 -6
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +10 -6
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +311 -214
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +420 -0
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
- package/dist/esm/llm/anthropic/index.mjs +54 -9
- package/dist/esm/llm/anthropic/index.mjs.map +1 -1
- package/dist/esm/llm/anthropic/types.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
- package/dist/esm/llm/google/index.mjs +142 -0
- package/dist/esm/llm/google/index.mjs.map +1 -0
- package/dist/esm/llm/google/utils/common.mjs +471 -0
- package/dist/esm/llm/google/utils/common.mjs.map +1 -0
- package/dist/esm/llm/ollama/index.mjs +65 -0
- package/dist/esm/llm/ollama/index.mjs.map +1 -0
- package/dist/esm/llm/ollama/utils.mjs +155 -0
- package/dist/esm/llm/ollama/utils.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +388 -4
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/openai/utils/index.mjs +666 -0
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
- package/dist/esm/llm/providers.mjs +5 -5
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/text.mjs +14 -3
- package/dist/esm/llm/text.mjs.map +1 -1
- package/dist/esm/llm/vertexai/index.mjs +328 -0
- package/dist/esm/llm/vertexai/index.mjs.map +1 -0
- package/dist/esm/main.mjs +6 -5
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/run.mjs +121 -83
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +87 -54
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +10 -4
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +119 -15
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/tools/search/anthropic.mjs +37 -0
- package/dist/esm/tools/search/anthropic.mjs.map +1 -0
- package/dist/esm/tools/search/firecrawl.mjs +55 -9
- package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
- package/dist/esm/tools/search/format.mjs +7 -7
- package/dist/esm/tools/search/format.mjs.map +1 -1
- package/dist/esm/tools/search/rerankers.mjs +7 -29
- package/dist/esm/tools/search/rerankers.mjs.map +1 -1
- package/dist/esm/tools/search/search.mjs +86 -16
- package/dist/esm/tools/search/search.mjs.map +1 -1
- package/dist/esm/tools/search/tool.mjs +4 -2
- package/dist/esm/tools/search/tool.mjs.map +1 -1
- package/dist/esm/tools/search/utils.mjs +1 -1
- package/dist/esm/tools/search/utils.mjs.map +1 -1
- package/dist/esm/utils/events.mjs +29 -0
- package/dist/esm/utils/events.mjs.map +1 -0
- package/dist/esm/utils/title.mjs +57 -22
- package/dist/esm/utils/title.mjs.map +1 -1
- package/dist/esm/utils/tokens.mjs +54 -8
- package/dist/esm/utils/tokens.mjs.map +1 -1
- package/dist/types/agents/AgentContext.d.ts +91 -0
- package/dist/types/common/enum.d.ts +15 -6
- package/dist/types/events.d.ts +5 -4
- package/dist/types/graphs/Graph.d.ts +64 -67
- package/dist/types/graphs/MultiAgentGraph.d.ts +37 -0
- package/dist/types/graphs/index.d.ts +1 -0
- package/dist/types/llm/anthropic/index.d.ts +11 -0
- package/dist/types/llm/anthropic/types.d.ts +9 -3
- package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
- package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
- package/dist/types/llm/google/index.d.ts +13 -0
- package/dist/types/llm/google/types.d.ts +32 -0
- package/dist/types/llm/google/utils/common.d.ts +19 -0
- package/dist/types/llm/google/utils/tools.d.ts +10 -0
- package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
- package/dist/types/llm/ollama/index.d.ts +7 -0
- package/dist/types/llm/ollama/utils.d.ts +7 -0
- package/dist/types/llm/openai/index.d.ts +72 -3
- package/dist/types/llm/openai/types.d.ts +10 -0
- package/dist/types/llm/openai/utils/index.d.ts +20 -0
- package/dist/types/llm/text.d.ts +1 -1
- package/dist/types/llm/vertexai/index.d.ts +293 -0
- package/dist/types/messages/reducer.d.ts +9 -0
- package/dist/types/run.d.ts +19 -12
- package/dist/types/scripts/ant_web_search.d.ts +1 -0
- package/dist/types/scripts/args.d.ts +2 -1
- package/dist/types/scripts/handoff-test.d.ts +1 -0
- package/dist/types/scripts/multi-agent-conditional.d.ts +1 -0
- package/dist/types/scripts/multi-agent-parallel.d.ts +1 -0
- package/dist/types/scripts/multi-agent-sequence.d.ts +1 -0
- package/dist/types/scripts/multi-agent-supervisor.d.ts +1 -0
- package/dist/types/scripts/multi-agent-test.d.ts +1 -0
- package/dist/types/scripts/test-custom-prompt-key.d.ts +2 -0
- package/dist/types/scripts/test-handoff-input.d.ts +2 -0
- package/dist/types/scripts/test-multi-agent-list-handoff.d.ts +2 -0
- package/dist/types/stream.d.ts +10 -3
- package/dist/types/tools/CodeExecutor.d.ts +2 -2
- package/dist/types/tools/ToolNode.d.ts +1 -1
- package/dist/types/tools/handlers.d.ts +17 -4
- package/dist/types/tools/search/anthropic.d.ts +16 -0
- package/dist/types/tools/search/firecrawl.d.ts +15 -0
- package/dist/types/tools/search/rerankers.d.ts +0 -1
- package/dist/types/tools/search/types.d.ts +30 -9
- package/dist/types/types/graph.d.ts +129 -15
- package/dist/types/types/llm.d.ts +24 -10
- package/dist/types/types/run.d.ts +46 -8
- package/dist/types/types/stream.d.ts +16 -2
- package/dist/types/types/tools.d.ts +1 -1
- package/dist/types/utils/events.d.ts +6 -0
- package/dist/types/utils/title.d.ts +2 -1
- package/dist/types/utils/tokens.d.ts +24 -0
- package/package.json +37 -17
- package/src/agents/AgentContext.ts +315 -0
- package/src/common/enum.ts +14 -5
- package/src/events.ts +24 -13
- package/src/graphs/Graph.ts +495 -312
- package/src/graphs/MultiAgentGraph.ts +498 -0
- package/src/graphs/index.ts +2 -1
- package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
- package/src/llm/anthropic/index.ts +78 -13
- package/src/llm/anthropic/llm.spec.ts +491 -115
- package/src/llm/anthropic/types.ts +39 -3
- package/src/llm/anthropic/utils/message_inputs.ts +67 -11
- package/src/llm/anthropic/utils/message_outputs.ts +21 -2
- package/src/llm/anthropic/utils/output_parsers.ts +25 -6
- package/src/llm/anthropic/utils/tools.ts +29 -0
- package/src/llm/google/index.ts +218 -0
- package/src/llm/google/types.ts +43 -0
- package/src/llm/google/utils/common.ts +646 -0
- package/src/llm/google/utils/tools.ts +160 -0
- package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
- package/src/llm/ollama/index.ts +89 -0
- package/src/llm/ollama/utils.ts +193 -0
- package/src/llm/openai/index.ts +600 -14
- package/src/llm/openai/types.ts +24 -0
- package/src/llm/openai/utils/index.ts +912 -0
- package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
- package/src/llm/providers.ts +10 -9
- package/src/llm/text.ts +26 -7
- package/src/llm/vertexai/index.ts +360 -0
- package/src/messages/reducer.ts +80 -0
- package/src/run.ts +181 -112
- package/src/scripts/ant_web_search.ts +158 -0
- package/src/scripts/args.ts +12 -8
- package/src/scripts/cli4.ts +29 -21
- package/src/scripts/cli5.ts +29 -21
- package/src/scripts/code_exec.ts +54 -23
- package/src/scripts/code_exec_files.ts +48 -17
- package/src/scripts/code_exec_simple.ts +46 -27
- package/src/scripts/handoff-test.ts +135 -0
- package/src/scripts/image.ts +52 -20
- package/src/scripts/multi-agent-conditional.ts +220 -0
- package/src/scripts/multi-agent-example-output.md +110 -0
- package/src/scripts/multi-agent-parallel.ts +341 -0
- package/src/scripts/multi-agent-sequence.ts +212 -0
- package/src/scripts/multi-agent-supervisor.ts +361 -0
- package/src/scripts/multi-agent-test.ts +186 -0
- package/src/scripts/search.ts +1 -9
- package/src/scripts/simple.ts +25 -10
- package/src/scripts/test-custom-prompt-key.ts +145 -0
- package/src/scripts/test-handoff-input.ts +110 -0
- package/src/scripts/test-multi-agent-list-handoff.ts +258 -0
- package/src/scripts/tools.ts +48 -18
- package/src/specs/anthropic.simple.test.ts +150 -34
- package/src/specs/azure.simple.test.ts +325 -0
- package/src/specs/openai.simple.test.ts +140 -33
- package/src/specs/openrouter.simple.test.ts +107 -0
- package/src/specs/prune.test.ts +4 -9
- package/src/specs/reasoning.test.ts +80 -44
- package/src/specs/token-memoization.test.ts +39 -0
- package/src/stream.test.ts +94 -0
- package/src/stream.ts +139 -60
- package/src/tools/ToolNode.ts +21 -7
- package/src/tools/handlers.ts +192 -18
- package/src/tools/search/anthropic.ts +51 -0
- package/src/tools/search/firecrawl.ts +69 -20
- package/src/tools/search/format.ts +6 -8
- package/src/tools/search/rerankers.ts +7 -40
- package/src/tools/search/search.ts +97 -16
- package/src/tools/search/tool.ts +5 -2
- package/src/tools/search/types.ts +30 -10
- package/src/tools/search/utils.ts +1 -1
- package/src/types/graph.ts +315 -103
- package/src/types/llm.ts +25 -12
- package/src/types/run.ts +51 -13
- package/src/types/stream.ts +22 -1
- package/src/types/tools.ts +16 -10
- package/src/utils/events.ts +32 -0
- package/src/utils/llmConfig.ts +19 -7
- package/src/utils/title.ts +104 -30
- package/src/utils/tokens.ts +69 -10

package/dist/esm/graphs/Graph.mjs

@@ -1,29 +1,27 @@
 import { nanoid } from 'nanoid';
 import { concat } from '@langchain/core/utils/stream';
 import { ChatVertexAI } from '@langchain/google-vertexai';
-import { StateGraph, START, END } from '@langchain/langgraph';
-import {
+import { Annotation, messagesStateReducer, StateGraph, START, END } from '@langchain/langgraph';
+import { RunnableLambda } from '@langchain/core/runnables';
 import { SystemMessage, AIMessageChunk, ToolMessage } from '@langchain/core/messages';
 import { GraphNodeKeys, ContentTypes, Providers, GraphEvents, StepTypes } from '../common/enum.mjs';
-import {
-import { ToolNode, toolsCondition } from '../tools/ToolNode.mjs';
-import { convertMessagesToContent, formatAnthropicArtifactContent, formatArtifactPayload, modifyDeltaProperties } from '../messages/core.mjs';
+import { convertMessagesToContent, modifyDeltaProperties, formatAnthropicArtifactContent, formatArtifactPayload } from '../messages/core.mjs';
 import { createPruneMessages } from '../messages/prune.mjs';
 import { resetIfNotEmpty, joinKeys } from '../utils/graph.mjs';
 import { isOpenAILike, isGoogleLike } from '../utils/llm.mjs';
 import { sleep } from '../utils/run.mjs';
 import 'js-tiktoken/lite';
+import { getChatModelClass, manualToolStreamProviders } from '../llm/providers.mjs';
+import { ToolNode, toolsCondition } from '../tools/ToolNode.mjs';
 import { ChatOpenAI, AzureChatOpenAI } from '../llm/openai/index.mjs';
+import { safeDispatchCustomEvent } from '../utils/events.mjs';
+import { AgentContext } from '../agents/AgentContext.mjs';
 import { createFakeStreamingLLM } from '../llm/fake.mjs';
 
 /* eslint-disable no-console */
 // src/graphs/Graph.ts
 const { AGENT, TOOLS } = GraphNodeKeys;
 class Graph {
-    lastToken;
-    tokenTypeSwitch;
-    reasoningKey = 'reasoning_content';
-    currentTokenType = ContentTypes.TEXT;
     messageStepHasToolCalls = new Map();
     messageIdsByStepKey = new Map();
     prelimMessageIdsByStepKey = new Map();
@@ -32,71 +30,37 @@ class Graph {
     stepKeyIds = new Map();
     contentIndexMap = new Map();
     toolCallStepIds = new Map();
-    currentUsage;
-    indexTokenCountMap = {};
-    maxContextTokens;
-    pruneMessages;
-    /** The amount of time that should pass before another consecutive API call */
-    streamBuffer;
-    tokenCounter;
     signal;
+    /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
+    invokedToolIds;
+    handlerRegistry;
 }
 class StandardGraph extends Graph {
-
-
-
-    /** The last recorded timestamp that a stream API call was invoked */
-    lastStreamCall;
-    handlerRegistry;
-    systemMessage;
+    overrideModel;
+    /** Optional compile options passed into workflow.compile() */
+    compileOptions;
     messages = [];
     runId;
-    tools;
-    toolMap;
     startIndex = 0;
-    provider;
-    toolEnd;
     signal;
-
+    /** Map of agent contexts by agent ID */
+    agentContexts = new Map();
+    /** Default agent ID to use */
+    defaultAgentId;
+    constructor({
+    // parent-level graph inputs
+    runId, signal, agents, tokenCounter, indexTokenCountMap, }) {
         super();
         this.runId = runId;
-        this.tools = tools;
         this.signal = signal;
-
-
-        this.provider = provider;
-        this.streamBuffer = streamBuffer;
-        this.clientOptions = clientOptions;
-        this.graphState = this.createGraphState();
-        this.boundModel = this.initializeModel();
-        if (reasoningKey) {
-            this.reasoningKey = reasoningKey;
+        if (agents.length === 0) {
+            throw new Error('At least one agent configuration is required');
         }
-
-
-
-            finalInstructions != null && finalInstructions
-                ? `${finalInstructions}\n\n${additional_instructions}`
-                : additional_instructions;
-        }
-        if (finalInstructions != null &&
-            finalInstructions &&
-            provider === Providers.ANTHROPIC &&
-            (clientOptions.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
-                false)) {
-            finalInstructions = {
-                content: [
-                    {
-                        type: 'text',
-                        text: instructions,
-                        cache_control: { type: 'ephemeral' },
-                    },
-                ],
-            };
-        }
-        if (finalInstructions != null && finalInstructions !== '') {
-            this.systemMessage = new SystemMessage(finalInstructions);
+        for (const agentConfig of agents) {
+            const agentContext = AgentContext.fromConfig(agentConfig, tokenCounter, indexTokenCountMap);
+            this.agentContexts.set(agentConfig.agentId, agentContext);
         }
+        this.defaultAgentId = agents[0].agentId;
     }
     /* Init */
     resetValues(keepContent) {
@@ -109,15 +73,12 @@ class StandardGraph extends Graph {
         this.stepKeyIds = resetIfNotEmpty(this.stepKeyIds, new Map());
         this.toolCallStepIds = resetIfNotEmpty(this.toolCallStepIds, new Map());
         this.messageIdsByStepKey = resetIfNotEmpty(this.messageIdsByStepKey, new Map());
-        this.messageStepHasToolCalls = resetIfNotEmpty(this.
+        this.messageStepHasToolCalls = resetIfNotEmpty(this.messageStepHasToolCalls, new Map());
         this.prelimMessageIdsByStepKey = resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
-        this.
-
-
-
-        this.currentUsage = resetIfNotEmpty(this.currentUsage, undefined);
-        this.tokenCounter = resetIfNotEmpty(this.tokenCounter, undefined);
-        this.maxContextTokens = resetIfNotEmpty(this.maxContextTokens, undefined);
+        this.invokedToolIds = resetIfNotEmpty(this.invokedToolIds, undefined);
+        for (const context of this.agentContexts.values()) {
+            context.reset();
+        }
     }
     /* Run Step Processing */
     getRunStep(stepId) {
@@ -127,6 +88,27 @@ class StandardGraph extends Graph {
         }
         return undefined;
     }
+    getAgentContext(metadata) {
+        if (!metadata) {
+            throw new Error('No metadata provided to retrieve agent context');
+        }
+        const currentNode = metadata.langgraph_node;
+        if (!currentNode) {
+            throw new Error('No langgraph_node in metadata to retrieve agent context');
+        }
+        let agentId;
+        if (currentNode.startsWith(AGENT)) {
+            agentId = currentNode.substring(AGENT.length);
+        }
+        else if (currentNode.startsWith(TOOLS)) {
+            agentId = currentNode.substring(TOOLS.length);
+        }
+        const agentContext = this.agentContexts.get(agentId ?? '');
+        if (!agentContext) {
+            throw new Error(`No agent context found for agent ID ${agentId}`);
+        }
+        return agentContext;
+    }
     getStepKey(metadata) {
         if (!metadata)
             return '';
@@ -172,10 +154,14 @@ class StandardGraph extends Graph {
             metadata.langgraph_step,
             metadata.checkpoint_ns,
         ];
-
-
+        const agentContext = this.getAgentContext(metadata);
+        if (agentContext.currentTokenType === ContentTypes.THINK ||
+            agentContext.currentTokenType === 'think_and_text') {
             keyList.push('reasoning');
         }
+        if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
+            keyList.push(this.invokedToolIds.size + '');
+        }
         return keyList;
     }
     checkKeyList(keyList) {
@@ -189,113 +175,159 @@ class StandardGraph extends Graph {
         return convertMessagesToContent(this.messages.slice(this.startIndex));
     }
     /* Graph */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    createSystemRunnable({ provider, clientOptions, instructions, additional_instructions, }) {
+        let finalInstructions = instructions;
+        if (additional_instructions != null && additional_instructions !== '') {
+            finalInstructions =
+                finalInstructions != null && finalInstructions
+                    ? `${finalInstructions}\n\n${additional_instructions}`
+                    : additional_instructions;
+        }
+        if (finalInstructions != null &&
+            finalInstructions &&
+            provider === Providers.ANTHROPIC &&
+            (clientOptions.clientOptions
+                ?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
+                false)) {
+            finalInstructions = {
+                content: [
+                    {
+                        type: 'text',
+                        text: instructions,
+                        cache_control: { type: 'ephemeral' },
+                    },
+                ],
+            };
+        }
+        if (finalInstructions != null && finalInstructions !== '') {
+            const systemMessage = new SystemMessage(finalInstructions);
+            return RunnableLambda.from((messages) => {
+                return [systemMessage, ...messages];
+            }).withConfig({ runName: 'prompt' });
+        }
     }
-    initializeTools() {
+    initializeTools({ currentTools, currentToolMap, }) {
         // return new ToolNode<t.BaseGraphState>(this.tools);
         return new ToolNode({
-            tools:
-            toolMap:
+            tools: currentTools ?? [],
+            toolMap: currentToolMap,
             toolCallStepIds: this.toolCallStepIds,
             errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
         });
     }
-    initializeModel() {
-        const ChatModelClass = getChatModelClass(
-        const model = new ChatModelClass(
-        if (isOpenAILike(
+    initializeModel({ provider, tools, clientOptions, }) {
+        const ChatModelClass = getChatModelClass(provider);
+        const model = new ChatModelClass(clientOptions ?? {});
+        if (isOpenAILike(provider) &&
            (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)) {
-            model.temperature =
+            model.temperature = clientOptions
                .temperature;
-            model.topP =
-            model.frequencyPenalty =
+            model.topP = clientOptions.topP;
+            model.frequencyPenalty = clientOptions
                .frequencyPenalty;
-            model.presencePenalty =
+            model.presencePenalty = clientOptions
                .presencePenalty;
-            model.n =
+            model.n = clientOptions.n;
        }
-        else if (
+        else if (provider === Providers.VERTEXAI &&
            model instanceof ChatVertexAI) {
-            model.temperature =
+            model.temperature = clientOptions
                .temperature;
-            model.topP =
-
-            model.
-                .topK;
-            model.topLogprobs = this.clientOptions
+            model.topP = clientOptions.topP;
+            model.topK = clientOptions.topK;
+            model.topLogprobs = clientOptions
                .topLogprobs;
-            model.frequencyPenalty =
+            model.frequencyPenalty = clientOptions
                .frequencyPenalty;
-            model.presencePenalty =
+            model.presencePenalty = clientOptions
                .presencePenalty;
-            model.maxOutputTokens =
+            model.maxOutputTokens = clientOptions
                .maxOutputTokens;
        }
-        if (!
+        if (!tools || tools.length === 0) {
            return model;
        }
-        return model.bindTools(
+        return model.bindTools(tools);
    }
    overrideTestModel(responses, sleep, toolCalls) {
-        this.
+        this.overrideModel = createFakeStreamingLLM({
            responses,
            sleep,
            toolCalls,
        });
    }
-    getNewModel({
-        const ChatModelClass = getChatModelClass(
-
-            ? Object.fromEntries(Object.entries(this.clientOptions).filter(([key]) => !omitOriginalOptions.has(key)))
-            : this.clientOptions;
-        const options = Object.assign(_options, clientOptions);
-        return new ChatModelClass(options);
+    getNewModel({ provider, clientOptions, }) {
+        const ChatModelClass = getChatModelClass(provider);
+        return new ChatModelClass(clientOptions ?? {});
    }
-
+    getUsageMetadata(finalMessage) {
        if (finalMessage &&
            'usage_metadata' in finalMessage &&
            finalMessage.usage_metadata != null) {
-
+            return finalMessage.usage_metadata;
+        }
+    }
+    /** Execute model invocation with streaming support */
+    async attemptInvoke({ currentModel, finalMessages, provider, tools, }, config) {
+        const model = this.overrideModel ?? currentModel;
+        if (!model) {
+            throw new Error('No model found');
+        }
+        if ((tools?.length ?? 0) > 0 && manualToolStreamProviders.has(provider)) {
+            if (!model.stream) {
+                throw new Error('Model does not support stream');
+            }
+            const stream = await model.stream(finalMessages, config);
+            let finalChunk;
+            for await (const chunk of stream) {
+                await safeDispatchCustomEvent(GraphEvents.CHAT_MODEL_STREAM, { chunk, emitted: true }, config);
+                finalChunk = finalChunk ? concat(finalChunk, chunk) : chunk;
+            }
+            finalChunk = modifyDeltaProperties(provider, finalChunk);
+            return { messages: [finalChunk] };
+        }
+        else {
+            const finalMessage = await model.invoke(finalMessages, config);
+            if ((finalMessage.tool_calls?.length ?? 0) > 0) {
+                finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => !!tool_call.name);
+            }
+            return { messages: [finalMessage] };
        }
    }
-    cleanupSignalListener() {
+    cleanupSignalListener(currentModel) {
        if (!this.signal) {
            return;
        }
-
+        const model = this.overrideModel ?? currentModel;
+        if (!model) {
            return;
        }
-        const client =
+        const client = model?.exposedClient;
        if (!client?.abortHandler) {
            return;
        }
        this.signal.removeEventListener('abort', client.abortHandler);
        client.abortHandler = undefined;
    }
-    createCallModel() {
+    createCallModel(agentId = 'default', currentModel) {
        return async (state, config) => {
-
-
+            /**
+             * Get agent context - it must exist by this point
+             */
+            const agentContext = this.agentContexts.get(agentId);
+            if (!agentContext) {
+                throw new Error(`Agent context not found for agentId: ${agentId}`);
+            }
+            const model = this.overrideModel ?? currentModel;
+            if (!model) {
                throw new Error('No Graph model found');
            }
-            if (!config
-                throw new Error(
+            if (!config) {
+                throw new Error('No config provided');
+            }
+            // Ensure token calculations are complete before proceeding
+            if (agentContext.tokenCalculationPromise) {
+                await agentContext.tokenCalculationPromise;
            }
            if (!config.signal) {
                config.signal = this.signal;
@@ -303,32 +335,32 @@ class StandardGraph extends Graph {
            this.config = config;
            const { messages } = state;
            let messagesToUse = messages;
-            if (!
-
-
-
-            const isAnthropicWithThinking = (
-
+            if (!agentContext.pruneMessages &&
+                agentContext.tokenCounter &&
+                agentContext.maxContextTokens != null &&
+                agentContext.indexTokenCountMap[0] != null) {
+                const isAnthropicWithThinking = (agentContext.provider === Providers.ANTHROPIC &&
+                    agentContext.clientOptions.thinking !=
                    null) ||
-                (
-
+                    (agentContext.provider === Providers.BEDROCK &&
+                        agentContext.clientOptions
                        .additionalModelRequestFields?.['thinking'] != null);
-
-                    provider: this.provider,
-                    indexTokenCountMap: this.indexTokenCountMap,
-                    maxTokens: this.maxContextTokens,
-                    tokenCounter: this.tokenCounter,
+                agentContext.pruneMessages = createPruneMessages({
                    startIndex: this.startIndex,
+                    provider: agentContext.provider,
+                    tokenCounter: agentContext.tokenCounter,
+                    maxTokens: agentContext.maxContextTokens,
                    thinkingEnabled: isAnthropicWithThinking,
+                    indexTokenCountMap: agentContext.indexTokenCountMap,
                });
            }
-            if (
-                const { context, indexTokenCountMap } =
+            if (agentContext.pruneMessages) {
+                const { context, indexTokenCountMap } = agentContext.pruneMessages({
                    messages,
-                    usageMetadata:
+                    usageMetadata: agentContext.currentUsage,
                    // startOnMessageType: 'human',
                });
-
+                agentContext.indexTokenCountMap = indexTokenCountMap;
                messagesToUse = context;
            }
            const finalMessages = messagesToUse;
@@ -338,87 +370,145 @@ class StandardGraph extends Graph {
            const lastMessageY = finalMessages.length >= 1
                ? finalMessages[finalMessages.length - 1]
                : null;
-            if (provider === Providers.BEDROCK &&
+            if (agentContext.provider === Providers.BEDROCK &&
                lastMessageX instanceof AIMessageChunk &&
                lastMessageY instanceof ToolMessage &&
                typeof lastMessageX.content === 'string') {
                finalMessages[finalMessages.length - 2].content = '';
            }
            const isLatestToolMessage = lastMessageY instanceof ToolMessage;
-            if (isLatestToolMessage &&
+            if (isLatestToolMessage &&
+                agentContext.provider === Providers.ANTHROPIC) {
                formatAnthropicArtifactContent(finalMessages);
            }
            else if (isLatestToolMessage &&
-                (isOpenAILike(provider) ||
+                (isOpenAILike(agentContext.provider) ||
+                    isGoogleLike(agentContext.provider))) {
                formatArtifactPayload(finalMessages);
            }
-            if (
-
-
-
+            if (agentContext.lastStreamCall != null &&
+                agentContext.streamBuffer != null) {
+                const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
+                if (timeSinceLastCall < agentContext.streamBuffer) {
+                    const timeToWait = Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
+                        1000;
                    await sleep(timeToWait);
                }
            }
-
+            agentContext.lastStreamCall = Date.now();
            let result;
-
-
-
-
-
-
-
-
+            const fallbacks = agentContext.clientOptions?.fallbacks ??
+                [];
+            try {
+                result = await this.attemptInvoke({
+                    currentModel: model,
+                    finalMessages,
+                    provider: agentContext.provider,
+                    tools: agentContext.tools,
+                }, config);
+            }
+            catch (primaryError) {
+                let lastError = primaryError;
+                for (const fb of fallbacks) {
+                    try {
+                        let model = this.getNewModel({
+                            provider: fb.provider,
+                            clientOptions: fb.clientOptions,
+                        });
+                        const bindableTools = agentContext.tools;
+                        model = (!bindableTools || bindableTools.length === 0
+                            ? model
+                            : model.bindTools(bindableTools));
+                        result = await this.attemptInvoke({
+                            currentModel: model,
+                            finalMessages,
+                            provider: fb.provider,
+                            tools: agentContext.tools,
+                        }, config);
+                        lastError = undefined;
+                        break;
                    }
-
-
+                    catch (e) {
+                        lastError = e;
+                        continue;
                    }
                }
-
-
-            }
-            else {
-                const finalMessage = (await this.boundModel.invoke(finalMessages, config));
-                if ((finalMessage.tool_calls?.length ?? 0) > 0) {
-                    finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => {
-                        if (!tool_call.name) {
-                            return false;
-                        }
-                        return true;
-                    });
+                if (lastError !== undefined) {
+                    throw lastError;
                }
-                result = { messages: [finalMessage] };
            }
-
+            if (!result) {
+                throw new Error('No result after model invocation');
+            }
+            agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
            this.cleanupSignalListener();
            return result;
        };
    }
-
+    createAgentNode(agentId) {
+        const agentContext = this.agentContexts.get(agentId);
+        if (!agentContext) {
+            throw new Error(`Agent context not found for agentId: ${agentId}`);
+        }
+        let currentModel = this.initializeModel({
+            tools: agentContext.tools,
+            provider: agentContext.provider,
+            clientOptions: agentContext.clientOptions,
+        });
+        if (agentContext.systemRunnable) {
+            currentModel = agentContext.systemRunnable.pipe(currentModel);
+        }
+        const agentNode = `${AGENT}${agentId}`;
+        const toolNode = `${TOOLS}${agentId}`;
        const routeMessage = (state, config) => {
            this.config = config;
-
-            // if (!lastMessage?.tool_calls?.length) {
-            //     return END;
-            // }
-            // return TOOLS;
-            return toolsCondition(state);
+            return toolsCondition(state, toolNode, this.invokedToolIds);
        };
-        const
-
-
-
-
-
-
-            .
-
+        const StateAnnotation = Annotation.Root({
+            messages: Annotation({
+                reducer: messagesStateReducer,
+                default: () => [],
+            }),
+        });
+        const workflow = new StateGraph(StateAnnotation)
+            .addNode(agentNode, this.createCallModel(agentId, currentModel))
+            .addNode(toolNode, this.initializeTools({
+            currentTools: agentContext.tools,
+            currentToolMap: agentContext.toolMap,
+        }))
+            .addEdge(START, agentNode)
+            .addConditionalEdges(agentNode, routeMessage)
+            .addEdge(toolNode, agentContext.toolEnd ? END : agentNode);
+        // Cast to unknown to avoid tight coupling to external types; options are opt-in
+        return workflow.compile(this.compileOptions);
+    }
+    createWorkflow() {
+        /** Use the default (first) agent for now */
+        const agentNode = this.createAgentNode(this.defaultAgentId);
+        const StateAnnotation = Annotation.Root({
+            messages: Annotation({
+                reducer: (a, b) => {
+                    if (!a.length) {
+                        this.startIndex = a.length + b.length;
+                    }
+                    const result = messagesStateReducer(a, b);
+                    this.messages = result;
+                    return result;
+                },
+                default: () => [],
+            }),
+        });
+        const workflow = new StateGraph(StateAnnotation)
+            .addNode(this.defaultAgentId, agentNode, { ends: [END] })
+            .addEdge(START, this.defaultAgentId)
+            .compile();
+        return workflow;
    }
    /* Dispatchers */
    /**
     * Dispatches a run step to the client, returns the step ID
     */
-    dispatchRunStep(stepKey, stepDetails) {
+    async dispatchRunStep(stepKey, stepDetails) {
        if (!this.config) {
            throw new Error('No config provided');
        }
@@ -446,17 +536,21 @@ class StandardGraph extends Graph {
        }
        this.contentData.push(runStep);
        this.contentIndexMap.set(stepId, runStep.index);
-
+        await safeDispatchCustomEvent(GraphEvents.ON_RUN_STEP, runStep, this.config);
        return stepId;
    }
-    handleToolCallCompleted(data, metadata) {
+    async handleToolCallCompleted(data, metadata, omitOutput) {
        if (!this.config) {
            throw new Error('No config provided');
        }
        if (!data.output) {
            return;
        }
-        const { input, output } = data;
+        const { input, output: _output } = data;
+        if (_output?.lg_name === 'Command') {
+            return;
+        }
+        const output = _output;
        const { tool_call_id } = output;
        const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
        if (!stepId) {
@@ -466,17 +560,20 @@ class StandardGraph extends Graph {
        if (!runStep) {
            throw new Error(`No run step found for stepId ${stepId}`);
        }
+        const dispatchedOutput = typeof output.content === 'string'
+            ? output.content
+            : JSON.stringify(output.content);
        const args = typeof input === 'string' ? input : input.input;
        const tool_call = {
            args: typeof args === 'string' ? args : JSON.stringify(args),
            name: output.name ?? '',
            id: output.tool_call_id,
-            output:
-                ? output.content
-                : JSON.stringify(output.content),
+            output: omitOutput === true ? '' : dispatchedOutput,
            progress: 1,
        };
-        this.handlerRegistry
+        await this.handlerRegistry
+            ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
+            ?.handle(GraphEvents.ON_RUN_STEP_COMPLETED, {
            result: {
                id: stepId,
                index: runStep.index,
@@ -489,7 +586,7 @@ class StandardGraph extends Graph {
     * Static version of handleToolCallError to avoid creating strong references
     * that prevent garbage collection
     */
-    static handleToolCallErrorStatic(graph, data, metadata) {
+    static async handleToolCallErrorStatic(graph, data, metadata) {
        if (!graph.config) {
            throw new Error('No config provided');
        }
@@ -513,7 +610,7 @@ class StandardGraph extends Graph {
            output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
            progress: 1,
        };
-        graph.handlerRegistry
+        await graph.handlerRegistry
            ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
            ?.handle(GraphEvents.ON_RUN_STEP_COMPLETED, {
            result: {
@@ -528,10 +625,10 @@ class StandardGraph extends Graph {
     * Instance method that delegates to the static method
     * Kept for backward compatibility
     */
-    handleToolCallError(data, metadata) {
-        StandardGraph.handleToolCallErrorStatic(this, data, metadata);
+    async handleToolCallError(data, metadata) {
+        await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
    }
-    dispatchRunStepDelta(id, delta) {
+    async dispatchRunStepDelta(id, delta) {
        if (!this.config) {
            throw new Error('No config provided');
        }
@@ -542,9 +639,9 @@ class StandardGraph extends Graph {
            id,
            delta,
        };
-
+        await safeDispatchCustomEvent(GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
    }
-    dispatchMessageDelta(id, delta) {
+    async dispatchMessageDelta(id, delta) {
        if (!this.config) {
            throw new Error('No config provided');
        }
@@ -552,9 +649,9 @@ class StandardGraph extends Graph {
            id,
            delta,
        };
-
+        await safeDispatchCustomEvent(GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
    }
-    dispatchReasoningDelta = (stepId, delta) => {
+    dispatchReasoningDelta = async (stepId, delta) => {
        if (!this.config) {
            throw new Error('No config provided');
        }
@@ -562,7 +659,7 @@ class StandardGraph extends Graph {
            id: stepId,
            delta,
        };
-
+        await safeDispatchCustomEvent(GraphEvents.ON_REASONING_DELTA, reasoningDelta, this.config);
    };
 }
 
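The `Graph.mjs` hunks above replace `StandardGraph`'s single-provider fields (`provider`, `clientOptions`, `boundModel`, `systemMessage`, `tools`) with per-agent `AgentContext` objects keyed by `agentId`, and `createAgentNode()`/`createWorkflow()` now build the LangGraph `StateGraph` from those contexts. For orientation only, here is a minimal TypeScript sketch of what driving the new constructor surface might look like, inferred solely from the constructor destructuring and `AgentContext.fromConfig()` calls visible in this diff; the import specifiers, the `tokenCounter` signature, and every agent-config field other than `agentId` are assumptions, not the published 3.0.0-rc2 API.

```ts
// Hypothetical sketch: field names other than `agentId` (provider, clientOptions,
// instructions, tools) are assumptions mirroring the AgentContext fields
// referenced in the Graph.mjs diff above.
import { StandardGraph, Providers } from '@librechat/agents'; // assumed export path

const controller = new AbortController();

const graph = new StandardGraph({
  runId: 'run_123',
  signal: controller.signal,
  // Placeholder counter; the real tokenCounter contract is not shown in this diff.
  tokenCounter: (text: string) => Math.ceil(text.length / 4),
  indexTokenCountMap: {},
  agents: [
    {
      agentId: 'researcher',                   // namespaces the agent/tool graph nodes
      provider: Providers.OPENAI,              // assumed field
      clientOptions: { model: 'gpt-4o-mini' }, // assumed field
      instructions: 'You are a helpful research assistant.', // assumed field
      tools: [],                               // assumed field; bound via model.bindTools()
    },
  ],
});

// createWorkflow() compiles a StateGraph whose node names are derived from
// `${AGENT}${agentId}` / `${TOOLS}${agentId}`, per createAgentNode() above.
const workflow = graph.createWorkflow();
```

Multiple entries in `agents` register additional contexts, but per the comment in `createWorkflow()` only the default (first) agent is wired there; the new `MultiAgentGraph` (see `package/src/graphs/MultiAgentGraph.ts` in the file list) presumably covers the multi-agent topologies exercised by the new `multi-agent-*` and handoff scripts.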