@illuma-ai/agents 1.1.21 → 1.1.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/graphs/Graph.cjs +12 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
- package/dist/cjs/run.cjs +20 -9
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +12 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
- package/dist/esm/run.mjs +20 -9
- package/dist/esm/run.mjs.map +1 -1
- package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
- package/package.json +1 -1
- package/src/graphs/Graph.ts +12 -1
- package/src/graphs/MultiAgentGraph.ts +105 -1
- package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
- package/src/run.ts +20 -11
- package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
- package/src/agents/AgentContext.js +0 -782
- package/src/agents/AgentContext.test.js +0 -421
- package/src/agents/__tests__/AgentContext.test.js +0 -678
- package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
- package/src/common/enum.js +0 -192
- package/src/common/index.js +0 -3
- package/src/events.js +0 -166
- package/src/graphs/Graph.js +0 -1857
- package/src/graphs/MultiAgentGraph.js +0 -1092
- package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
- package/src/graphs/__tests__/structured-output.test.js +0 -144
- package/src/graphs/contextManagement.e2e.test.js +0 -718
- package/src/graphs/contextManagement.test.js +0 -485
- package/src/graphs/handoffValidation.test.js +0 -276
- package/src/graphs/index.js +0 -3
- package/src/index.js +0 -28
- package/src/instrumentation.js +0 -21
- package/src/llm/anthropic/index.js +0 -319
- package/src/llm/anthropic/types.js +0 -46
- package/src/llm/anthropic/utils/message_inputs.js +0 -627
- package/src/llm/anthropic/utils/message_outputs.js +0 -290
- package/src/llm/anthropic/utils/output_parsers.js +0 -89
- package/src/llm/anthropic/utils/tools.js +0 -25
- package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
- package/src/llm/bedrock/index.js +0 -303
- package/src/llm/bedrock/types.js +0 -2
- package/src/llm/bedrock/utils/index.js +0 -6
- package/src/llm/bedrock/utils/message_inputs.js +0 -463
- package/src/llm/bedrock/utils/message_outputs.js +0 -269
- package/src/llm/fake.js +0 -92
- package/src/llm/google/index.js +0 -215
- package/src/llm/google/types.js +0 -12
- package/src/llm/google/utils/common.js +0 -670
- package/src/llm/google/utils/tools.js +0 -111
- package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
- package/src/llm/openai/index.js +0 -1033
- package/src/llm/openai/types.js +0 -2
- package/src/llm/openai/utils/index.js +0 -756
- package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
- package/src/llm/openrouter/index.js +0 -261
- package/src/llm/openrouter/reasoning.test.js +0 -181
- package/src/llm/providers.js +0 -36
- package/src/llm/text.js +0 -65
- package/src/llm/vertexai/index.js +0 -402
- package/src/messages/__tests__/tools.test.js +0 -392
- package/src/messages/cache.js +0 -404
- package/src/messages/cache.test.js +0 -1167
- package/src/messages/content.js +0 -48
- package/src/messages/content.test.js +0 -314
- package/src/messages/core.js +0 -359
- package/src/messages/ensureThinkingBlock.test.js +0 -997
- package/src/messages/format.js +0 -973
- package/src/messages/formatAgentMessages.test.js +0 -2278
- package/src/messages/formatAgentMessages.tools.test.js +0 -362
- package/src/messages/formatMessage.test.js +0 -608
- package/src/messages/ids.js +0 -18
- package/src/messages/index.js +0 -9
- package/src/messages/labelContentByAgent.test.js +0 -725
- package/src/messages/prune.js +0 -438
- package/src/messages/reducer.js +0 -60
- package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
- package/src/messages/summarize.js +0 -146
- package/src/messages/summarize.test.js +0 -332
- package/src/messages/tools.js +0 -90
- package/src/mockStream.js +0 -81
- package/src/prompts/collab.js +0 -7
- package/src/prompts/index.js +0 -3
- package/src/prompts/taskmanager.js +0 -58
- package/src/run.js +0 -427
- package/src/schemas/index.js +0 -3
- package/src/schemas/schema-preparation.test.js +0 -370
- package/src/schemas/validate.js +0 -314
- package/src/schemas/validate.test.js +0 -264
- package/src/scripts/abort.js +0 -127
- package/src/scripts/ant_web_search.js +0 -130
- package/src/scripts/ant_web_search_edge_case.js +0 -133
- package/src/scripts/ant_web_search_error_edge_case.js +0 -119
- package/src/scripts/args.js +0 -41
- package/src/scripts/bedrock-cache-debug.js +0 -186
- package/src/scripts/bedrock-content-aggregation-test.js +0 -195
- package/src/scripts/bedrock-merge-test.js +0 -80
- package/src/scripts/bedrock-parallel-tools-test.js +0 -150
- package/src/scripts/caching.js +0 -106
- package/src/scripts/cli.js +0 -152
- package/src/scripts/cli2.js +0 -119
- package/src/scripts/cli3.js +0 -163
- package/src/scripts/cli4.js +0 -165
- package/src/scripts/cli5.js +0 -165
- package/src/scripts/code_exec.js +0 -171
- package/src/scripts/code_exec_files.js +0 -180
- package/src/scripts/code_exec_multi_session.js +0 -185
- package/src/scripts/code_exec_ptc.js +0 -265
- package/src/scripts/code_exec_session.js +0 -217
- package/src/scripts/code_exec_simple.js +0 -120
- package/src/scripts/content.js +0 -111
- package/src/scripts/empty_input.js +0 -125
- package/src/scripts/handoff-test.js +0 -96
- package/src/scripts/image.js +0 -138
- package/src/scripts/memory.js +0 -83
- package/src/scripts/multi-agent-chain.js +0 -271
- package/src/scripts/multi-agent-conditional.js +0 -185
- package/src/scripts/multi-agent-document-review-chain.js +0 -171
- package/src/scripts/multi-agent-hybrid-flow.js +0 -264
- package/src/scripts/multi-agent-parallel-start.js +0 -214
- package/src/scripts/multi-agent-parallel.js +0 -346
- package/src/scripts/multi-agent-sequence.js +0 -184
- package/src/scripts/multi-agent-supervisor.js +0 -324
- package/src/scripts/multi-agent-test.js +0 -147
- package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
- package/src/scripts/parallel-full-metadata-test.js +0 -176
- package/src/scripts/parallel-tools-test.js +0 -256
- package/src/scripts/programmatic_exec.js +0 -277
- package/src/scripts/programmatic_exec_agent.js +0 -168
- package/src/scripts/search.js +0 -118
- package/src/scripts/sequential-full-metadata-test.js +0 -143
- package/src/scripts/simple.js +0 -174
- package/src/scripts/single-agent-metadata-test.js +0 -152
- package/src/scripts/stream.js +0 -113
- package/src/scripts/test-custom-prompt-key.js +0 -132
- package/src/scripts/test-handoff-input.js +0 -143
- package/src/scripts/test-handoff-preamble.js +0 -227
- package/src/scripts/test-handoff-steering.js +0 -353
- package/src/scripts/test-multi-agent-list-handoff.js +0 -318
- package/src/scripts/test-parallel-agent-labeling.js +0 -253
- package/src/scripts/test-parallel-handoffs.js +0 -229
- package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
- package/src/scripts/test-thinking-handoff.js +0 -132
- package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
- package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
- package/src/scripts/test-tools-before-handoff.js +0 -187
- package/src/scripts/test_code_api.js +0 -263
- package/src/scripts/thinking-bedrock.js +0 -128
- package/src/scripts/thinking-vertexai.js +0 -130
- package/src/scripts/thinking.js +0 -134
- package/src/scripts/tool_search.js +0 -114
- package/src/scripts/tools.js +0 -125
- package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
- package/src/specs/agent-handoffs.test.js +0 -924
- package/src/specs/anthropic.simple.test.js +0 -287
- package/src/specs/azure.simple.test.js +0 -381
- package/src/specs/cache.simple.test.js +0 -282
- package/src/specs/custom-event-await.test.js +0 -148
- package/src/specs/deepseek.simple.test.js +0 -189
- package/src/specs/emergency-prune.test.js +0 -308
- package/src/specs/moonshot.simple.test.js +0 -237
- package/src/specs/observability.integration.test.js +0 -1337
- package/src/specs/openai.simple.test.js +0 -233
- package/src/specs/openrouter.simple.test.js +0 -202
- package/src/specs/prune.test.js +0 -733
- package/src/specs/reasoning.test.js +0 -144
- package/src/specs/spec.utils.js +0 -4
- package/src/specs/thinking-handoff.test.js +0 -486
- package/src/specs/thinking-prune.test.js +0 -600
- package/src/specs/token-distribution-edge-case.test.js +0 -246
- package/src/specs/token-memoization.test.js +0 -32
- package/src/specs/tokens.test.js +0 -49
- package/src/specs/tool-error.test.js +0 -139
- package/src/splitStream.js +0 -204
- package/src/splitStream.test.js +0 -504
- package/src/stream.js +0 -650
- package/src/stream.test.js +0 -225
- package/src/test/mockTools.js +0 -340
- package/src/tools/BrowserTools.js +0 -245
- package/src/tools/Calculator.js +0 -38
- package/src/tools/Calculator.test.js +0 -225
- package/src/tools/CodeExecutor.js +0 -233
- package/src/tools/ProgrammaticToolCalling.js +0 -602
- package/src/tools/StreamingToolCallBuffer.js +0 -179
- package/src/tools/ToolNode.js +0 -930
- package/src/tools/ToolSearch.js +0 -904
- package/src/tools/__tests__/BrowserTools.test.js +0 -306
- package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
- package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
- package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
- package/src/tools/__tests__/ToolApproval.test.js +0 -675
- package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
- package/src/tools/__tests__/ToolNode.session.test.js +0 -319
- package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
- package/src/tools/__tests__/ToolSearch.test.js +0 -812
- package/src/tools/__tests__/handlers.test.js +0 -799
- package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
- package/src/tools/handlers.js +0 -306
- package/src/tools/schema.js +0 -25
- package/src/tools/search/anthropic.js +0 -34
- package/src/tools/search/content.js +0 -116
- package/src/tools/search/content.test.js +0 -133
- package/src/tools/search/firecrawl.js +0 -173
- package/src/tools/search/format.js +0 -198
- package/src/tools/search/highlights.js +0 -241
- package/src/tools/search/index.js +0 -3
- package/src/tools/search/jina-reranker.test.js +0 -106
- package/src/tools/search/rerankers.js +0 -165
- package/src/tools/search/schema.js +0 -102
- package/src/tools/search/search.js +0 -561
- package/src/tools/search/serper-scraper.js +0 -126
- package/src/tools/search/test.js +0 -129
- package/src/tools/search/tool.js +0 -453
- package/src/tools/search/types.js +0 -2
- package/src/tools/search/utils.js +0 -59
- package/src/types/graph.js +0 -24
- package/src/types/graph.test.js +0 -192
- package/src/types/index.js +0 -7
- package/src/types/llm.js +0 -2
- package/src/types/messages.js +0 -2
- package/src/types/run.js +0 -2
- package/src/types/stream.js +0 -2
- package/src/types/tools.js +0 -2
- package/src/utils/contextAnalytics.js +0 -79
- package/src/utils/contextAnalytics.test.js +0 -166
- package/src/utils/events.js +0 -26
- package/src/utils/graph.js +0 -11
- package/src/utils/handlers.js +0 -65
- package/src/utils/index.js +0 -10
- package/src/utils/llm.js +0 -21
- package/src/utils/llmConfig.js +0 -205
- package/src/utils/logging.js +0 -37
- package/src/utils/misc.js +0 -51
- package/src/utils/run.js +0 -69
- package/src/utils/schema.js +0 -21
- package/src/utils/title.js +0 -119
- package/src/utils/tokens.js +0 -92
- package/src/utils/toonFormat.js +0 -379
|
@@ -1,276 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Unit tests for MultiAgentGraph handoff validation and pre-handoff compaction.
|
|
3
|
-
*
|
|
4
|
-
* These tests exercise the handoff logic (destination validation, compaction threshold,
|
|
5
|
-
* briefing generation, error formatting) without instantiating the full MultiAgentGraph.
|
|
6
|
-
* We test the helper functions directly and verify the behavioral patterns that
|
|
7
|
-
* the graph's agent node closure relies on.
|
|
8
|
-
*/
|
|
9
|
-
import { HumanMessage, AIMessage, SystemMessage, ToolMessage, } from '@langchain/core/messages';
|
|
10
|
-
import { summarize, createEmergencySummary, buildFullSummaryPrompt, formatMessagesForSummary, } from '@/messages';
|
|
11
|
-
// ---------------------------------------------------------------------------
|
|
12
|
-
// 1. Handoff destination validation logic
|
|
13
|
-
// ---------------------------------------------------------------------------
|
|
14
|
-
describe('handoff destination validation', () => {
|
|
15
|
-
it('detects non-existent agent in contexts map', () => {
|
|
16
|
-
const agentContexts = new Map([
|
|
17
|
-
['agent-a', { agentId: 'agent-a', name: 'Agent A' }],
|
|
18
|
-
['agent-b', { agentId: 'agent-b', name: 'Agent B' }],
|
|
19
|
-
]);
|
|
20
|
-
const handoffDest = 'agent-c';
|
|
21
|
-
expect(agentContexts.has(handoffDest)).toBe(false);
|
|
22
|
-
const availableAgents = Array.from(agentContexts.keys()).join(', ');
|
|
23
|
-
expect(availableAgents).toBe('agent-a, agent-b');
|
|
24
|
-
});
|
|
25
|
-
it('passes validation for existing agent', () => {
|
|
26
|
-
const agentContexts = new Map([
|
|
27
|
-
['agent-a', { agentId: 'agent-a', name: 'Agent A' }],
|
|
28
|
-
]);
|
|
29
|
-
expect(agentContexts.has('agent-a')).toBe(true);
|
|
30
|
-
});
|
|
31
|
-
it('handles empty contexts map', () => {
|
|
32
|
-
const agentContexts = new Map();
|
|
33
|
-
expect(agentContexts.has('any-agent')).toBe(false);
|
|
34
|
-
const availableAgents = Array.from(agentContexts.keys()).join(', ');
|
|
35
|
-
expect(availableAgents).toBe('');
|
|
36
|
-
});
|
|
37
|
-
});
|
|
38
|
-
// ---------------------------------------------------------------------------
|
|
39
|
-
// 2. Pre-handoff compaction threshold
|
|
40
|
-
// ---------------------------------------------------------------------------
|
|
41
|
-
describe('pre-handoff compaction threshold', () => {
|
|
42
|
-
it('triggers compaction when context exceeds 70% of receiver budget', () => {
|
|
43
|
-
const receiverBudget = 100000;
|
|
44
|
-
const currentSize = 75000; // 75%
|
|
45
|
-
expect(currentSize > receiverBudget * 0.7).toBe(true);
|
|
46
|
-
});
|
|
47
|
-
it('skips compaction when context fits within 70% of receiver budget', () => {
|
|
48
|
-
const receiverBudget = 100000;
|
|
49
|
-
const currentSize = 60000; // 60%
|
|
50
|
-
expect(currentSize > receiverBudget * 0.7).toBe(false);
|
|
51
|
-
});
|
|
52
|
-
it('triggers compaction at exactly the boundary (>70%)', () => {
|
|
53
|
-
const receiverBudget = 100000;
|
|
54
|
-
// Exactly 70% should NOT trigger (condition is strictly >)
|
|
55
|
-
expect(70000 > receiverBudget * 0.7).toBe(false);
|
|
56
|
-
// Just above 70% should trigger
|
|
57
|
-
expect(70001 > receiverBudget * 0.7).toBe(true);
|
|
58
|
-
});
|
|
59
|
-
it('computes summary budget as 20% of receiver budget', () => {
|
|
60
|
-
const receiverBudget = 100000;
|
|
61
|
-
const summaryBudget = Math.floor(receiverBudget * 0.2);
|
|
62
|
-
expect(summaryBudget).toBe(20000);
|
|
63
|
-
});
|
|
64
|
-
});
|
|
65
|
-
// ---------------------------------------------------------------------------
|
|
66
|
-
// 3. Handoff briefing via summarize (multi-agent aware)
|
|
67
|
-
// ---------------------------------------------------------------------------
|
|
68
|
-
describe('handoff briefing generation', () => {
|
|
69
|
-
it('generates multi-agent aware briefing', async () => {
|
|
70
|
-
const messages = [
|
|
71
|
-
new HumanMessage('Analyze the data'),
|
|
72
|
-
new AIMessage('I found 3 key patterns...'),
|
|
73
|
-
new HumanMessage('Can you elaborate on pattern 2?'),
|
|
74
|
-
new AIMessage('Pattern 2 shows...'),
|
|
75
|
-
];
|
|
76
|
-
const callback = jest.fn().mockResolvedValue('## 5. Agent Workflow State\n- Agent A analyzed data and found 3 patterns');
|
|
77
|
-
const result = await summarize(messages, callback, {
|
|
78
|
-
isMultiAgent: true,
|
|
79
|
-
agentWorkflowState: {
|
|
80
|
-
currentAgentId: 'agent-b',
|
|
81
|
-
agentChain: ['agent-a', 'agent-b'],
|
|
82
|
-
pendingAgents: [],
|
|
83
|
-
},
|
|
84
|
-
summaryBudget: 2000,
|
|
85
|
-
});
|
|
86
|
-
// Without a tokenCounter the budget check is skipped, so full tier is returned
|
|
87
|
-
expect(result.tier).toBe('full');
|
|
88
|
-
expect(result.summary).toContain('Agent Workflow State');
|
|
89
|
-
// Verify callback was called with the full prompt containing agent workflow info
|
|
90
|
-
expect(callback).toHaveBeenCalledWith(expect.stringContaining('Agent Workflow State'), expect.any(Number));
|
|
91
|
-
});
|
|
92
|
-
it('includes agent chain in the prompt', () => {
|
|
93
|
-
const conversation = formatMessagesForSummary([
|
|
94
|
-
new HumanMessage('Hello'),
|
|
95
|
-
]);
|
|
96
|
-
const prompt = buildFullSummaryPrompt(conversation, {
|
|
97
|
-
isMultiAgent: true,
|
|
98
|
-
agentWorkflowState: {
|
|
99
|
-
currentAgentId: 'agent-b',
|
|
100
|
-
agentChain: ['agent-a', 'agent-b'],
|
|
101
|
-
pendingAgents: ['agent-c'],
|
|
102
|
-
},
|
|
103
|
-
});
|
|
104
|
-
expect(prompt).toContain('Current agent: agent-b');
|
|
105
|
-
expect(prompt).toContain('Agent chain: agent-a -> agent-b');
|
|
106
|
-
expect(prompt).toContain('Pending agents: agent-c');
|
|
107
|
-
});
|
|
108
|
-
it('shows N/A for single-agent conversations', () => {
|
|
109
|
-
const conversation = formatMessagesForSummary([
|
|
110
|
-
new HumanMessage('Hello'),
|
|
111
|
-
]);
|
|
112
|
-
const prompt = buildFullSummaryPrompt(conversation);
|
|
113
|
-
expect(prompt).toContain('N/A (single-agent conversation)');
|
|
114
|
-
});
|
|
115
|
-
});
|
|
116
|
-
// ---------------------------------------------------------------------------
|
|
117
|
-
// 4. Emergency briefing fallback
|
|
118
|
-
// ---------------------------------------------------------------------------
|
|
119
|
-
describe('emergency briefing fallback', () => {
|
|
120
|
-
it('generates briefing without LLM when callback fails', async () => {
|
|
121
|
-
const messages = [
|
|
122
|
-
new HumanMessage('Analyze this'),
|
|
123
|
-
new AIMessage('Here is the analysis...'),
|
|
124
|
-
];
|
|
125
|
-
const failingCallback = jest.fn().mockRejectedValue(new Error('LLM timeout'));
|
|
126
|
-
const result = await summarize(messages, failingCallback);
|
|
127
|
-
expect(result.tier).toBe('emergency');
|
|
128
|
-
expect(result.summary).toContain('[Emergency Context Summary]');
|
|
129
|
-
expect(result.summary).toContain('Analyze this');
|
|
130
|
-
});
|
|
131
|
-
it('falls to emergency when both full and simple callbacks fail', async () => {
|
|
132
|
-
let callCount = 0;
|
|
133
|
-
const failingCallback = jest.fn().mockImplementation(() => {
|
|
134
|
-
callCount++;
|
|
135
|
-
throw new Error(`Failure #${callCount}`);
|
|
136
|
-
});
|
|
137
|
-
const messages = [
|
|
138
|
-
new HumanMessage('Do something'),
|
|
139
|
-
new AIMessage('Working on it...'),
|
|
140
|
-
new ToolMessage({ content: 'result', tool_call_id: 'tc-1', name: 'search' }),
|
|
141
|
-
];
|
|
142
|
-
const result = await summarize(messages, failingCallback);
|
|
143
|
-
expect(result.tier).toBe('emergency');
|
|
144
|
-
expect(result.summary).toContain('Do something');
|
|
145
|
-
expect(result.summary).toContain('Working on it...');
|
|
146
|
-
expect(result.summary).toContain('search');
|
|
147
|
-
expect(result.messagesCompacted).toBe(3);
|
|
148
|
-
});
|
|
149
|
-
it('createEmergencySummary captures first user message and last AI message', () => {
|
|
150
|
-
const messages = [
|
|
151
|
-
new HumanMessage('First user message'),
|
|
152
|
-
new AIMessage('First AI response'),
|
|
153
|
-
new HumanMessage('Second user message'),
|
|
154
|
-
new AIMessage('Second AI response'),
|
|
155
|
-
];
|
|
156
|
-
const summary = createEmergencySummary(messages);
|
|
157
|
-
expect(summary).toContain('[Emergency Context Summary]');
|
|
158
|
-
expect(summary).toContain('Original request: First user message');
|
|
159
|
-
expect(summary).toContain('Last response: Second AI response');
|
|
160
|
-
expect(summary).toContain('Tools used: none');
|
|
161
|
-
expect(summary).toContain('Messages compacted: 4');
|
|
162
|
-
});
|
|
163
|
-
it('createEmergencySummary collects unique tool names', () => {
|
|
164
|
-
const messages = [
|
|
165
|
-
new HumanMessage('Test'),
|
|
166
|
-
new ToolMessage({ content: 'ok', tool_call_id: 'tc-1', name: 'search' }),
|
|
167
|
-
new ToolMessage({ content: 'ok', tool_call_id: 'tc-2', name: 'search' }),
|
|
168
|
-
new ToolMessage({ content: 'ok', tool_call_id: 'tc-3', name: 'calculator' }),
|
|
169
|
-
];
|
|
170
|
-
const summary = createEmergencySummary(messages);
|
|
171
|
-
expect(summary).toContain('search');
|
|
172
|
-
expect(summary).toContain('calculator');
|
|
173
|
-
});
|
|
174
|
-
});
|
|
175
|
-
// ---------------------------------------------------------------------------
|
|
176
|
-
// 5. Handoff error ToolMessage format
|
|
177
|
-
// ---------------------------------------------------------------------------
|
|
178
|
-
describe('handoff error message format', () => {
|
|
179
|
-
it('creates properly formatted error ToolMessage', () => {
|
|
180
|
-
const availableAgents = 'agent-a, agent-b, agent-c';
|
|
181
|
-
const handoffDest = 'nonexistent-agent';
|
|
182
|
-
const errorMsg = new ToolMessage({
|
|
183
|
-
content: `Transfer failed: agent "${handoffDest}" does not exist. Available agents: ${availableAgents}. Please choose a valid agent to transfer to.`,
|
|
184
|
-
tool_call_id: 'call-123',
|
|
185
|
-
name: `transfer_to_${handoffDest}`,
|
|
186
|
-
});
|
|
187
|
-
expect(errorMsg.content).toContain('does not exist');
|
|
188
|
-
expect(errorMsg.content).toContain(availableAgents);
|
|
189
|
-
expect(errorMsg.tool_call_id).toBe('call-123');
|
|
190
|
-
expect(errorMsg.name).toBe('transfer_to_nonexistent-agent');
|
|
191
|
-
});
|
|
192
|
-
it('error message includes all available agents', () => {
|
|
193
|
-
const agentContexts = new Map([
|
|
194
|
-
['alpha', {}],
|
|
195
|
-
['beta', {}],
|
|
196
|
-
['gamma', {}],
|
|
197
|
-
]);
|
|
198
|
-
const availableAgents = Array.from(agentContexts.keys()).join(', ');
|
|
199
|
-
const errorMsg = new ToolMessage({
|
|
200
|
-
content: `Transfer failed: agent "delta" does not exist. Available agents: ${availableAgents}. Please choose a valid agent to transfer to.`,
|
|
201
|
-
tool_call_id: 'call-456',
|
|
202
|
-
name: 'lc_transfer_to_delta',
|
|
203
|
-
});
|
|
204
|
-
expect(errorMsg.content).toContain('alpha, beta, gamma');
|
|
205
|
-
expect(errorMsg.getType()).toBe('tool');
|
|
206
|
-
});
|
|
207
|
-
});
|
|
208
|
-
// ---------------------------------------------------------------------------
|
|
209
|
-
// 6. Compacted handoff message structure
|
|
210
|
-
// ---------------------------------------------------------------------------
|
|
211
|
-
describe('compacted handoff message structure', () => {
|
|
212
|
-
it('keeps briefing + last 3 messages', () => {
|
|
213
|
-
const originalMessages = [
|
|
214
|
-
new HumanMessage('msg 1'),
|
|
215
|
-
new AIMessage('msg 2'),
|
|
216
|
-
new HumanMessage('msg 3'),
|
|
217
|
-
new AIMessage('msg 4'),
|
|
218
|
-
new HumanMessage('msg 5'),
|
|
219
|
-
new AIMessage('msg 6'),
|
|
220
|
-
];
|
|
221
|
-
const briefingMsg = new SystemMessage('[Handoff Briefing]\nAgent A completed analysis');
|
|
222
|
-
const keepCount = Math.min(3, originalMessages.length);
|
|
223
|
-
const compacted = [
|
|
224
|
-
briefingMsg,
|
|
225
|
-
...originalMessages.slice(originalMessages.length - keepCount),
|
|
226
|
-
];
|
|
227
|
-
expect(compacted).toHaveLength(4); // briefing + 3 messages
|
|
228
|
-
expect(compacted[0].getType()).toBe('system');
|
|
229
|
-
expect(compacted[0].content).toContain('Handoff Briefing');
|
|
230
|
-
expect(compacted[3].getType()).toBe('ai'); // last message preserved
|
|
231
|
-
});
|
|
232
|
-
it('keeps all messages when fewer than 3 remain', () => {
|
|
233
|
-
const originalMessages = [
|
|
234
|
-
new HumanMessage('only message'),
|
|
235
|
-
new AIMessage('only response'),
|
|
236
|
-
];
|
|
237
|
-
const briefingMsg = new SystemMessage('[Handoff Briefing]\nBriefing text');
|
|
238
|
-
const keepCount = Math.min(3, originalMessages.length);
|
|
239
|
-
const compacted = [
|
|
240
|
-
briefingMsg,
|
|
241
|
-
...originalMessages.slice(originalMessages.length - keepCount),
|
|
242
|
-
];
|
|
243
|
-
// briefing + 2 original messages
|
|
244
|
-
expect(compacted).toHaveLength(3);
|
|
245
|
-
expect(compacted[1].content).toBe('only message');
|
|
246
|
-
expect(compacted[2].content).toBe('only response');
|
|
247
|
-
});
|
|
248
|
-
it('preserves the handoff tool message as the last message', () => {
|
|
249
|
-
const originalMessages = [
|
|
250
|
-
new HumanMessage('do X'),
|
|
251
|
-
new AIMessage('calling agent-b'),
|
|
252
|
-
new ToolMessage({
|
|
253
|
-
content: 'Transferred to agent-b',
|
|
254
|
-
tool_call_id: 'tc-handoff',
|
|
255
|
-
name: 'lc_transfer_to_agent-b',
|
|
256
|
-
}),
|
|
257
|
-
];
|
|
258
|
-
const briefingMsg = new SystemMessage('[Handoff Briefing]\nContext');
|
|
259
|
-
const keepCount = Math.min(3, originalMessages.length);
|
|
260
|
-
const compacted = [
|
|
261
|
-
briefingMsg,
|
|
262
|
-
...originalMessages.slice(originalMessages.length - keepCount),
|
|
263
|
-
];
|
|
264
|
-
const lastMsg = compacted[compacted.length - 1];
|
|
265
|
-
expect(lastMsg.getType()).toBe('tool');
|
|
266
|
-
expect(lastMsg.name).toContain('lc_transfer_to_');
|
|
267
|
-
});
|
|
268
|
-
it('briefing contains sender name in the expected format', () => {
|
|
269
|
-
const senderName = 'Research Agent';
|
|
270
|
-
const summaryText = 'User asked about market trends. Found 5 data points.';
|
|
271
|
-
const briefingMsg = new SystemMessage(`[Handoff Briefing from "${senderName}"]\n${summaryText}`);
|
|
272
|
-
expect(briefingMsg.content).toContain(`Handoff Briefing from "Research Agent"`);
|
|
273
|
-
expect(briefingMsg.content).toContain(summaryText);
|
|
274
|
-
});
|
|
275
|
-
});
|
|
276
|
-
//# sourceMappingURL=handoffValidation.test.js.map
|
package/src/graphs/index.js
DELETED
package/src/index.js
DELETED
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
/* Main Operations */
|
|
2
|
-
export * from './run';
|
|
3
|
-
export * from './stream';
|
|
4
|
-
export * from './splitStream';
|
|
5
|
-
export * from './events';
|
|
6
|
-
export * from './messages';
|
|
7
|
-
/* Graphs */
|
|
8
|
-
export * from './graphs';
|
|
9
|
-
/* Tools */
|
|
10
|
-
export * from './tools/Calculator';
|
|
11
|
-
export * from './tools/CodeExecutor';
|
|
12
|
-
export * from './tools/BrowserTools';
|
|
13
|
-
export * from './tools/ProgrammaticToolCalling';
|
|
14
|
-
export * from './tools/ToolSearch';
|
|
15
|
-
export * from './tools/ToolNode';
|
|
16
|
-
export * from './tools/schema';
|
|
17
|
-
export * from './tools/handlers';
|
|
18
|
-
export * from './tools/search';
|
|
19
|
-
/* Schemas */
|
|
20
|
-
export * from './schemas';
|
|
21
|
-
/* Misc. */
|
|
22
|
-
export * from './common';
|
|
23
|
-
export * from './utils';
|
|
24
|
-
/* LLM */
|
|
25
|
-
export { CustomOpenAIClient } from './llm/openai';
|
|
26
|
-
export { ChatOpenRouter } from './llm/openrouter';
|
|
27
|
-
export { getChatModelClass, llmProviders } from './llm/providers';
|
|
28
|
-
//# sourceMappingURL=index.js.map
|
package/src/instrumentation.js
DELETED
|
@@ -1,21 +0,0 @@
|
|
|
1
|
-
import { NodeSDK } from '@opentelemetry/sdk-node';
|
|
2
|
-
import { IllumaSpanProcessor } from '@illuma-ai/observability-otel';
|
|
3
|
-
import { isPresent } from '@/utils/misc';
|
|
4
|
-
// Support both ILLUMA_* and LANGFUSE_* env vars for backwards compatibility
|
|
5
|
-
const secretKey = process.env.ILLUMA_SECRET_KEY ?? process.env.LANGFUSE_SECRET_KEY;
|
|
6
|
-
const publicKey = process.env.ILLUMA_PUBLIC_KEY ?? process.env.LANGFUSE_PUBLIC_KEY;
|
|
7
|
-
const baseUrl = process.env.ILLUMA_BASE_URL ?? process.env.LANGFUSE_BASE_URL;
|
|
8
|
-
const environment = process.env.ILLUMA_ENVIRONMENT ?? process.env.LANGFUSE_TRACING_ENVIRONMENT ?? process.env.NODE_ENV ?? 'development';
|
|
9
|
-
if (isPresent(secretKey) && isPresent(publicKey) && isPresent(baseUrl)) {
|
|
10
|
-
const spanProcessor = new IllumaSpanProcessor({
|
|
11
|
-
publicKey,
|
|
12
|
-
secretKey,
|
|
13
|
-
baseUrl,
|
|
14
|
-
environment,
|
|
15
|
-
});
|
|
16
|
-
const sdk = new NodeSDK({
|
|
17
|
-
spanProcessors: [spanProcessor],
|
|
18
|
-
});
|
|
19
|
-
sdk.start();
|
|
20
|
-
}
|
|
21
|
-
//# sourceMappingURL=instrumentation.js.map
|
|
@@ -1,319 +0,0 @@
|
|
|
1
|
-
import { AIMessageChunk } from '@langchain/core/messages';
|
|
2
|
-
import { ChatAnthropicMessages } from '@langchain/anthropic';
|
|
3
|
-
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
4
|
-
import { _makeMessageChunkFromAnthropicEvent } from './utils/message_outputs';
|
|
5
|
-
import { _convertMessagesToAnthropicPayload } from './utils/message_inputs';
|
|
6
|
-
import { handleToolChoice } from './utils/tools';
|
|
7
|
-
import { TextStream } from '@/llm/text';
|
|
8
|
-
function _toolsInParams(params) {
|
|
9
|
-
return !!(params.tools && params.tools.length > 0);
|
|
10
|
-
}
|
|
11
|
-
function _documentsInParams(params) {
|
|
12
|
-
for (const message of params.messages ?? []) {
|
|
13
|
-
if (typeof message.content === 'string') {
|
|
14
|
-
continue;
|
|
15
|
-
}
|
|
16
|
-
for (const block of message.content ?? []) {
|
|
17
|
-
if (typeof block === 'object' &&
|
|
18
|
-
block != null &&
|
|
19
|
-
block.type === 'document' &&
|
|
20
|
-
block.citations != null &&
|
|
21
|
-
typeof block.citations === 'object' &&
|
|
22
|
-
block.citations.enabled) {
|
|
23
|
-
return true;
|
|
24
|
-
}
|
|
25
|
-
}
|
|
26
|
-
}
|
|
27
|
-
return false;
|
|
28
|
-
}
|
|
29
|
-
function _thinkingInParams(params) {
|
|
30
|
-
return !!(params.thinking &&
|
|
31
|
-
(params.thinking.type === 'enabled' || params.thinking.type === 'adaptive'));
|
|
32
|
-
}
|
|
33
|
-
function _compactionInParams(params) {
|
|
34
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
35
|
-
const cm = params.context_management;
|
|
36
|
-
return !!cm?.edits?.some((e) => e.type === 'compact_20260112');
|
|
37
|
-
}
|
|
38
|
-
function extractToken(chunk) {
|
|
39
|
-
if (typeof chunk.content === 'string') {
|
|
40
|
-
return [chunk.content, 'string'];
|
|
41
|
-
}
|
|
42
|
-
else if (Array.isArray(chunk.content) &&
|
|
43
|
-
chunk.content.length >= 1 &&
|
|
44
|
-
'input' in chunk.content[0]) {
|
|
45
|
-
return typeof chunk.content[0].input === 'string'
|
|
46
|
-
? [chunk.content[0].input, 'input']
|
|
47
|
-
: [JSON.stringify(chunk.content[0].input), 'input'];
|
|
48
|
-
}
|
|
49
|
-
else if (Array.isArray(chunk.content) &&
|
|
50
|
-
chunk.content.length >= 1 &&
|
|
51
|
-
'text' in chunk.content[0]) {
|
|
52
|
-
return [chunk.content[0].text, 'content'];
|
|
53
|
-
}
|
|
54
|
-
else if (Array.isArray(chunk.content) &&
|
|
55
|
-
chunk.content.length >= 1 &&
|
|
56
|
-
'thinking' in chunk.content[0]) {
|
|
57
|
-
return [chunk.content[0].thinking, 'content'];
|
|
58
|
-
}
|
|
59
|
-
return [undefined];
|
|
60
|
-
}
|
|
61
|
-
/**
 * Rebuild a message chunk so its token text is replaced by `text`,
 * preserving everything else about the chunk.
 *
 * - 'input' chunks are passed through untouched (tool inputs are not
 *   re-chunked).
 * - 'string' chunks get their string content swapped for `text`.
 * - 'content' chunks with a text / text_delta / thinking* block get that
 *   block's payload swapped for `text`.
 * Any other shape falls through to the original chunk.
 */
function cloneChunk(text, tokenType, chunk) {
    if (tokenType === 'input') {
        return chunk;
    }
    if (tokenType === 'string') {
        return new AIMessageChunk(Object.assign({}, chunk, { content: text }));
    }
    const block = chunk.content[0];
    if (tokenType === 'content') {
        const kind = block.type;
        if (kind === 'text' || kind === 'text_delta') {
            return new AIMessageChunk(Object.assign({}, chunk, {
                content: [Object.assign({}, block, { text })],
            }));
        }
        if (kind?.startsWith('thinking')) {
            return new AIMessageChunk(Object.assign({}, chunk, {
                content: [Object.assign({}, block, { thinking: text })],
            }));
        }
    }
    return chunk;
}
/**
 * Anthropic chat model wrapper that adds token-by-token re-chunked
 * streaming (via TextStream), stream usage accounting, and pass-through
 * of Illuma-specific request params (output_config, inference_geo,
 * context_management).
 */
export class CustomAnthropic extends ChatAnthropicMessages {
    // Per-token delay (ms) used by TextStream when re-chunking text; defaults to 25.
    _lc_stream_delay;
    // Last 'message_start' stream event seen (carries input-side usage).
    message_start;
    // Last 'message_delta' stream event seen (carries output-side usage).
    message_delta;
    // NOTE(review): reset in resetTokenEvents() and read in
    // _streamResponseChunks, but never set to true in this file — presumably
    // assigned elsewhere or vestigial; confirm before relying on it.
    tools_in_params;
    // Guard so stream usage is attached to at most one chunk per stream.
    emitted_usage;
    // Mirrors the Anthropic `top_k` sampling param (from fields.topK).
    top_k;
    // Default output_config merged with per-call outputConfig in invocationParams.
    outputConfig;
    // Default inference_geo; per-call option takes precedence.
    inferenceGeo;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    contextManagement;
    constructor(fields) {
        super(fields);
        // Start each instance with clean stream-event state.
        this.resetTokenEvents();
        // Normalize temperature/topP/topK sentinels before first use.
        this.setDirectFields(fields);
        this._lc_stream_delay = fields?._lc_stream_delay ?? 25;
        this.outputConfig = fields?.outputConfig;
        this.inferenceGeo = fields?.inferenceGeo;
        this.contextManagement = fields?.contextManagement;
    }
    static lc_name() {
        return 'IllumaAnthropic';
    }
    /**
     * Get the parameters used to invoke the model.
     *
     * Merges instance-level and call-level output config (call-level
     * `outputFormat` fills in `format` only when not already set), resolves
     * inference_geo (call option wins), and enforces Anthropic's constraint
     * that topK/topP/temperature may not be set while thinking is enabled.
     */
    invocationParams(options) {
        const tool_choice = handleToolChoice(options?.tool_choice);
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const callOptions = options;
        const mergedOutputConfig = (() => {
            // Call-level outputConfig keys override instance-level ones.
            const base = {
                ...this.outputConfig,
                ...callOptions?.outputConfig,
            };
            if (callOptions?.outputFormat && !base.format) {
                base.format = callOptions.outputFormat;
            }
            // Omit output_config entirely when nothing was configured.
            return Object.keys(base).length > 0 ? base : undefined;
        })();
        const inferenceGeo = callOptions?.inferenceGeo ?? this.inferenceGeo;
        const contextManagement = this.contextManagement;
        const sharedParams = {
            tools: this.formatStructuredToolToAnthropic(options?.tools),
            tool_choice,
            thinking: this.thinking,
            ...(mergedOutputConfig ? { output_config: mergedOutputConfig } : {}),
            ...(inferenceGeo ? { inference_geo: inferenceGeo } : {}),
            ...(contextManagement ? { context_management: contextManagement } : {}),
            // invocationKwargs spread last so explicit kwargs win.
            ...this.invocationKwargs,
        };
        if (this.thinking.type === 'enabled' || this.thinking.type === 'adaptive') {
            // Anthropic rejects sampling overrides alongside thinking; fail fast.
            if (this.top_k !== -1 && this.top_k != null) {
                throw new Error('topK is not supported when thinking is enabled');
            }
            if (this.topP !== -1 && this.topP != null) {
                throw new Error('topP is not supported when thinking is enabled');
            }
            if (this.temperature !== 1 &&
                this.temperature != null) {
                throw new Error('temperature is not supported when thinking is enabled');
            }
            // Thinking variant: no temperature/top_k/top_p in the payload.
            return {
                model: this.model,
                stop_sequences: options?.stop ?? this.stopSequences,
                stream: this.streaming,
                max_tokens: this.maxTokens,
                ...sharedParams,
            };
        }
        return {
            model: this.model,
            temperature: this.temperature,
            top_k: this.top_k,
            top_p: this.topP,
            stop_sequences: options?.stop ?? this.stopSequences,
            stream: this.streaming,
            max_tokens: this.maxTokens,
            ...sharedParams,
        };
    }
    /**
     * Get stream usage as returned by this client's API response.
     *
     * Combines input usage from the 'message_start' event with output usage
     * from the 'message_delta' event. Returns undefined until output usage is
     * available, and at most once per stream (emitted_usage latches true).
     * Cache token details are included only when the API reported them.
     * @returns The stream usage object.
     */
    getStreamUsage() {
        if (this.emitted_usage === true) {
            return;
        }
        const inputUsage = this.message_start?.message.usage;
        const outputUsage = this.message_delta?.usage;
        if (!outputUsage) {
            return;
        }
        const totalUsage = {
            input_tokens: inputUsage?.input_tokens ?? 0,
            output_tokens: outputUsage.output_tokens ?? 0,
            total_tokens: (inputUsage?.input_tokens ?? 0) + (outputUsage.output_tokens ?? 0),
        };
        if (inputUsage?.cache_creation_input_tokens != null ||
            inputUsage?.cache_read_input_tokens != null) {
            totalUsage.input_token_details = {
                cache_creation: inputUsage.cache_creation_input_tokens ?? 0,
                cache_read: inputUsage.cache_read_input_tokens ?? 0,
            };
        }
        this.emitted_usage = true;
        return totalUsage;
    }
    /** Clear all per-stream event/usage state (called before and after each stream). */
    resetTokenEvents() {
        this.message_start = undefined;
        this.message_delta = undefined;
        this.emitted_usage = undefined;
        this.tools_in_params = undefined;
    }
    /**
     * Copy sampling fields from constructor args, treating sentinel values as
     * "unset": temperature of -1 or 1, topP of -1, and topK of -1 all become
     * undefined so they are omitted from requests.
     */
    setDirectFields(fields) {
        this.temperature = fields?.temperature ?? undefined;
        this.topP = fields?.topP ?? undefined;
        this.top_k = fields?.topK;
        if (this.temperature === -1 || this.temperature === 1) {
            this.temperature = undefined;
        }
        if (this.topP === -1) {
            this.topP = undefined;
        }
        if (this.top_k === -1) {
            this.top_k = undefined;
        }
    }
    /**
     * Wrap a message chunk in a ChatGenerationChunk, attaching usage metadata
     * only when usage streaming is on (explicit usageMetadata wins over the
     * chunk's own usage_metadata).
     */
    createGenerationChunk({ token, chunk, usageMetadata, shouldStreamUsage, }) {
        const usage_metadata = shouldStreamUsage
            ? (usageMetadata ?? chunk.usage_metadata)
            : undefined;
        return new ChatGenerationChunk({
            message: new AIMessageChunk({
                // Just yield chunk as it is and tool_use will be concat by BaseChatModel._generateUncached().
                content: chunk.content,
                additional_kwargs: chunk.additional_kwargs,
                tool_call_chunks: chunk.tool_call_chunks,
                response_metadata: chunk.response_metadata,
                usage_metadata,
                id: chunk.id,
            }),
            text: token ?? '',
        });
    }
    /**
     * Stream response chunks from the Anthropic API.
     *
     * Text-bearing chunks are re-chunked through TextStream (word-aware,
     * 4-8 chars per piece, delayed by _lc_stream_delay) to smooth token
     * delivery; tool-input and empty/usage-only chunks are yielded as-is.
     * Usage metadata is attached to exactly one emitted chunk per stream.
     * Aborting via options.signal cancels the underlying API stream.
     */
    async *_streamResponseChunks(messages, options, runManager) {
        this.resetTokenEvents();
        const params = this.invocationParams(options);
        const formattedMessages = _convertMessagesToAnthropicPayload(messages);
        const payload = {
            ...params,
            ...formattedMessages,
            stream: true,
        };
        // Content can be collapsed to a plain string only when no feature that
        // needs structured blocks (tools, documents, thinking, compaction) is active.
        const coerceContentToString = !_toolsInParams(payload) &&
            !_documentsInParams(payload) &&
            !_thinkingInParams(payload) &&
            !_compactionInParams(payload);
        const stream = await this.createStreamWithRetry(payload, {
            headers: options.headers,
        });
        const shouldStreamUsage = this.streamUsage ?? options.streamUsage;
        for await (const data of stream) {
            if (options.signal?.aborted === true) {
                // Tear down the server-side stream before bailing out.
                stream.controller.abort();
                throw new Error('AbortError: User aborted the request.');
            }
            // Remember the raw usage-bearing events for getStreamUsage().
            if (data.type === 'message_start') {
                this.message_start = data;
            }
            else if (data.type === 'message_delta') {
                this.message_delta = data;
            }
            let usageMetadata;
            if (this.tools_in_params !== true && this.emitted_usage !== true) {
                usageMetadata = this.getStreamUsage();
            }
            const result = _makeMessageChunkFromAnthropicEvent(data, {
                streamUsage: shouldStreamUsage,
                coerceContentToString,
            });
            if (!result)
                continue;
            const { chunk } = result;
            const [token = '', tokenType] = extractToken(chunk);
            // Fast path: nothing to re-chunk (no token, tool input, or an
            // empty token that still carries usage or an id) — emit directly.
            if (!tokenType ||
                tokenType === 'input' ||
                (token === '' && (usageMetadata != null || chunk.id != null))) {
                const generationChunk = this.createGenerationChunk({
                    token,
                    chunk,
                    usageMetadata,
                    shouldStreamUsage,
                });
                yield generationChunk;
                await runManager?.handleLLMNewToken(token, undefined, undefined, undefined, undefined, { chunk: generationChunk });
                continue;
            }
            // Slow path: split the token into small timed pieces for smoother streaming.
            const textStream = new TextStream(token, {
                delay: this._lc_stream_delay,
                firstWordChunk: true,
                minChunkSize: 4,
                maxChunkSize: 8,
            });
            const generator = textStream.generateText(options.signal);
            try {
                let emittedUsage = false;
                for await (const currentToken of generator) {
                    if (options.signal?.aborted === true) {
                        break;
                    }
                    const newChunk = cloneChunk(currentToken, tokenType, chunk);
                    const generationChunk = this.createGenerationChunk({
                        token: currentToken,
                        chunk: newChunk,
                        // Attach usage to the first re-chunked piece only.
                        usageMetadata: emittedUsage ? undefined : usageMetadata,
                        shouldStreamUsage,
                    });
                    if (usageMetadata && !emittedUsage) {
                        emittedUsage = true;
                    }
                    yield generationChunk;
                    await runManager?.handleLLMNewToken(currentToken, undefined, undefined, undefined, undefined, { chunk: generationChunk });
                }
            }
            finally {
                // Ensure the TextStream generator is closed even on break/throw.
                await generator.return();
            }
        }
        this.resetTokenEvents();
    }
}
|
|
319
|
-
//# sourceMappingURL=index.js.map
|