@illuma-ai/agents 1.1.21 → 1.1.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/graphs/Graph.cjs +12 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +105 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
- package/dist/cjs/run.cjs +20 -9
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/utils/llm.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +12 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +105 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
- package/dist/esm/run.mjs +20 -9
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/utils/llm.mjs.map +1 -1
- package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
- package/package.json +1 -1
- package/src/graphs/Graph.ts +13 -1
- package/src/graphs/MultiAgentGraph.ts +128 -1
- package/src/graphs/__tests__/multi-agent-delegate.test.ts +205 -0
- package/src/run.ts +20 -11
- package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
- package/src/utils/llm.ts +1 -0
- package/src/agents/AgentContext.js +0 -782
- package/src/agents/AgentContext.test.js +0 -421
- package/src/agents/__tests__/AgentContext.test.js +0 -678
- package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
- package/src/common/enum.js +0 -192
- package/src/common/index.js +0 -3
- package/src/events.js +0 -166
- package/src/graphs/Graph.js +0 -1857
- package/src/graphs/MultiAgentGraph.js +0 -1092
- package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
- package/src/graphs/__tests__/structured-output.test.js +0 -144
- package/src/graphs/contextManagement.e2e.test.js +0 -718
- package/src/graphs/contextManagement.test.js +0 -485
- package/src/graphs/handoffValidation.test.js +0 -276
- package/src/graphs/index.js +0 -3
- package/src/index.js +0 -28
- package/src/instrumentation.js +0 -21
- package/src/llm/anthropic/index.js +0 -319
- package/src/llm/anthropic/types.js +0 -46
- package/src/llm/anthropic/utils/message_inputs.js +0 -627
- package/src/llm/anthropic/utils/message_outputs.js +0 -290
- package/src/llm/anthropic/utils/output_parsers.js +0 -89
- package/src/llm/anthropic/utils/tools.js +0 -25
- package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
- package/src/llm/bedrock/index.js +0 -303
- package/src/llm/bedrock/types.js +0 -2
- package/src/llm/bedrock/utils/index.js +0 -6
- package/src/llm/bedrock/utils/message_inputs.js +0 -463
- package/src/llm/bedrock/utils/message_outputs.js +0 -269
- package/src/llm/fake.js +0 -92
- package/src/llm/google/index.js +0 -215
- package/src/llm/google/types.js +0 -12
- package/src/llm/google/utils/common.js +0 -670
- package/src/llm/google/utils/tools.js +0 -111
- package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
- package/src/llm/openai/index.js +0 -1033
- package/src/llm/openai/types.js +0 -2
- package/src/llm/openai/utils/index.js +0 -756
- package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
- package/src/llm/openrouter/index.js +0 -261
- package/src/llm/openrouter/reasoning.test.js +0 -181
- package/src/llm/providers.js +0 -36
- package/src/llm/text.js +0 -65
- package/src/llm/vertexai/index.js +0 -402
- package/src/messages/__tests__/tools.test.js +0 -392
- package/src/messages/cache.js +0 -404
- package/src/messages/cache.test.js +0 -1167
- package/src/messages/content.js +0 -48
- package/src/messages/content.test.js +0 -314
- package/src/messages/core.js +0 -359
- package/src/messages/ensureThinkingBlock.test.js +0 -997
- package/src/messages/format.js +0 -973
- package/src/messages/formatAgentMessages.test.js +0 -2278
- package/src/messages/formatAgentMessages.tools.test.js +0 -362
- package/src/messages/formatMessage.test.js +0 -608
- package/src/messages/ids.js +0 -18
- package/src/messages/index.js +0 -9
- package/src/messages/labelContentByAgent.test.js +0 -725
- package/src/messages/prune.js +0 -438
- package/src/messages/reducer.js +0 -60
- package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
- package/src/messages/summarize.js +0 -146
- package/src/messages/summarize.test.js +0 -332
- package/src/messages/tools.js +0 -90
- package/src/mockStream.js +0 -81
- package/src/prompts/collab.js +0 -7
- package/src/prompts/index.js +0 -3
- package/src/prompts/taskmanager.js +0 -58
- package/src/run.js +0 -427
- package/src/schemas/index.js +0 -3
- package/src/schemas/schema-preparation.test.js +0 -370
- package/src/schemas/validate.js +0 -314
- package/src/schemas/validate.test.js +0 -264
- package/src/scripts/abort.js +0 -127
- package/src/scripts/ant_web_search.js +0 -130
- package/src/scripts/ant_web_search_edge_case.js +0 -133
- package/src/scripts/ant_web_search_error_edge_case.js +0 -119
- package/src/scripts/args.js +0 -41
- package/src/scripts/bedrock-cache-debug.js +0 -186
- package/src/scripts/bedrock-content-aggregation-test.js +0 -195
- package/src/scripts/bedrock-merge-test.js +0 -80
- package/src/scripts/bedrock-parallel-tools-test.js +0 -150
- package/src/scripts/caching.js +0 -106
- package/src/scripts/cli.js +0 -152
- package/src/scripts/cli2.js +0 -119
- package/src/scripts/cli3.js +0 -163
- package/src/scripts/cli4.js +0 -165
- package/src/scripts/cli5.js +0 -165
- package/src/scripts/code_exec.js +0 -171
- package/src/scripts/code_exec_files.js +0 -180
- package/src/scripts/code_exec_multi_session.js +0 -185
- package/src/scripts/code_exec_ptc.js +0 -265
- package/src/scripts/code_exec_session.js +0 -217
- package/src/scripts/code_exec_simple.js +0 -120
- package/src/scripts/content.js +0 -111
- package/src/scripts/empty_input.js +0 -125
- package/src/scripts/handoff-test.js +0 -96
- package/src/scripts/image.js +0 -138
- package/src/scripts/memory.js +0 -83
- package/src/scripts/multi-agent-chain.js +0 -271
- package/src/scripts/multi-agent-conditional.js +0 -185
- package/src/scripts/multi-agent-document-review-chain.js +0 -171
- package/src/scripts/multi-agent-hybrid-flow.js +0 -264
- package/src/scripts/multi-agent-parallel-start.js +0 -214
- package/src/scripts/multi-agent-parallel.js +0 -346
- package/src/scripts/multi-agent-sequence.js +0 -184
- package/src/scripts/multi-agent-supervisor.js +0 -324
- package/src/scripts/multi-agent-test.js +0 -147
- package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
- package/src/scripts/parallel-full-metadata-test.js +0 -176
- package/src/scripts/parallel-tools-test.js +0 -256
- package/src/scripts/programmatic_exec.js +0 -277
- package/src/scripts/programmatic_exec_agent.js +0 -168
- package/src/scripts/search.js +0 -118
- package/src/scripts/sequential-full-metadata-test.js +0 -143
- package/src/scripts/simple.js +0 -174
- package/src/scripts/single-agent-metadata-test.js +0 -152
- package/src/scripts/stream.js +0 -113
- package/src/scripts/test-custom-prompt-key.js +0 -132
- package/src/scripts/test-handoff-input.js +0 -143
- package/src/scripts/test-handoff-preamble.js +0 -227
- package/src/scripts/test-handoff-steering.js +0 -353
- package/src/scripts/test-multi-agent-list-handoff.js +0 -318
- package/src/scripts/test-parallel-agent-labeling.js +0 -253
- package/src/scripts/test-parallel-handoffs.js +0 -229
- package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
- package/src/scripts/test-thinking-handoff.js +0 -132
- package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
- package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
- package/src/scripts/test-tools-before-handoff.js +0 -187
- package/src/scripts/test_code_api.js +0 -263
- package/src/scripts/thinking-bedrock.js +0 -128
- package/src/scripts/thinking-vertexai.js +0 -130
- package/src/scripts/thinking.js +0 -134
- package/src/scripts/tool_search.js +0 -114
- package/src/scripts/tools.js +0 -125
- package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
- package/src/specs/agent-handoffs.test.js +0 -924
- package/src/specs/anthropic.simple.test.js +0 -287
- package/src/specs/azure.simple.test.js +0 -381
- package/src/specs/cache.simple.test.js +0 -282
- package/src/specs/custom-event-await.test.js +0 -148
- package/src/specs/deepseek.simple.test.js +0 -189
- package/src/specs/emergency-prune.test.js +0 -308
- package/src/specs/moonshot.simple.test.js +0 -237
- package/src/specs/observability.integration.test.js +0 -1337
- package/src/specs/openai.simple.test.js +0 -233
- package/src/specs/openrouter.simple.test.js +0 -202
- package/src/specs/prune.test.js +0 -733
- package/src/specs/reasoning.test.js +0 -144
- package/src/specs/spec.utils.js +0 -4
- package/src/specs/thinking-handoff.test.js +0 -486
- package/src/specs/thinking-prune.test.js +0 -600
- package/src/specs/token-distribution-edge-case.test.js +0 -246
- package/src/specs/token-memoization.test.js +0 -32
- package/src/specs/tokens.test.js +0 -49
- package/src/specs/tool-error.test.js +0 -139
- package/src/splitStream.js +0 -204
- package/src/splitStream.test.js +0 -504
- package/src/stream.js +0 -650
- package/src/stream.test.js +0 -225
- package/src/test/mockTools.js +0 -340
- package/src/tools/BrowserTools.js +0 -245
- package/src/tools/Calculator.js +0 -38
- package/src/tools/Calculator.test.js +0 -225
- package/src/tools/CodeExecutor.js +0 -233
- package/src/tools/ProgrammaticToolCalling.js +0 -602
- package/src/tools/StreamingToolCallBuffer.js +0 -179
- package/src/tools/ToolNode.js +0 -930
- package/src/tools/ToolSearch.js +0 -904
- package/src/tools/__tests__/BrowserTools.test.js +0 -306
- package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
- package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
- package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
- package/src/tools/__tests__/ToolApproval.test.js +0 -675
- package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
- package/src/tools/__tests__/ToolNode.session.test.js +0 -319
- package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
- package/src/tools/__tests__/ToolSearch.test.js +0 -812
- package/src/tools/__tests__/handlers.test.js +0 -799
- package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
- package/src/tools/handlers.js +0 -306
- package/src/tools/schema.js +0 -25
- package/src/tools/search/anthropic.js +0 -34
- package/src/tools/search/content.js +0 -116
- package/src/tools/search/content.test.js +0 -133
- package/src/tools/search/firecrawl.js +0 -173
- package/src/tools/search/format.js +0 -198
- package/src/tools/search/highlights.js +0 -241
- package/src/tools/search/index.js +0 -3
- package/src/tools/search/jina-reranker.test.js +0 -106
- package/src/tools/search/rerankers.js +0 -165
- package/src/tools/search/schema.js +0 -102
- package/src/tools/search/search.js +0 -561
- package/src/tools/search/serper-scraper.js +0 -126
- package/src/tools/search/test.js +0 -129
- package/src/tools/search/tool.js +0 -453
- package/src/tools/search/types.js +0 -2
- package/src/tools/search/utils.js +0 -59
- package/src/types/graph.js +0 -24
- package/src/types/graph.test.js +0 -192
- package/src/types/index.js +0 -7
- package/src/types/llm.js +0 -2
- package/src/types/messages.js +0 -2
- package/src/types/run.js +0 -2
- package/src/types/stream.js +0 -2
- package/src/types/tools.js +0 -2
- package/src/utils/contextAnalytics.js +0 -79
- package/src/utils/contextAnalytics.test.js +0 -166
- package/src/utils/events.js +0 -26
- package/src/utils/graph.js +0 -11
- package/src/utils/handlers.js +0 -65
- package/src/utils/index.js +0 -10
- package/src/utils/llm.js +0 -21
- package/src/utils/llmConfig.js +0 -205
- package/src/utils/logging.js +0 -37
- package/src/utils/misc.js +0 -51
- package/src/utils/run.js +0 -69
- package/src/utils/schema.js +0 -21
- package/src/utils/title.js +0 -119
- package/src/utils/tokens.js +0 -92
- package/src/utils/toonFormat.js +0 -379
|
@@ -1,287 +0,0 @@
|
|
|
1
|
-
/* eslint-disable no-console */
|
|
2
|
-
/* eslint-disable @typescript-eslint/no-explicit-any */
|
|
3
|
-
// src/scripts/cli.test.ts
|
|
4
|
-
import { config } from 'dotenv';
|
|
5
|
-
config();
|
|
6
|
-
import { Calculator } from '@/tools/Calculator';
|
|
7
|
-
import { HumanMessage, } from '@langchain/core/messages';
|
|
8
|
-
import { ToolEndHandler, ModelEndHandler, createMetadataAggregator, } from '@/events';
|
|
9
|
-
import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
|
|
10
|
-
import { capitalizeFirstLetter } from './spec.utils';
|
|
11
|
-
import { createContentAggregator } from '@/stream';
|
|
12
|
-
import { getLLMConfig } from '@/utils/llmConfig';
|
|
13
|
-
import { getArgs } from '@/scripts/args';
|
|
14
|
-
import { Run } from '@/run';
|
|
15
|
-
const provider = Providers.ANTHROPIC;
|
|
16
|
-
describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
|
|
17
|
-
jest.setTimeout(60000);
|
|
18
|
-
let run;
|
|
19
|
-
let runningHistory;
|
|
20
|
-
let collectedUsage;
|
|
21
|
-
let conversationHistory;
|
|
22
|
-
let aggregateContent;
|
|
23
|
-
let contentParts;
|
|
24
|
-
const config = {
|
|
25
|
-
configurable: {
|
|
26
|
-
thread_id: 'conversation-num-1',
|
|
27
|
-
},
|
|
28
|
-
streamMode: 'values',
|
|
29
|
-
version: 'v2',
|
|
30
|
-
};
|
|
31
|
-
beforeEach(async () => {
|
|
32
|
-
conversationHistory = [];
|
|
33
|
-
collectedUsage = [];
|
|
34
|
-
const { contentParts: cp, aggregateContent: ac } = createContentAggregator();
|
|
35
|
-
contentParts = cp;
|
|
36
|
-
aggregateContent = ac;
|
|
37
|
-
});
|
|
38
|
-
const onMessageDeltaSpy = jest.fn();
|
|
39
|
-
const onRunStepSpy = jest.fn();
|
|
40
|
-
afterAll(() => {
|
|
41
|
-
onMessageDeltaSpy.mockReset();
|
|
42
|
-
onRunStepSpy.mockReset();
|
|
43
|
-
});
|
|
44
|
-
const setupCustomHandlers = () => ({
|
|
45
|
-
[GraphEvents.TOOL_END]: new ToolEndHandler(),
|
|
46
|
-
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
|
|
47
|
-
[GraphEvents.ON_RUN_STEP_COMPLETED]: {
|
|
48
|
-
handle: (event, data) => {
|
|
49
|
-
aggregateContent({
|
|
50
|
-
event,
|
|
51
|
-
data: data,
|
|
52
|
-
});
|
|
53
|
-
},
|
|
54
|
-
},
|
|
55
|
-
[GraphEvents.ON_RUN_STEP]: {
|
|
56
|
-
handle: (event, data, metadata, graph) => {
|
|
57
|
-
onRunStepSpy(event, data, metadata, graph);
|
|
58
|
-
aggregateContent({ event, data: data });
|
|
59
|
-
},
|
|
60
|
-
},
|
|
61
|
-
[GraphEvents.ON_RUN_STEP_DELTA]: {
|
|
62
|
-
handle: (event, data) => {
|
|
63
|
-
aggregateContent({ event, data: data });
|
|
64
|
-
},
|
|
65
|
-
},
|
|
66
|
-
[GraphEvents.ON_MESSAGE_DELTA]: {
|
|
67
|
-
handle: (event, data, metadata, graph) => {
|
|
68
|
-
onMessageDeltaSpy(event, data, metadata, graph);
|
|
69
|
-
aggregateContent({ event, data: data });
|
|
70
|
-
},
|
|
71
|
-
},
|
|
72
|
-
[GraphEvents.TOOL_START]: {
|
|
73
|
-
handle: (_event, _data, _metadata) => {
|
|
74
|
-
// Handle tool start
|
|
75
|
-
},
|
|
76
|
-
},
|
|
77
|
-
});
|
|
78
|
-
test(`${capitalizeFirstLetter(provider)}: should process a simple message, generate title`, async () => {
|
|
79
|
-
const { userName, location } = await getArgs();
|
|
80
|
-
const llmConfig = getLLMConfig(provider);
|
|
81
|
-
const customHandlers = setupCustomHandlers();
|
|
82
|
-
run = await Run.create({
|
|
83
|
-
runId: 'test-run-id',
|
|
84
|
-
graphConfig: {
|
|
85
|
-
type: 'standard',
|
|
86
|
-
llmConfig,
|
|
87
|
-
tools: [new Calculator()],
|
|
88
|
-
instructions: 'You are a friendly AI assistant. Always address the user by their name.',
|
|
89
|
-
additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
|
|
90
|
-
},
|
|
91
|
-
returnContent: true,
|
|
92
|
-
skipCleanup: true,
|
|
93
|
-
customHandlers,
|
|
94
|
-
});
|
|
95
|
-
const userMessage = 'hi';
|
|
96
|
-
conversationHistory.push(new HumanMessage(userMessage));
|
|
97
|
-
const inputs = {
|
|
98
|
-
messages: conversationHistory,
|
|
99
|
-
};
|
|
100
|
-
const finalContentParts = await run.processStream(inputs, config);
|
|
101
|
-
expect(finalContentParts).toBeDefined();
|
|
102
|
-
const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
|
|
103
|
-
expect(allTextParts).toBe(true);
|
|
104
|
-
expect(collectedUsage.length).toBeGreaterThan(0);
|
|
105
|
-
expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
|
|
106
|
-
expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
|
|
107
|
-
const finalMessages = run.getRunMessages();
|
|
108
|
-
expect(finalMessages).toBeDefined();
|
|
109
|
-
conversationHistory.push(...(finalMessages ?? []));
|
|
110
|
-
expect(conversationHistory.length).toBeGreaterThan(1);
|
|
111
|
-
runningHistory = conversationHistory.slice();
|
|
112
|
-
expect(onMessageDeltaSpy).toHaveBeenCalled();
|
|
113
|
-
expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
|
|
114
|
-
expect(onMessageDeltaSpy.mock.calls[0][3]).toBeDefined(); // Graph exists
|
|
115
|
-
expect(onRunStepSpy).toHaveBeenCalled();
|
|
116
|
-
expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
|
|
117
|
-
expect(onRunStepSpy.mock.calls[0][3]).toBeDefined(); // Graph exists
|
|
118
|
-
const { handleLLMEnd, collected } = createMetadataAggregator();
|
|
119
|
-
const titleResult = await run.generateTitle({
|
|
120
|
-
provider,
|
|
121
|
-
inputText: userMessage,
|
|
122
|
-
titleMethod: TitleMethod.STRUCTURED,
|
|
123
|
-
contentParts,
|
|
124
|
-
clientOptions: {
|
|
125
|
-
...llmConfig,
|
|
126
|
-
model: 'claude-haiku-4-5',
|
|
127
|
-
},
|
|
128
|
-
chainOptions: {
|
|
129
|
-
callbacks: [
|
|
130
|
-
{
|
|
131
|
-
handleLLMEnd,
|
|
132
|
-
},
|
|
133
|
-
],
|
|
134
|
-
},
|
|
135
|
-
});
|
|
136
|
-
expect(titleResult).toBeDefined();
|
|
137
|
-
expect(titleResult.title).toBeDefined();
|
|
138
|
-
expect(titleResult.language).toBeDefined();
|
|
139
|
-
expect(collected).toBeDefined();
|
|
140
|
-
});
|
|
141
|
-
test(`${capitalizeFirstLetter(provider)}: should generate title using completion method`, async () => {
|
|
142
|
-
const { userName, location } = await getArgs();
|
|
143
|
-
const llmConfig = getLLMConfig(provider);
|
|
144
|
-
const customHandlers = setupCustomHandlers();
|
|
145
|
-
run = await Run.create({
|
|
146
|
-
runId: 'test-run-id-completion',
|
|
147
|
-
graphConfig: {
|
|
148
|
-
type: 'standard',
|
|
149
|
-
llmConfig,
|
|
150
|
-
tools: [new Calculator()],
|
|
151
|
-
instructions: 'You are a friendly AI assistant. Always address the user by their name.',
|
|
152
|
-
additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
|
|
153
|
-
},
|
|
154
|
-
returnContent: true,
|
|
155
|
-
skipCleanup: true,
|
|
156
|
-
customHandlers,
|
|
157
|
-
});
|
|
158
|
-
const userMessage = 'Can you help me calculate the area of a circle with radius 5?';
|
|
159
|
-
conversationHistory = [];
|
|
160
|
-
conversationHistory.push(new HumanMessage(userMessage));
|
|
161
|
-
const inputs = {
|
|
162
|
-
messages: conversationHistory,
|
|
163
|
-
};
|
|
164
|
-
const finalContentParts = await run.processStream(inputs, config);
|
|
165
|
-
expect(finalContentParts).toBeDefined();
|
|
166
|
-
const { handleLLMEnd, collected } = createMetadataAggregator();
|
|
167
|
-
const titleResult = await run.generateTitle({
|
|
168
|
-
provider,
|
|
169
|
-
inputText: userMessage,
|
|
170
|
-
titleMethod: TitleMethod.COMPLETION, // Using completion method
|
|
171
|
-
contentParts,
|
|
172
|
-
clientOptions: {
|
|
173
|
-
...llmConfig,
|
|
174
|
-
model: 'claude-haiku-4-5',
|
|
175
|
-
},
|
|
176
|
-
chainOptions: {
|
|
177
|
-
callbacks: [
|
|
178
|
-
{
|
|
179
|
-
handleLLMEnd,
|
|
180
|
-
},
|
|
181
|
-
],
|
|
182
|
-
},
|
|
183
|
-
});
|
|
184
|
-
expect(titleResult).toBeDefined();
|
|
185
|
-
expect(titleResult.title).toBeDefined();
|
|
186
|
-
expect(titleResult.title).not.toBe('');
|
|
187
|
-
// Completion method doesn't return language
|
|
188
|
-
expect(titleResult.language).toBeUndefined();
|
|
189
|
-
expect(collected).toBeDefined();
|
|
190
|
-
console.log(`Completion method generated title: "${titleResult.title}"`);
|
|
191
|
-
});
|
|
192
|
-
test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
|
|
193
|
-
console.log('Previous conversation length:', runningHistory.length);
|
|
194
|
-
console.log('Last message:', runningHistory[runningHistory.length - 1].content);
|
|
195
|
-
const { userName, location } = await getArgs();
|
|
196
|
-
const llmConfig = getLLMConfig(provider);
|
|
197
|
-
const customHandlers = setupCustomHandlers();
|
|
198
|
-
run = await Run.create({
|
|
199
|
-
runId: 'test-run-id',
|
|
200
|
-
graphConfig: {
|
|
201
|
-
type: 'standard',
|
|
202
|
-
llmConfig,
|
|
203
|
-
tools: [new Calculator()],
|
|
204
|
-
instructions: 'You are a friendly AI assistant. Always address the user by their name.',
|
|
205
|
-
additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
|
|
206
|
-
},
|
|
207
|
-
returnContent: true,
|
|
208
|
-
skipCleanup: true,
|
|
209
|
-
customHandlers,
|
|
210
|
-
});
|
|
211
|
-
conversationHistory = runningHistory.slice();
|
|
212
|
-
conversationHistory.push(new HumanMessage('how are you?'));
|
|
213
|
-
const inputs = {
|
|
214
|
-
messages: conversationHistory,
|
|
215
|
-
};
|
|
216
|
-
const finalContentParts = await run.processStream(inputs, config);
|
|
217
|
-
expect(finalContentParts).toBeDefined();
|
|
218
|
-
const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
|
|
219
|
-
expect(allTextParts).toBe(true);
|
|
220
|
-
expect(collectedUsage.length).toBeGreaterThan(0);
|
|
221
|
-
expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
|
|
222
|
-
expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
|
|
223
|
-
const finalMessages = run.getRunMessages();
|
|
224
|
-
expect(finalMessages).toBeDefined();
|
|
225
|
-
expect(finalMessages?.length).toBeGreaterThan(0);
|
|
226
|
-
console.log(`${capitalizeFirstLetter(provider)} follow-up message:`, finalMessages?.[finalMessages.length - 1]?.content);
|
|
227
|
-
expect(onMessageDeltaSpy).toHaveBeenCalled();
|
|
228
|
-
expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
|
|
229
|
-
expect(onRunStepSpy).toHaveBeenCalled();
|
|
230
|
-
expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
|
|
231
|
-
});
|
|
232
|
-
test(`${capitalizeFirstLetter(provider)}: should handle parallel tool usage (web search + calculator)`, async () => {
|
|
233
|
-
const llmConfig = getLLMConfig(provider);
|
|
234
|
-
const customHandlers = setupCustomHandlers();
|
|
235
|
-
run = await Run.create({
|
|
236
|
-
runId: 'test-parallel-tools',
|
|
237
|
-
graphConfig: {
|
|
238
|
-
type: 'standard',
|
|
239
|
-
llmConfig,
|
|
240
|
-
tools: [
|
|
241
|
-
{
|
|
242
|
-
type: 'web_search_20250305',
|
|
243
|
-
name: 'web_search',
|
|
244
|
-
max_uses: 5,
|
|
245
|
-
},
|
|
246
|
-
new Calculator(),
|
|
247
|
-
],
|
|
248
|
-
instructions: 'You are a helpful AI assistant.',
|
|
249
|
-
},
|
|
250
|
-
returnContent: true,
|
|
251
|
-
skipCleanup: true,
|
|
252
|
-
customHandlers,
|
|
253
|
-
});
|
|
254
|
-
// Use the same query as the edge case script to test actual parallel tool usage
|
|
255
|
-
const userMessage = 'Can you search the web for the current population of Tokyo, and also calculate what 15% of that population would be? Do both at the same time.';
|
|
256
|
-
conversationHistory = [];
|
|
257
|
-
conversationHistory.push(new HumanMessage(userMessage));
|
|
258
|
-
const inputs = {
|
|
259
|
-
messages: conversationHistory,
|
|
260
|
-
};
|
|
261
|
-
// This should complete without errors despite using both server tools and regular tools in parallel
|
|
262
|
-
const finalContentParts = await run.processStream(inputs, config);
|
|
263
|
-
expect(finalContentParts).toBeDefined();
|
|
264
|
-
const finalMessages = run.getRunMessages();
|
|
265
|
-
expect(finalMessages).toBeDefined();
|
|
266
|
-
expect(finalMessages?.length).toBeGreaterThan(0);
|
|
267
|
-
const hasWebSearch = contentParts.some((part) => !!(part.type === 'tool_call' &&
|
|
268
|
-
part.tool_call?.name === 'web_search' &&
|
|
269
|
-
part.tool_call?.id?.startsWith('srvtoolu_') === true));
|
|
270
|
-
const hasCalculator = contentParts.some((part) => !!(part.type === 'tool_call' &&
|
|
271
|
-
part.tool_call?.name === 'calculator' &&
|
|
272
|
-
part.tool_call?.id?.startsWith('toolu_') === true));
|
|
273
|
-
// Both tools should have been used for this query
|
|
274
|
-
expect(hasWebSearch).toBe(true);
|
|
275
|
-
expect(hasCalculator).toBe(true);
|
|
276
|
-
console.log(`${capitalizeFirstLetter(provider)} parallel tools test: web_search (server tool) + calculator (regular tool) both used successfully`);
|
|
277
|
-
});
|
|
278
|
-
test('should handle errors appropriately', async () => {
|
|
279
|
-
// Test error scenarios
|
|
280
|
-
await expect(async () => {
|
|
281
|
-
await run.processStream({
|
|
282
|
-
messages: [],
|
|
283
|
-
}, {});
|
|
284
|
-
}).rejects.toThrow();
|
|
285
|
-
});
|
|
286
|
-
});
|
|
287
|
-
//# sourceMappingURL=anthropic.simple.test.js.map
|
|
@@ -1,381 +0,0 @@
|
|
|
1
|
-
/* eslint-disable no-console */
|
|
2
|
-
/* eslint-disable @typescript-eslint/no-explicit-any */
|
|
3
|
-
// src/specs/azure.simple.test.ts
|
|
4
|
-
import { config } from 'dotenv';
|
|
5
|
-
config();
|
|
6
|
-
import { Calculator } from '@/tools/Calculator';
|
|
7
|
-
import { HumanMessage, } from '@langchain/core/messages';
|
|
8
|
-
import { ToolEndHandler, ModelEndHandler, createMetadataAggregator, } from '@/events';
|
|
9
|
-
import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
|
|
10
|
-
import { capitalizeFirstLetter } from './spec.utils';
|
|
11
|
-
import { createContentAggregator } from '@/stream';
|
|
12
|
-
import { getLLMConfig } from '@/utils/llmConfig';
|
|
13
|
-
import { Run } from '@/run';
|
|
14
|
-
const requiredAzureEnv = [
|
|
15
|
-
'AZURE_OPENAI_API_KEY',
|
|
16
|
-
'AZURE_OPENAI_API_INSTANCE',
|
|
17
|
-
'AZURE_OPENAI_API_DEPLOYMENT',
|
|
18
|
-
'AZURE_OPENAI_API_VERSION',
|
|
19
|
-
];
|
|
20
|
-
const hasAzure = requiredAzureEnv.every((k) => (process.env[k] ?? '').trim() !== '');
|
|
21
|
-
const describeIfAzure = hasAzure ? describe : describe.skip;
|
|
22
|
-
const isContentFilterError = (error) => {
|
|
23
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
24
|
-
return (message.includes('content management policy') ||
|
|
25
|
-
message.includes('content filtering'));
|
|
26
|
-
};
|
|
27
|
-
const provider = Providers.AZURE;
|
|
28
|
-
let contentFilterTriggered = false;
|
|
29
|
-
describeIfAzure(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
|
|
30
|
-
jest.setTimeout(30000);
|
|
31
|
-
let run;
|
|
32
|
-
let collectedUsage;
|
|
33
|
-
let conversationHistory;
|
|
34
|
-
let aggregateContent;
|
|
35
|
-
let contentParts;
|
|
36
|
-
let runningHistory = null;
|
|
37
|
-
const config = {
|
|
38
|
-
configurable: {
|
|
39
|
-
thread_id: 'conversation-num-1',
|
|
40
|
-
},
|
|
41
|
-
streamMode: 'values',
|
|
42
|
-
version: 'v2',
|
|
43
|
-
};
|
|
44
|
-
beforeEach(async () => {
|
|
45
|
-
conversationHistory = [];
|
|
46
|
-
collectedUsage = [];
|
|
47
|
-
const { contentParts: cp, aggregateContent: ac } = createContentAggregator();
|
|
48
|
-
contentParts = cp;
|
|
49
|
-
aggregateContent = ac;
|
|
50
|
-
});
|
|
51
|
-
const onMessageDeltaSpy = jest.fn();
|
|
52
|
-
const onRunStepSpy = jest.fn();
|
|
53
|
-
afterAll(() => {
|
|
54
|
-
onMessageDeltaSpy.mockReset();
|
|
55
|
-
onRunStepSpy.mockReset();
|
|
56
|
-
});
|
|
57
|
-
const setupCustomHandlers = () => ({
|
|
58
|
-
[GraphEvents.TOOL_END]: new ToolEndHandler(),
|
|
59
|
-
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
|
|
60
|
-
[GraphEvents.ON_RUN_STEP_COMPLETED]: {
|
|
61
|
-
handle: (event, data) => {
|
|
62
|
-
aggregateContent({
|
|
63
|
-
event,
|
|
64
|
-
data: data,
|
|
65
|
-
});
|
|
66
|
-
},
|
|
67
|
-
},
|
|
68
|
-
[GraphEvents.ON_RUN_STEP]: {
|
|
69
|
-
handle: (event, data, metadata, graph) => {
|
|
70
|
-
onRunStepSpy(event, data, metadata, graph);
|
|
71
|
-
aggregateContent({ event, data: data });
|
|
72
|
-
},
|
|
73
|
-
},
|
|
74
|
-
[GraphEvents.ON_RUN_STEP_DELTA]: {
|
|
75
|
-
handle: (event, data) => {
|
|
76
|
-
aggregateContent({ event, data: data });
|
|
77
|
-
},
|
|
78
|
-
},
|
|
79
|
-
[GraphEvents.ON_MESSAGE_DELTA]: {
|
|
80
|
-
handle: (event, data, metadata, graph) => {
|
|
81
|
-
onMessageDeltaSpy(event, data, metadata, graph);
|
|
82
|
-
aggregateContent({ event, data: data });
|
|
83
|
-
},
|
|
84
|
-
},
|
|
85
|
-
[GraphEvents.TOOL_START]: {
|
|
86
|
-
handle: (_event, _data, _metadata) => {
|
|
87
|
-
// Handle tool start
|
|
88
|
-
},
|
|
89
|
-
},
|
|
90
|
-
});
|
|
91
|
-
test(`${capitalizeFirstLetter(provider)}: should process a simple message, generate title`, async () => {
|
|
92
|
-
try {
|
|
93
|
-
const llmConfig = getLLMConfig(provider);
|
|
94
|
-
const customHandlers = setupCustomHandlers();
|
|
95
|
-
run = await Run.create({
|
|
96
|
-
runId: 'test-run-id',
|
|
97
|
-
graphConfig: {
|
|
98
|
-
type: 'standard',
|
|
99
|
-
llmConfig,
|
|
100
|
-
tools: [new Calculator()],
|
|
101
|
-
instructions: 'You are a helpful AI assistant. Keep responses concise and friendly.',
|
|
102
|
-
},
|
|
103
|
-
returnContent: true,
|
|
104
|
-
skipCleanup: true,
|
|
105
|
-
customHandlers,
|
|
106
|
-
});
|
|
107
|
-
const userMessage = 'Hello, how are you today?';
|
|
108
|
-
conversationHistory.push(new HumanMessage(userMessage));
|
|
109
|
-
const inputs = {
|
|
110
|
-
messages: conversationHistory,
|
|
111
|
-
};
|
|
112
|
-
const finalContentParts = await run.processStream(inputs, config);
|
|
113
|
-
expect(finalContentParts).toBeDefined();
|
|
114
|
-
const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
|
|
115
|
-
expect(allTextParts).toBe(true);
|
|
116
|
-
expect(collectedUsage.length).toBeGreaterThan(0);
|
|
117
|
-
expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
|
|
118
|
-
expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
|
|
119
|
-
const finalMessages = run.getRunMessages();
|
|
120
|
-
expect(finalMessages).toBeDefined();
|
|
121
|
-
conversationHistory.push(...(finalMessages ?? []));
|
|
122
|
-
expect(conversationHistory.length).toBeGreaterThan(1);
|
|
123
|
-
runningHistory = conversationHistory.slice();
|
|
124
|
-
expect(onMessageDeltaSpy).toHaveBeenCalled();
|
|
125
|
-
expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
|
|
126
|
-
expect(onMessageDeltaSpy.mock.calls[0][3]).toBeDefined(); // Graph exists
|
|
127
|
-
expect(onRunStepSpy).toHaveBeenCalled();
|
|
128
|
-
expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
|
|
129
|
-
expect(onRunStepSpy.mock.calls[0][3]).toBeDefined(); // Graph exists
|
|
130
|
-
const { handleLLMEnd, collected } = createMetadataAggregator();
|
|
131
|
-
const titleResult = await run.generateTitle({
|
|
132
|
-
provider,
|
|
133
|
-
inputText: userMessage,
|
|
134
|
-
titleMethod: TitleMethod.STRUCTURED,
|
|
135
|
-
contentParts,
|
|
136
|
-
clientOptions: llmConfig,
|
|
137
|
-
chainOptions: {
|
|
138
|
-
callbacks: [
|
|
139
|
-
{
|
|
140
|
-
handleLLMEnd,
|
|
141
|
-
},
|
|
142
|
-
],
|
|
143
|
-
},
|
|
144
|
-
});
|
|
145
|
-
expect(titleResult).toBeDefined();
|
|
146
|
-
expect(titleResult.title).toBeDefined();
|
|
147
|
-
expect(titleResult.language).toBeDefined();
|
|
148
|
-
expect(collected).toBeDefined();
|
|
149
|
-
}
|
|
150
|
-
catch (error) {
|
|
151
|
-
if (isContentFilterError(error)) {
|
|
152
|
-
contentFilterTriggered = true;
|
|
153
|
-
console.warn('Skipping test: Azure content filter triggered');
|
|
154
|
-
return;
|
|
155
|
-
}
|
|
156
|
-
throw error;
|
|
157
|
-
}
|
|
158
|
-
});
|
|
159
|
-
// Verifies Run.generateTitle() with TitleMethod.COMPLETION: after a normal
// streamed turn it must return a non-empty `title`, and — unlike the
// STRUCTURED method — must leave `language` undefined.
test(`${capitalizeFirstLetter(provider)}: should generate title using completion method`, async () => {
  // Earlier tests set this flag when Azure's content filter blocks a request;
  // once tripped, later tests skip instead of producing false failures.
  if (contentFilterTriggered) {
    console.warn('Skipping test: Azure content filter was triggered in previous test');
    return;
  }
  try {
    const llmConfig = getLLMConfig(provider);
    const customHandlers = setupCustomHandlers();
    run = await Run.create({
      runId: 'test-run-id-completion',
      graphConfig: {
        type: 'standard',
        llmConfig,
        tools: [new Calculator()],
        instructions: 'You are a helpful AI assistant. Keep responses concise and friendly.',
      },
      returnContent: true,
      skipCleanup: true,
      customHandlers,
    });
    const userMessage = 'What can you help me with today?';
    // Reset the shared history so this test's turn stands alone.
    conversationHistory = [];
    conversationHistory.push(new HumanMessage(userMessage));
    const inputs = {
      messages: conversationHistory,
    };
    const finalContentParts = await run.processStream(inputs, config);
    expect(finalContentParts).toBeDefined();
    // Collects LLM end-of-call metadata emitted by the title chain's callback.
    const { handleLLMEnd, collected } = createMetadataAggregator();
    const titleResult = await run.generateTitle({
      provider,
      inputText: userMessage,
      titleMethod: TitleMethod.COMPLETION,
      // NOTE(review): this is the outer-scope `contentParts` (shared content
      // aggregator output), not the local `finalContentParts` — presumably
      // intentional since both reflect the same run; confirm.
      contentParts,
      clientOptions: llmConfig,
      chainOptions: {
        callbacks: [
          {
            handleLLMEnd,
          },
        ],
      },
    });
    expect(titleResult).toBeDefined();
    expect(titleResult.title).toBeDefined();
    expect(titleResult.title).not.toBe('');
    // COMPLETION returns only a title string; `language` is populated by the
    // STRUCTURED method only, so here it must be absent.
    expect(titleResult.language).toBeUndefined();
    expect(collected).toBeDefined();
    console.log(`Completion method generated title: "${titleResult.title}"`);
  }
  catch (error) {
    // Content-filter rejections are environmental, not regressions: record
    // the trip so subsequent tests skip, and bail without failing.
    if (isContentFilterError(error)) {
      contentFilterTriggered = true;
      console.warn('Skipping test: Azure content filter triggered');
      return;
    }
    throw error;
  }
});
|
|
218
|
-
// Continues the conversation snapshotted by an earlier test (`runningHistory`):
// replays that history plus one new user turn, then re-checks streaming output,
// token-usage accounting, and event-handler spy invocations.
test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
  // Requires both a clean content-filter state and a saved history snapshot;
  // `== null` deliberately matches both null and undefined.
  if (contentFilterTriggered || runningHistory == null) {
    console.warn('Skipping test: Azure content filter was triggered or no conversation history');
    return;
  }
  try {
    console.log('Previous conversation length:', runningHistory.length);
    console.log('Last message:', runningHistory[runningHistory.length - 1].content);
    const llmConfig = getLLMConfig(provider);
    const customHandlers = setupCustomHandlers();
    run = await Run.create({
      runId: 'test-run-id',
      graphConfig: {
        type: 'standard',
        llmConfig,
        tools: [new Calculator()],
        instructions: 'You are a helpful AI assistant. Keep responses concise and friendly.',
      },
      returnContent: true,
      skipCleanup: true,
      customHandlers,
    });
    // Copy the snapshot so pushes below don't mutate `runningHistory` itself.
    conversationHistory = runningHistory.slice();
    conversationHistory.push(new HumanMessage('What else can you tell me?'));
    const inputs = {
      messages: conversationHistory,
    };
    const finalContentParts = await run.processStream(inputs, config);
    expect(finalContentParts).toBeDefined();
    // A plain textual follow-up should produce text-only content parts.
    const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
    expect(allTextParts).toBe(true);
    // NOTE(review): `collectedUsage` is shared across tests in this suite, so
    // index 0 may have been appended by an earlier run — confirm intent.
    expect(collectedUsage.length).toBeGreaterThan(0);
    expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
    expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
    const finalMessages = run.getRunMessages();
    expect(finalMessages).toBeDefined();
    expect(finalMessages?.length).toBeGreaterThan(0);
    console.log(`${capitalizeFirstLetter(provider)} follow-up message:`, finalMessages?.[finalMessages.length - 1]?.content);
    // Streaming must emit more than one message delta and at least one run step.
    expect(onMessageDeltaSpy).toHaveBeenCalled();
    expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
    expect(onRunStepSpy).toHaveBeenCalled();
    expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
  }
  catch (error) {
    // Tolerate environmental content-filter failures; rethrow anything else.
    if (isContentFilterError(error)) {
      console.warn('Skipping test: Azure content filter triggered');
      return;
    }
    throw error;
  }
});
|
|
269
|
-
// Regression test: with `disableStreaming: true`, the fallback dispatch path
// must not emit the model's response a second time. It captures every
// MESSAGE_DELTA payload and asserts the aggregated text equals exactly one
// copy of the response.
test(`${capitalizeFirstLetter(provider)}: disableStreaming should not duplicate message content`, async () => {
  if (contentFilterTriggered) {
    console.warn('Skipping test: Azure content filter was triggered in previous test');
    return;
  }
  try {
    const llmConfig = getLLMConfig(provider);
    // Same provider config as the other tests, but with streaming disabled.
    const nonStreamingConfig = {
      ...llmConfig,
      disableStreaming: true,
    };
    // Local (per-test) capture state, isolated from the suite-wide spies.
    const messageDeltaPayloads = [];
    const localRunStepSpy = jest.fn();
    const localAggregateContent = createContentAggregator();
    const localContentParts = localAggregateContent.contentParts;
    const localAggregate = localAggregateContent.aggregateContent;
    // Wire every relevant graph event into the local aggregator; the
    // ON_MESSAGE_DELTA handler additionally records each raw payload so the
    // test can count and compare individual delta texts.
    const customHandlers = {
      [GraphEvents.TOOL_END]: new ToolEndHandler(),
      [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
      [GraphEvents.ON_RUN_STEP_COMPLETED]: {
        handle: (event, data) => {
          localAggregate({
            event,
            data: data,
          });
        },
      },
      [GraphEvents.ON_RUN_STEP]: {
        handle: (event, data, metadata, graph) => {
          localRunStepSpy(event, data, metadata, graph);
          localAggregate({ event, data: data });
        },
      },
      [GraphEvents.ON_RUN_STEP_DELTA]: {
        handle: (event, data) => {
          localAggregate({ event, data: data });
        },
      },
      [GraphEvents.ON_MESSAGE_DELTA]: {
        handle: (event, data) => {
          messageDeltaPayloads.push(data);
          localAggregate({ event, data: data });
        },
      },
    };
    run = await Run.create({
      runId: 'azure-disable-streaming-dedup-test',
      graphConfig: {
        type: 'standard',
        llmConfig: nonStreamingConfig,
        tools: [],
        instructions: 'You are a helpful AI assistant. Respond with exactly one sentence.',
      },
      returnContent: true,
      skipCleanup: true,
      customHandlers,
    });
    conversationHistory.push(new HumanMessage('Hello'));
    const finalContentParts = await run.processStream({ messages: conversationHistory }, config);
    expect(finalContentParts).toBeDefined();
    expect(finalContentParts.length).toBeGreaterThan(0);
    expect(messageDeltaPayloads.length).toBeGreaterThan(0);
    // Flatten every delta payload down to its plain text pieces.
    const allTextDeltas = messageDeltaPayloads
      .flatMap((p) => p.delta.content ?? [])
      .filter((c) => c.type === ContentTypes.TEXT)
      .map((c) => ('text' in c ? c.text : ''));
    const combinedText = allTextDeltas.join('');
    /**
     * When model.stream() is available (the common path even with
     * disableStreaming), ChatModelStreamHandler already dispatches the full
     * text as a single MESSAGE_DELTA. The disableStreaming fallback block in
     * createCallModel must NOT dispatch the same content a second time.
     *
     * If the bug is present, the text is emitted twice and localContentParts
     * will contain duplicated text.
     */
    const aggregatedText = localContentParts
      .filter((p) => p.type === ContentTypes.TEXT)
      .map((p) => ('text' in p ? p.text : ''))
      .join('');
    console.log('Message delta count:', messageDeltaPayloads.length);
    console.log('Combined delta text length:', combinedText.length);
    console.log('Aggregated text length:', aggregatedText.length);
    /**
     * Each delta payload contains the FULL text (non-streaming returns a
     * single chunk). If the bug is present, we get >=2 identical payloads
     * and the aggregated text will be 2x the actual response.
     */
    const uniqueTexts = [...new Set(allTextDeltas)];
    expect(uniqueTexts.length).toBe(1);
    expect(uniqueTexts[0].length).toBeGreaterThan(0);
    const singleResponseText = uniqueTexts[0];
    // Both the aggregator view and the raw-delta view must equal exactly one
    // copy of the response — any duplication doubles one of them.
    expect(aggregatedText).toBe(singleResponseText);
    expect(combinedText).toBe(singleResponseText);
    console.log('disableStreaming dedup test passed — no duplicate content');
  }
  catch (error) {
    // Environmental content-filter failures are tolerated; rethrow the rest.
    if (isContentFilterError(error)) {
      console.warn('Skipping test: Azure content filter triggered');
      return;
    }
    throw error;
  }
});
|
|
373
|
-
// Sanity check: processing an empty message list with an empty config must
// reject rather than resolve.
test('should handle errors appropriately', async () => {
  const processEmptyInput = async () => {
    await run.processStream({ messages: [] }, {});
  };
  await expect(processEmptyInput).rejects.toThrow();
});
|
|
380
|
-
});
|
|
381
|
-
//# sourceMappingURL=azure.simple.test.js.map
|