@librechat/agents 2.0.5 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/common/enum.cjs +1 -0
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +10 -0
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +27 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/llm.cjs +1 -3
- package/dist/cjs/llm/anthropic/llm.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/fake.cjs +55 -0
- package/dist/cjs/llm/fake.cjs.map +1 -0
- package/dist/cjs/llm/providers.cjs +7 -5
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/text.cjs.map +1 -1
- package/dist/cjs/messages.cjs.map +1 -1
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/splitStream.cjs.map +1 -1
- package/dist/cjs/stream.cjs +93 -55
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/CodeExecutor.cjs +8 -2
- package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/utils/graph.cjs.map +1 -1
- package/dist/cjs/utils/llm.cjs.map +1 -1
- package/dist/cjs/utils/misc.cjs.map +1 -1
- package/dist/cjs/utils/run.cjs.map +1 -1
- package/dist/cjs/utils/title.cjs.map +1 -1
- package/dist/esm/common/enum.mjs +1 -0
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +10 -0
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +28 -2
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/anthropic/llm.mjs +1 -3
- package/dist/esm/llm/anthropic/llm.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/fake.mjs +52 -0
- package/dist/esm/llm/fake.mjs.map +1 -0
- package/dist/esm/llm/providers.mjs +8 -6
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/text.mjs.map +1 -1
- package/dist/esm/messages.mjs.map +1 -1
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/splitStream.mjs.map +1 -1
- package/dist/esm/stream.mjs +94 -56
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/CodeExecutor.mjs +9 -3
- package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/utils/graph.mjs.map +1 -1
- package/dist/esm/utils/llm.mjs.map +1 -1
- package/dist/esm/utils/misc.mjs.map +1 -1
- package/dist/esm/utils/run.mjs.map +1 -1
- package/dist/esm/utils/title.mjs.map +1 -1
- package/dist/types/common/enum.d.ts +2 -1
- package/dist/types/events.d.ts +4 -1
- package/dist/types/graphs/Graph.d.ts +8 -1
- package/dist/types/llm/fake.d.ts +21 -0
- package/dist/types/specs/spec.utils.d.ts +1 -0
- package/dist/types/stream.d.ts +9 -13
- package/dist/types/types/llm.d.ts +10 -5
- package/dist/types/types/stream.d.ts +12 -0
- package/package.json +15 -26
- package/src/common/enum.ts +1 -0
- package/src/events.ts +13 -1
- package/src/graphs/Graph.ts +31 -2
- package/src/llm/fake.ts +83 -0
- package/src/llm/providers.ts +7 -5
- package/src/scripts/simple.ts +28 -14
- package/src/specs/anthropic.simple.test.ts +204 -0
- package/src/specs/openai.simple.test.ts +204 -0
- package/src/specs/reasoning.test.ts +165 -0
- package/src/specs/spec.utils.ts +3 -0
- package/src/stream.ts +100 -72
- package/src/tools/CodeExecutor.ts +8 -2
- package/src/types/llm.ts +10 -5
- package/src/types/stream.ts +14 -1
- package/src/utils/llmConfig.ts +7 -1
package/src/llm/fake.ts
ADDED
@@ -0,0 +1,83 @@
+import type { BaseMessage } from '@langchain/core/messages';
+import type { ChatGenerationChunk } from '@langchain/core/outputs';
+import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+import { FakeListChatModel } from '@langchain/core/utils/testing';
+
+type SplitStrategy = {
+  type: 'regex' | 'fixed';
+  value: RegExp | number;
+};
+
+export class FakeChatModel extends FakeListChatModel {
+  private splitStrategy: SplitStrategy;
+
+  constructor({
+    responses,
+    sleep,
+    emitCustomEvent,
+    splitStrategy = { type: 'regex', value: /(?<=\s+)|(?=\s+)/ }
+  }: {
+    responses: string[];
+    sleep?: number;
+    emitCustomEvent?: boolean;
+    splitStrategy?: SplitStrategy;
+  }) {
+    super({ responses, sleep, emitCustomEvent });
+    this.splitStrategy = splitStrategy;
+  }
+
+  private splitText(text: string): string[] {
+    if (this.splitStrategy.type === 'regex') {
+      return text.split(this.splitStrategy.value as RegExp);
+    } else {
+      const chunkSize = this.splitStrategy.value as number;
+      const chunks: string[] = [];
+      for (let i = 0; i < text.length; i += chunkSize) {
+        chunks.push(text.slice(i, i + chunkSize));
+      }
+      return chunks;
+    }
+  }
+
+  async *_streamResponseChunks(
+    _messages: BaseMessage[],
+    options: this['ParsedCallOptions'],
+    runManager?: CallbackManagerForLLMRun
+  ): AsyncGenerator<ChatGenerationChunk> {
+    const response = this._currentResponse();
+    this._incrementResponse();
+
+    if (this.emitCustomEvent) {
+      await runManager?.handleCustomEvent('some_test_event', {
+        someval: true,
+      });
+    }
+
+    const chunks = this.splitText(response);
+
+    for await (const chunk of chunks) {
+      await this._sleepIfRequested();
+
+      if (options.thrownErrorString != null && options.thrownErrorString) {
+        throw new Error(options.thrownErrorString);
+      }
+
+      const responseChunk = this._createResponseChunk(chunk);
+      yield responseChunk;
+      void runManager?.handleLLMNewToken(chunk);
+    }
+  }
+}
+
+export function createFakeStreamingLLM(
+  responses: string[],
+  sleep?: number,
+  splitStrategy?: SplitStrategy
+): FakeChatModel {
+  return new FakeChatModel({
+    sleep,
+    responses,
+    emitCustomEvent: true,
+    splitStrategy,
+  });
+}
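
Note (not part of the published diff): the new FakeChatModel defaults to splitting each canned response on whitespace boundaries, while a 'fixed' strategy yields equal-sized slices. A minimal usage sketch, assuming only the exports shown above; the demo wrapper and sample strings are illustrative:

import { createFakeStreamingLLM } from '@/llm/fake';

async function demo(): Promise<void> {
  // Fixed-size chunking: every emitted chunk is at most 4 characters long.
  const llm = createFakeStreamingLLM(['Hello there, world!'], 0, {
    type: 'fixed',
    value: 4,
  });

  // stream() comes from the BaseChatModel parent; the fake ignores the input.
  for await (const chunk of await llm.stream('ignored input')) {
    console.log(chunk.content); // 'Hell', 'o th', 'ere,', ' wor', 'ld!'
  }
}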
package/src/llm/providers.ts
CHANGED
@@ -1,25 +1,27 @@
 // src/llm/providers.ts
 import { ChatOllama } from '@langchain/ollama';
+import { ChatDeepSeek } from '@langchain/deepseek';
+import { ChatMistralAI } from '@langchain/mistralai';
 import { ChatBedrockConverse } from '@langchain/aws';
 // import { ChatAnthropic } from '@langchain/anthropic';
-import { ChatMistralAI } from '@langchain/mistralai';
 import { ChatVertexAI } from '@langchain/google-vertexai';
 import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai';
 import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
 import type { ChatModelConstructorMap, ProviderOptionsMap, ChatModelMap } from '@/types';
-import { Providers } from '@/common';
 import { CustomAnthropic } from '@/llm/anthropic/llm';
+import { Providers } from '@/common';
 
 export const llmProviders: Partial<ChatModelConstructorMap> = {
   [Providers.OPENAI]: ChatOpenAI,
-  [Providers.AZURE]: AzureChatOpenAI,
   [Providers.OLLAMA]: ChatOllama,
+  [Providers.AZURE]: AzureChatOpenAI,
   [Providers.VERTEXAI]: ChatVertexAI,
-  [Providers.
+  [Providers.DEEPSEEK]: ChatDeepSeek,
   [Providers.MISTRALAI]: ChatMistralAI,
-  [Providers.BEDROCK]: ChatBedrockConverse,
   [Providers.ANTHROPIC]: CustomAnthropic,
+  [Providers.BEDROCK_LEGACY]: BedrockChat,
+  [Providers.BEDROCK]: ChatBedrockConverse,
   // [Providers.ANTHROPIC]: ChatAnthropic,
   [Providers.GOOGLE]: ChatGoogleGenerativeAI,
 };
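
Note (not part of the diff): 2.1.0 registers DeepSeek under Providers.DEEPSEEK and keeps the older BedrockChat class reachable via Providers.BEDROCK_LEGACY. A sketch of how an entry in the constructor map might resolve, assuming the exports above; the config fields passed to the constructor are illustrative:

import { llmProviders } from '@/llm/providers';
import { Providers } from '@/common';

const ChatModelClass = llmProviders[Providers.DEEPSEEK];
if (ChatModelClass) {
  // Resolves to ChatDeepSeek as of 2.1.0; field names here are assumptions.
  const model = new ChatModelClass({ model: 'deepseek-chat', temperature: 0 });
}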
package/src/scripts/simple.ts
CHANGED
@@ -12,10 +12,12 @@ import { GraphEvents } from '@/common';
 import { Run } from '@/run';
 
 const conversationHistory: BaseMessage[] = [];
+let _contentParts: t.MessageContentComplex[] = [];
 
 async function testStandardStreaming(): Promise<void> {
   const { userName, location, provider, currentDate } = await getArgs();
   const { contentParts, aggregateContent } = createContentAggregator();
+  _contentParts = contentParts as t.MessageContentComplex[];
   const customHandlers = {
     [GraphEvents.TOOL_END]: new ToolEndHandler(),
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
@@ -48,6 +50,13 @@ async function testStandardStreaming(): Promise<void> {
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
       }
     },
+    [GraphEvents.ON_REASONING_DELTA]: {
+      handle: (event: GraphEvents.ON_REASONING_DELTA, data: t.StreamEventData): void => {
+        console.log('====== ON_REASONING_DELTA ======');
+        console.dir(data, { depth: null });
+        aggregateContent({ event, data: data as t.ReasoningDeltaEvent });
+      }
+    },
     [GraphEvents.TOOL_START]: {
       handle: (_event: string, data: t.StreamEventData, metadata?: Record<string, unknown>): void => {
         console.log('====== TOOL_START ======');
@@ -63,7 +72,7 @@ async function testStandardStreaming(): Promise<void> {
     graphConfig: {
       type: 'standard',
       llmConfig,
-      tools: [new TavilySearchResults()],
+      // tools: [new TavilySearchResults()],
       instructions: 'You are a friendly AI assistant. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
     },
@@ -81,7 +90,7 @@ async function testStandardStreaming(): Promise<void> {
 
   console.log('Test 1: Simple message test');
 
-  const userMessage = `hi
+  const userMessage = `hi`;
 
   conversationHistory.push(new HumanMessage(userMessage));
 
@@ -97,23 +106,26 @@ async function testStandardStreaming(): Promise<void> {
   // console.dir(finalContentParts, { depth: null });
   console.log('\n\n====================\n\n');
   console.dir(contentParts, { depth: null });
-  const { handleLLMEnd, collected } = createMetadataAggregator();
-  const titleResult = await run.generateTitle({
-    inputText: userMessage,
-    contentParts,
-    chainOptions: {
-      callbacks: [{
-        handleLLMEnd,
-      }],
-    },
-  });
-  console.log('Generated Title:', titleResult);
-  console.log('Collected metadata:', collected);
+  // const { handleLLMEnd, collected } = createMetadataAggregator();
+  // const titleResult = await run.generateTitle({
+  //   inputText: userMessage,
+  //   contentParts,
+  //   chainOptions: {
+  //     callbacks: [{
+  //       handleLLMEnd,
+  //     }],
+  //   },
+  // });
+  // console.log('Generated Title:', titleResult);
+  // console.log('Collected metadata:', collected);
 }
 
 process.on('unhandledRejection', (reason, promise) => {
   console.error('Unhandled Rejection at:', promise, 'reason:', reason);
   console.log('Conversation history:');
+  console.dir(conversationHistory, { depth: null });
+  console.log('Content parts:');
+  console.dir(_contentParts, { depth: null });
   process.exit(1);
 });
 
@@ -125,5 +137,7 @@ testStandardStreaming().catch((err) => {
   console.error(err);
   console.log('Conversation history:');
   console.dir(conversationHistory, { depth: null });
+  console.log('Content parts:');
+  console.dir(_contentParts, { depth: null });
   process.exit(1);
 });
package/src/specs/anthropic.simple.test.ts
ADDED
@@ -0,0 +1,204 @@
+/* eslint-disable no-console */
+/* eslint-disable @typescript-eslint/no-explicit-any */
+// src/scripts/cli.test.ts
+import { config } from 'dotenv';
+config();
+import { Calculator } from '@langchain/community/tools/calculator';
+import { HumanMessage, BaseMessage, UsageMetadata } from '@langchain/core/messages';
+import type { StandardGraph } from '@/graphs';
+import type * as t from '@/types';
+import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+import { ContentTypes, GraphEvents, Providers } from '@/common';
+import { capitalizeFirstLetter } from './spec.utils';
+import { getLLMConfig } from '@/utils/llmConfig';
+import { getArgs } from '@/scripts/args';
+import { Run } from '@/run';
+
+const provider = Providers.ANTHROPIC;
+describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
+  jest.setTimeout(30000);
+  let run: Run<t.IState>;
+  let runningHistory: BaseMessage[];
+  let collectedUsage: UsageMetadata[];
+  let conversationHistory: BaseMessage[];
+  let aggregateContent: t.ContentAggregator;
+  let contentParts: t.MessageContentComplex[];
+
+  const config = {
+    configurable: {
+      thread_id: 'conversation-num-1',
+    },
+    streamMode: 'values',
+    version: 'v2' as const,
+  };
+
+  beforeEach(async () => {
+    conversationHistory = [];
+    collectedUsage = [];
+    const { contentParts: cp, aggregateContent: ac } = createContentAggregator();
+    contentParts = cp as t.MessageContentComplex[];
+    aggregateContent = ac;
+  });
+
+  const onMessageDeltaSpy = jest.fn();
+  const onRunStepSpy = jest.fn();
+
+  afterAll(() => {
+    onMessageDeltaSpy.mockReset();
+    onRunStepSpy.mockReset();
+  });
+
+  const setupCustomHandlers = (): Record<string | GraphEvents, t.EventHandler> => ({
+    [GraphEvents.TOOL_END]: new ToolEndHandler(),
+    [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
+    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+    [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent; } });
+      }
+    },
+    [GraphEvents.ON_RUN_STEP]: {
+      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData, metadata, graph): void => {
+        onRunStepSpy(event, data, metadata, graph);
+        aggregateContent({ event, data: data as t.RunStep });
+      }
+    },
+    [GraphEvents.ON_RUN_STEP_DELTA]: {
+      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+        aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+      }
+    },
+    [GraphEvents.ON_MESSAGE_DELTA]: {
+      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData, metadata, graph): void => {
+        onMessageDeltaSpy(event, data, metadata, graph);
+        aggregateContent({ event, data: data as t.MessageDeltaEvent });
+      }
+    },
+    [GraphEvents.TOOL_START]: {
+      handle: (_event: string, _data: t.StreamEventData, _metadata?: Record<string, unknown>): void => {
+        // Handle tool start
+      }
+    },
+  });
+
+  test(`${capitalizeFirstLetter(provider)}: should process a simple message, generate title`, async () => {
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    const userMessage = 'hi';
+    conversationHistory.push(new HumanMessage(userMessage));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+    const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
+    expect(allTextParts).toBe(true);
+    expect(collectedUsage.length).toBeGreaterThan(0);
+    expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
+    expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
+
+    const finalMessages = run.getRunMessages();
+    expect(finalMessages).toBeDefined();
+    conversationHistory.push(...finalMessages ?? []);
+    expect(conversationHistory.length).toBeGreaterThan(1);
+    runningHistory = conversationHistory.slice();
+
+    expect(onMessageDeltaSpy).toHaveBeenCalled();
+    expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+    expect((onMessageDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+    expect(onRunStepSpy).toHaveBeenCalled();
+    expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+    expect((onRunStepSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+    const { handleLLMEnd, collected } = createMetadataAggregator();
+    const titleResult = await run.generateTitle({
+      inputText: userMessage,
+      contentParts,
+      chainOptions: {
+        callbacks: [{
+          handleLLMEnd,
+        }],
+      },
+    });
+
+    expect(titleResult).toBeDefined();
+    expect(titleResult.title).toBeDefined();
+    expect(titleResult.language).toBeDefined();
+    expect(collected).toBeDefined();
+  });
+
+  test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
+    console.log('Previous conversation length:', runningHistory.length);
+    console.log('Last message:', runningHistory[runningHistory.length - 1].content);
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    conversationHistory = runningHistory.slice();
+    conversationHistory.push(new HumanMessage('how are you?'));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+    const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
+    expect(allTextParts).toBe(true);
+    expect(collectedUsage.length).toBeGreaterThan(0);
+    expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
+    expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
+
+    const finalMessages = run.getRunMessages();
+    expect(finalMessages).toBeDefined();
+    expect(finalMessages?.length).toBeGreaterThan(0);
+    console.log(`${capitalizeFirstLetter(provider)} follow-up message:`, finalMessages?.[finalMessages.length - 1]?.content);
+
+    expect(onMessageDeltaSpy).toHaveBeenCalled();
+    expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+
+    expect(onRunStepSpy).toHaveBeenCalled();
+    expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+  });
+
+  test('should handle errors appropriately', async () => {
+    // Test error scenarios
+    await expect(async () => {
+      await run.processStream({
+        messages: [],
+      }, {} as any);
+    }).rejects.toThrow();
+  });
+});
package/src/specs/openai.simple.test.ts
ADDED
@@ -0,0 +1,204 @@
+/* eslint-disable no-console */
+/* eslint-disable @typescript-eslint/no-explicit-any */
+// src/scripts/cli.test.ts
+import { config } from 'dotenv';
+config();
+import { Calculator } from '@langchain/community/tools/calculator';
+import { HumanMessage, BaseMessage, UsageMetadata } from '@langchain/core/messages';
+import type { StandardGraph } from '@/graphs';
+import type * as t from '@/types';
+import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+import { ContentTypes, GraphEvents, Providers } from '@/common';
+import { capitalizeFirstLetter } from './spec.utils';
+import { getLLMConfig } from '@/utils/llmConfig';
+import { getArgs } from '@/scripts/args';
+import { Run } from '@/run';
+
+const provider = Providers.OPENAI;
+describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
+  jest.setTimeout(30000);
+  let run: Run<t.IState>;
+  let runningHistory: BaseMessage[];
+  let collectedUsage: UsageMetadata[];
+  let conversationHistory: BaseMessage[];
+  let aggregateContent: t.ContentAggregator;
+  let contentParts: t.MessageContentComplex[];
+
+  const config = {
+    configurable: {
+      thread_id: 'conversation-num-1',
+    },
+    streamMode: 'values',
+    version: 'v2' as const,
+  };
+
+  beforeEach(async () => {
+    conversationHistory = [];
+    collectedUsage = [];
+    const { contentParts: cp, aggregateContent: ac } = createContentAggregator();
+    contentParts = cp as t.MessageContentComplex[];
+    aggregateContent = ac;
+  });
+
+  const onMessageDeltaSpy = jest.fn();
+  const onRunStepSpy = jest.fn();
+
+  afterAll(() => {
+    onMessageDeltaSpy.mockReset();
+    onRunStepSpy.mockReset();
+  });
+
+  const setupCustomHandlers = (): Record<string | GraphEvents, t.EventHandler> => ({
+    [GraphEvents.TOOL_END]: new ToolEndHandler(),
+    [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
+    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+    [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent; } });
+      }
+    },
+    [GraphEvents.ON_RUN_STEP]: {
+      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData, metadata, graph): void => {
+        onRunStepSpy(event, data, metadata, graph);
+        aggregateContent({ event, data: data as t.RunStep });
+      }
+    },
+    [GraphEvents.ON_RUN_STEP_DELTA]: {
+      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+        aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+      }
+    },
+    [GraphEvents.ON_MESSAGE_DELTA]: {
+      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData, metadata, graph): void => {
+        onMessageDeltaSpy(event, data, metadata, graph);
+        aggregateContent({ event, data: data as t.MessageDeltaEvent });
+      }
+    },
+    [GraphEvents.TOOL_START]: {
+      handle: (_event: string, _data: t.StreamEventData, _metadata?: Record<string, unknown>): void => {
+        // Handle tool start
+      }
+    },
+  });
+
+  test(`${capitalizeFirstLetter(provider)}: should process a simple message, generate title`, async () => {
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    const userMessage = 'hi';
+    conversationHistory.push(new HumanMessage(userMessage));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+    const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
+    expect(allTextParts).toBe(true);
+    expect(collectedUsage.length).toBeGreaterThan(0);
+    expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
+    expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
+
+    const finalMessages = run.getRunMessages();
+    expect(finalMessages).toBeDefined();
+    conversationHistory.push(...finalMessages ?? []);
+    expect(conversationHistory.length).toBeGreaterThan(1);
+    runningHistory = conversationHistory.slice();
+
+    expect(onMessageDeltaSpy).toHaveBeenCalled();
+    expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+    expect((onMessageDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+    expect(onRunStepSpy).toHaveBeenCalled();
+    expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+    expect((onRunStepSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+    const { handleLLMEnd, collected } = createMetadataAggregator();
+    const titleResult = await run.generateTitle({
+      inputText: userMessage,
+      contentParts,
+      chainOptions: {
+        callbacks: [{
+          handleLLMEnd,
+        }],
+      },
+    });
+
+    expect(titleResult).toBeDefined();
+    expect(titleResult.title).toBeDefined();
+    expect(titleResult.language).toBeDefined();
+    expect(collected).toBeDefined();
+  });
+
+  test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
+    console.log('Previous conversation length:', runningHistory.length);
+    console.log('Last message:', runningHistory[runningHistory.length - 1].content);
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    conversationHistory = runningHistory.slice();
+    conversationHistory.push(new HumanMessage('how are you?'));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+    const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
+    expect(allTextParts).toBe(true);
+    expect(collectedUsage.length).toBeGreaterThan(0);
+    expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
+    expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
+
+    const finalMessages = run.getRunMessages();
+    expect(finalMessages).toBeDefined();
+    expect(finalMessages?.length).toBeGreaterThan(0);
+    console.log(`${capitalizeFirstLetter(provider)} follow-up message:`, finalMessages?.[finalMessages.length - 1]?.content);
+
+    expect(onMessageDeltaSpy).toHaveBeenCalled();
+    expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+
+    expect(onRunStepSpy).toHaveBeenCalled();
+    expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+  });
+
+  test('should handle errors appropriately', async () => {
+    // Test error scenarios
+    await expect(async () => {
+      await run.processStream({
+        messages: [],
+      }, {} as any);
+    }).rejects.toThrow();
+  });
+});
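
Note (not part of the diff): both specs import capitalizeFirstLetter from the new spec.utils.ts, whose body is not shown here; the file list records it as three added lines. A plausible implementation consistent with that usage, offered only as an assumption:

// Assumed implementation; inferred from usage in the specs, not shown in the published diff.
export function capitalizeFirstLetter(str: string): string {
  return str.charAt(0).toUpperCase() + str.slice(1);
}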