@librechat/agents 2.0.4 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/dist/cjs/common/enum.cjs +1 -0
  2. package/dist/cjs/common/enum.cjs.map +1 -1
  3. package/dist/cjs/events.cjs +10 -0
  4. package/dist/cjs/events.cjs.map +1 -1
  5. package/dist/cjs/graphs/Graph.cjs +38 -2
  6. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  7. package/dist/cjs/llm/anthropic/llm.cjs +1 -3
  8. package/dist/cjs/llm/anthropic/llm.cjs.map +1 -1
  9. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  10. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  11. package/dist/cjs/llm/fake.cjs +55 -0
  12. package/dist/cjs/llm/fake.cjs.map +1 -0
  13. package/dist/cjs/llm/providers.cjs +7 -5
  14. package/dist/cjs/llm/providers.cjs.map +1 -1
  15. package/dist/cjs/llm/text.cjs.map +1 -1
  16. package/dist/cjs/messages.cjs.map +1 -1
  17. package/dist/cjs/run.cjs.map +1 -1
  18. package/dist/cjs/splitStream.cjs.map +1 -1
  19. package/dist/cjs/stream.cjs +97 -20
  20. package/dist/cjs/stream.cjs.map +1 -1
  21. package/dist/cjs/tools/CodeExecutor.cjs +8 -2
  22. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  23. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  24. package/dist/cjs/utils/graph.cjs.map +1 -1
  25. package/dist/cjs/utils/llm.cjs.map +1 -1
  26. package/dist/cjs/utils/misc.cjs.map +1 -1
  27. package/dist/cjs/utils/run.cjs.map +1 -1
  28. package/dist/cjs/utils/title.cjs.map +1 -1
  29. package/dist/esm/common/enum.mjs +1 -0
  30. package/dist/esm/common/enum.mjs.map +1 -1
  31. package/dist/esm/events.mjs +10 -0
  32. package/dist/esm/events.mjs.map +1 -1
  33. package/dist/esm/graphs/Graph.mjs +39 -3
  34. package/dist/esm/graphs/Graph.mjs.map +1 -1
  35. package/dist/esm/llm/anthropic/llm.mjs +1 -3
  36. package/dist/esm/llm/anthropic/llm.mjs.map +1 -1
  37. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  38. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  39. package/dist/esm/llm/fake.mjs +52 -0
  40. package/dist/esm/llm/fake.mjs.map +1 -0
  41. package/dist/esm/llm/providers.mjs +8 -6
  42. package/dist/esm/llm/providers.mjs.map +1 -1
  43. package/dist/esm/llm/text.mjs.map +1 -1
  44. package/dist/esm/messages.mjs.map +1 -1
  45. package/dist/esm/run.mjs.map +1 -1
  46. package/dist/esm/splitStream.mjs.map +1 -1
  47. package/dist/esm/stream.mjs +98 -21
  48. package/dist/esm/stream.mjs.map +1 -1
  49. package/dist/esm/tools/CodeExecutor.mjs +9 -3
  50. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  51. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  52. package/dist/esm/utils/graph.mjs.map +1 -1
  53. package/dist/esm/utils/llm.mjs.map +1 -1
  54. package/dist/esm/utils/misc.mjs.map +1 -1
  55. package/dist/esm/utils/run.mjs.map +1 -1
  56. package/dist/esm/utils/title.mjs.map +1 -1
  57. package/dist/types/common/enum.d.ts +2 -1
  58. package/dist/types/events.d.ts +4 -1
  59. package/dist/types/graphs/Graph.d.ts +9 -1
  60. package/dist/types/llm/fake.d.ts +21 -0
  61. package/dist/types/specs/spec.utils.d.ts +1 -0
  62. package/dist/types/stream.d.ts +9 -13
  63. package/dist/types/types/llm.d.ts +10 -5
  64. package/dist/types/types/stream.d.ts +12 -0
  65. package/package.json +15 -26
  66. package/src/common/enum.ts +1 -0
  67. package/src/events.ts +13 -1
  68. package/src/graphs/Graph.ts +43 -4
  69. package/src/llm/fake.ts +83 -0
  70. package/src/llm/providers.ts +7 -5
  71. package/src/scripts/simple.ts +28 -14
  72. package/src/specs/anthropic.simple.test.ts +204 -0
  73. package/src/specs/openai.simple.test.ts +204 -0
  74. package/src/specs/reasoning.test.ts +165 -0
  75. package/src/specs/spec.utils.ts +3 -0
  76. package/src/stream.ts +104 -36
  77. package/src/tools/CodeExecutor.ts +8 -2
  78. package/src/types/llm.ts +10 -5
  79. package/src/types/stream.ts +14 -1
  80. package/src/utils/llmConfig.ts +17 -1
package/src/specs/reasoning.test.ts ADDED
@@ -0,0 +1,165 @@
+ /* eslint-disable no-console */
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ // src/scripts/cli.test.ts
+ import { config } from 'dotenv';
+ config();
+ import { HumanMessage, BaseMessage, MessageContentText } from '@langchain/core/messages';
+ import type { RunnableConfig } from '@langchain/core/runnables';
+ import type { StandardGraph } from '@/graphs';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { capitalizeFirstLetter } from './spec.utils';
+ import { GraphEvents, Providers } from '@/common';
+ import { getLLMConfig } from '@/utils/llmConfig';
+ import { getArgs } from '@/scripts/args';
+ import { Run } from '@/run';
+
+ const reasoningText = `<think>
+ Okay, the user is Jo from New York. I should start by greeting them by name. Let's keep it friendly and open-ended. Maybe mention the weather in New York to make it personal. Then offer help with something specific like plans or questions. Need to keep it concise and welcoming. Check for any typos. Alright, that should work.
+ </think>
+ Hi Jo! 🌆 How's everything in New York today? Whether you need recommendations for the city, help with a task, or just want to chat, I'm here for it. What's on your mind? 😊`;
+
+ const provider = 'Reasoning LLM';
+ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
+   jest.setTimeout(30000);
+   let run: Run<t.IState>;
+   let contentParts: t.MessageContentComplex[];
+   let conversationHistory: BaseMessage[];
+   let aggregateContent: t.ContentAggregator;
+   let runSteps: Set<string>;
+
+   const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+     configurable: {
+       thread_id: 'conversation-num-1',
+     },
+     streamMode: 'values',
+     version: 'v2' as const,
+     callbacks: [{
+       async handleCustomEvent(event, data, metadata): Promise<void> {
+         if (event !== GraphEvents.ON_MESSAGE_DELTA) {
+           return;
+         }
+         const messageDeltaData = data as t.MessageDeltaEvent;
+
+         // Wait until we see the run step (with timeout for safety)
+         const maxAttempts = 50; // 5 seconds total
+         let attempts = 0;
+         while (!runSteps.has(messageDeltaData.id) && attempts < maxAttempts) {
+           await new Promise(resolve => setTimeout(resolve, 100));
+           attempts++;
+         }
+
+         if (!runSteps.has(messageDeltaData.id)) {
+           console.warn(`Timeout waiting for run step: ${messageDeltaData.id}`);
+         }
+
+         onMessageDeltaSpy(event, data, metadata, run.Graph);
+         aggregateContent({ event, data: messageDeltaData });
+       },
+     }],
+   };
+
+   beforeEach(async () => {
+     conversationHistory = [];
+     const { contentParts: parts, aggregateContent: ac } = createContentAggregator();
+     aggregateContent = ac;
+     runSteps = new Set();
+     contentParts = parts as t.MessageContentComplex[];
+   });
+
+   afterEach(() => {
+     runSteps.clear();
+   });
+
+   const onReasoningDeltaSpy = jest.fn();
+   const onMessageDeltaSpy = jest.fn();
+   const onRunStepSpy = jest.fn();
+
+   afterAll(() => {
+     onReasoningDeltaSpy.mockReset();
+     onMessageDeltaSpy.mockReset();
+     onRunStepSpy.mockReset();
+   });
+
+   const setupCustomHandlers = (): Record<string | GraphEvents, t.EventHandler> => ({
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+         aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent; } });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData, metadata, graph): void => {
+         const runStepData = data as t.RunStep;
+         runSteps.add(runStepData.id);
+
+         onRunStepSpy(event, runStepData, metadata, graph);
+         aggregateContent({ event, data: runStepData });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       }
+     },
+     [GraphEvents.ON_REASONING_DELTA]: {
+       handle: (event: GraphEvents.ON_REASONING_DELTA, data: t.StreamEventData, metadata, graph): void => {
+         onReasoningDeltaSpy(event, data, metadata, graph);
+         aggregateContent({ event, data: data as t.ReasoningDeltaEvent });
+       }
+     },
+   });
+
+   test(`${capitalizeFirstLetter(provider)}: should process a simple reasoning message`, async () => {
+     const { userName, location } = await getArgs();
+     const llmConfig = getLLMConfig(Providers.OPENAI);
+     const customHandlers = setupCustomHandlers();
+
+     run = await Run.create<t.IState>({
+       runId: 'test-run-id',
+       graphConfig: {
+         type: 'standard',
+         llmConfig,
+         instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+         additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+       },
+       returnContent: true,
+       customHandlers,
+     });
+
+     run.Graph?.overrideTestModel([reasoningText], 2);
+
+     const userMessage = 'hi';
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     await run.processStream(inputs, config);
+     expect(contentParts).toBeDefined();
+     expect(contentParts.length).toBe(2);
+     const reasoningContent = reasoningText.match(/<think>(.*)<\/think>/s)?.[0];
+     const content = reasoningText.split(/<\/think>/)[1];
+     expect((contentParts[0] as t.ReasoningContentText).think).toBe(reasoningContent);
+     expect((contentParts[1] as MessageContentText).text).toBe(content);
+
+     const finalMessages = run.getRunMessages();
+     expect(finalMessages).toBeDefined();
+     conversationHistory.push(...finalMessages ?? []);
+     expect(conversationHistory.length).toBeGreaterThan(1);
+
+     expect(onMessageDeltaSpy).toHaveBeenCalled();
+     expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+     expect((onMessageDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+     expect(onReasoningDeltaSpy).toHaveBeenCalled();
+     expect(onReasoningDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+     expect((onReasoningDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+     expect(onRunStepSpy).toHaveBeenCalled();
+     expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+     expect((onRunStepSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+   });
+ });
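
The assertions above recover the two content parts from the raw model output with a plain regex match and a split on the closing tag; in isolation (sample string is illustrative, not from the package):

    const sample = '<think>Plan the greeting first.</think>Hi Jo!';
    // Part 0: the whole <think>…</think> block becomes the reasoning part
    const think = sample.match(/<think>(.*)<\/think>/s)?.[0];
    // Part 1: everything after </think> becomes the message text
    const text = sample.split(/<\/think>/)[1];
    // think === '<think>Plan the greeting first.</think>'; text === 'Hi Jo!'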
package/src/specs/spec.utils.ts ADDED
@@ -0,0 +1,3 @@
+ export function capitalizeFirstLetter(string: string): string {
+   return string.charAt(0).toUpperCase() + string.slice(1);
+ }
package/src/stream.ts CHANGED
@@ -1,7 +1,7 @@
  // src/stream.ts
  import { nanoid } from 'nanoid';
  import type { AIMessageChunk } from '@langchain/core/messages';
- import type { ToolCall } from '@langchain/core/messages/tool';
+ import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
  import type { Graph } from '@/graphs';
  import type * as t from '@/types';
  import { StepTypes, ContentTypes, GraphEvents, ToolCallTypes } from '@/common';
@@ -83,6 +83,7 @@ export const handleToolCalls = (toolCalls?: ToolCall[], metadata?: Record<string
    /* If the previous step exists and is a message creation */
    if (prevStepId && prevRunStep && prevRunStep.type === StepTypes.MESSAGE_CREATION) {
      dispatchToolCallIds(prevStepId);
+     graph.messageStepHasToolCalls.set(prevStepId, true);
      /* If the previous step doesn't exist or is not a message creation */
    } else if (!prevRunStep || prevRunStep.type !== StepTypes.MESSAGE_CREATION) {
      const messageId = getMessageId(stepKey, graph, true) ?? '';
@@ -93,6 +94,7 @@ export const handleToolCalls = (toolCalls?: ToolCall[], metadata?: Record<string
        },
      });
      dispatchToolCallIds(stepId);
+     graph.messageStepHasToolCalls.set(prevStepId, true);
    }
    graph.dispatchRunStep(stepKey, {
      type: StepTypes.TOOL_CALLS,
@@ -105,27 +107,25 @@ export class ChatModelStreamHandler implements t.EventHandler {
      if (!graph) {
        throw new Error('Graph not found');
      }
-
-     const chunk = data.chunk as AIMessageChunk | undefined;
-     const content = chunk?.content;
-
      if (!graph.config) {
        throw new Error('Config not found in graph');
      }
-
-     if (!chunk) {
+     if (!data.chunk) {
        console.warn(`No chunk found in ${event} event`);
        return;
      }

-     let hasToolCalls = false;
-     const hasToolCallChunks = (chunk.tool_call_chunks && chunk.tool_call_chunks.length > 0) ?? false;
+     const chunk = data.chunk as Partial<AIMessageChunk>;
+     const content = (chunk.additional_kwargs?.[graph.reasoningKey] as string | undefined) ?? chunk.content;
+     this.handleReasoning(chunk, graph);

+     let hasToolCalls = false;
      if (chunk.tool_calls && chunk.tool_calls.length > 0 && chunk.tool_calls.every((tc) => tc.id)) {
        hasToolCalls = true;
        handleToolCalls(chunk.tool_calls, metadata, graph);
      }

+     const hasToolCallChunks = (chunk.tool_call_chunks && chunk.tool_call_chunks.length > 0) ?? false;
      const isEmptyContent = typeof content === 'undefined' || !content.length || typeof content === 'string' && !content;
      const isEmptyChunk = isEmptyContent && !hasToolCallChunks;
      const chunkId = chunk.id ?? '';
@@ -149,13 +149,7 @@
        && chunk.tool_call_chunks
        && chunk.tool_call_chunks.length
        && typeof chunk.tool_call_chunks[0]?.index === 'number') {
-       const prevStepId = graph.getStepIdByKey(stepKey, graph.contentData.length - 1);
-       const prevRunStep = graph.getRunStep(prevStepId);
-       const stepId = graph.getStepIdByKey(stepKey, prevRunStep?.index);
-       graph.dispatchRunStepDelta(stepId, {
-         type: StepTypes.TOOL_CALLS,
-         tool_calls: chunk.tool_call_chunks,
-       });
+       this.handleToolCallChunks({ graph, stepKey, toolCallChunks: chunk.tool_call_chunks });
      }

      if (isEmptyContent) {
@@ -200,32 +194,106 @@ hasToolCallChunks: ${hasToolCallChunks}
      } else if (hasToolCallChunks && (chunk.tool_call_chunks?.some((tc) => tc.args === content) ?? false)) {
        return;
      } else if (typeof content === 'string') {
+       if (graph.currentTokenType === ContentTypes.TEXT) {
+         graph.dispatchMessageDelta(stepId, {
+           content: [{
+             type: ContentTypes.TEXT,
+             text: content,
+           }],
+         });
+       } else {
+         graph.dispatchReasoningDelta(stepId, {
+           content: [{
+             type: ContentTypes.THINK,
+             think: content,
+           }],
+         });
+       }
+     } else if (content.every((c) => c.type?.startsWith(ContentTypes.TEXT))) {
        graph.dispatchMessageDelta(stepId, {
+         content,
+       });
+     }
+   }
+   handleToolCallChunks = ({
+     graph,
+     stepKey,
+     toolCallChunks,
+   }: {
+     graph: Graph;
+     stepKey: string;
+     toolCallChunks: ToolCallChunk[],
+   }): void => {
+     const prevStepId = graph.getStepIdByKey(stepKey, graph.contentData.length - 1);
+     const prevRunStep = graph.getRunStep(prevStepId);
+     const _stepId = graph.getStepIdByKey(stepKey, prevRunStep?.index);
+     /** Edge Case: Tool Call Run Step or `tool_call_ids` never dispatched */
+     const tool_calls: ToolCall[] | undefined =
+       prevStepId && prevRunStep && prevRunStep.type === StepTypes.MESSAGE_CREATION
+         ? []
+         : undefined;
+     /** Edge Case: `id` and `name` fields cannot be empty strings */
+     for (const toolCallChunk of toolCallChunks) {
+       if (toolCallChunk.name === '') {
+         toolCallChunk.name = undefined;
+       }
+       if (toolCallChunk.id === '') {
+         toolCallChunk.id = undefined;
+       } else if (tool_calls != null && toolCallChunk.id != null && toolCallChunk.name != null) {
+         tool_calls.push({
+           args: {},
+           id: toolCallChunk.id,
+           name: toolCallChunk.name,
+           type: ToolCallTypes.TOOL_CALL,
+         });
+       }
+     }
+
+     let stepId: string = _stepId;
+     const alreadyDispatched = prevRunStep?.type === StepTypes.MESSAGE_CREATION && graph.messageStepHasToolCalls.has(prevStepId);
+     if (!alreadyDispatched && tool_calls?.length === toolCallChunks.length) {
+       graph.dispatchMessageDelta(prevStepId, {
          content: [{
-           type: 'text',
-           text: content,
+           type: ContentTypes.TEXT,
+           text: '',
+           tool_call_ids: tool_calls.map((tc) => tc.id ?? ''),
          }],
        });
-     } else if (content.every((c) => c.type?.startsWith('text'))) {
-       graph.dispatchMessageDelta(stepId, {
-         content,
+       graph.messageStepHasToolCalls.set(prevStepId, true);
+       stepId = graph.dispatchRunStep(stepKey, {
+         type: StepTypes.TOOL_CALLS,
+         tool_calls,
        });
      }
+     graph.dispatchRunStepDelta(stepId, {
+       type: StepTypes.TOOL_CALLS,
+       tool_calls: toolCallChunks,
+     });
+   };
+   handleReasoning(chunk: Partial<AIMessageChunk>, graph: Graph): void {
+     const reasoning_content = chunk.additional_kwargs?.[graph.reasoningKey] as string | undefined;
+     if (reasoning_content != null && reasoning_content && (chunk.content == null || chunk.content === '')) {
+       graph.currentTokenType = ContentTypes.THINK;
+       graph.tokenTypeSwitch = 'reasoning';
+       return;
+     } else if (graph.tokenTypeSwitch === 'reasoning' && graph.currentTokenType !== ContentTypes.TEXT && chunk.content != null && chunk.content !== '') {
+       graph.currentTokenType = ContentTypes.TEXT;
+       graph.tokenTypeSwitch = 'content';
+     } else if (chunk.content != null && typeof chunk.content === 'string' && chunk.content.includes('<think>')) {
+       graph.currentTokenType = ContentTypes.THINK;
+       graph.tokenTypeSwitch = 'content';
+     } else if (graph.lastToken != null && graph.lastToken.includes('</think>')) {
+       graph.currentTokenType = ContentTypes.TEXT;
+       graph.tokenTypeSwitch = 'content';
+     }
+     if (typeof chunk.content !== 'string') {
+       return;
+     }
+     graph.lastToken = chunk.content;
    }
  }

- export type ContentAggregatorResult = {
-   stepMap: Map<string, t.RunStep | undefined>;
-   contentParts: Array<t.MessageContentComplex | undefined>;
-   aggregateContent: ({ event, data }: {
-     event: GraphEvents;
-     data: t.RunStep | t.MessageDeltaEvent | t.RunStepDeltaEvent | {
-       result: t.ToolEndEvent;
-     };
-   }) => void
- };
-
- export function createContentAggregator(): ContentAggregatorResult {
+ export function createContentAggregator(): t.ContentAggregatorResult {
    const contentParts: Array<t.MessageContentComplex | undefined> = [];
    const stepMap = new Map<string, t.RunStep>();
    const toolCallIdMap = new Map<string, string>();
@@ -349,7 +417,7 @@ export function createContentAggregator(): ContentAggregatorResult {
      const reasoningDelta = data as t.ReasoningDeltaEvent;
      const runStep = stepMap.get(reasoningDelta.id);
      if (!runStep) {
-       console.warn('No run step or runId found for message delta event');
+       console.warn('No run step or runId found for reasoning delta event');
        return;
      }

@@ -374,13 +442,13 @@
    ) {

      runStepDelta.delta.tool_calls.forEach((toolCallDelta) => {
-       const toolCallId = toolCallIdMap.get(runStepDelta.id) ?? '';
+       const toolCallId = toolCallIdMap.get(runStepDelta.id);

        const contentPart: t.MessageContentComplex = {
          type: ContentTypes.TOOL_CALL,
          tool_call: {
-           name: toolCallDelta.name ?? '',
            args: toolCallDelta.args ?? '',
+           name: toolCallDelta.name,
            id: toolCallId,
          },
        };
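
The new handleReasoning method decides, per chunk, whether the handler emits dispatchReasoningDelta or dispatchMessageDelta. A minimal standalone sketch of that token-type state machine, with names simplified (this helper is illustrative, not the package's exported API):

    type TokenType = 'think' | 'text';

    interface ReasoningState {
      currentTokenType: TokenType;
      tokenTypeSwitch: 'reasoning' | 'content';
      lastToken?: string;
    }

    // content: the chunk's ordinary text; reasoning: the out-of-band value
    // found under the provider's reasoning key (additional_kwargs above).
    function classifyToken(state: ReasoningState, content: string, reasoning?: string): void {
      if (reasoning && !content) {
        // Provider streams reasoning out-of-band: tokens are "think" tokens
        state.currentTokenType = 'think';
        state.tokenTypeSwitch = 'reasoning';
        return;
      }
      if (state.tokenTypeSwitch === 'reasoning' && state.currentTokenType !== 'text' && content) {
        // First ordinary token after out-of-band reasoning closes the think block
        state.currentTokenType = 'text';
        state.tokenTypeSwitch = 'content';
      } else if (content.includes('<think>')) {
        // In-band reasoning: the model marks it up inside normal content
        state.currentTokenType = 'think';
        state.tokenTypeSwitch = 'content';
      } else if (state.lastToken?.includes('</think>')) {
        state.currentTokenType = 'text';
        state.tokenTypeSwitch = 'content';
      }
      state.lastToken = content;
    }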
package/src/tools/CodeExecutor.ts CHANGED
@@ -1,5 +1,7 @@
  import { z } from 'zod';
  import { config } from 'dotenv';
+ import fetch, { RequestInit } from 'node-fetch';
+ import { HttpsProxyAgent } from 'https-proxy-agent';
  import { tool, DynamicStructuredTool } from '@langchain/core/tools';
  import { getEnvironmentVariable } from '@langchain/core/utils/env';
  import type * as t from '@/types';
@@ -71,7 +73,7 @@ Usage:
    };

    try {
-     const response = await fetch(EXEC_ENDPOINT, {
+     const fetchOptions: RequestInit = {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
@@ -79,8 +81,12 @@ Usage:
          'X-API-Key': apiKey,
        },
        body: JSON.stringify(postData),
-     });
+     };

+     if (process.env.PROXY != null && process.env.PROXY !== '') {
+       fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+     }
+     const response = await fetch(EXEC_ENDPOINT, fetchOptions);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
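
The CodeExecutor change replaces the inline fetch call with a RequestInit object so an optional proxy agent can be attached when the PROXY environment variable is set. A self-contained sketch of the same pattern, assuming node-fetch v2 and https-proxy-agent (the URL argument and helper name are placeholders):

    import fetch, { RequestInit } from 'node-fetch';
    import { HttpsProxyAgent } from 'https-proxy-agent';

    async function postJson(url: string, body: unknown): Promise<unknown> {
      const options: RequestInit = {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(body),
      };
      if (process.env.PROXY) {
        // node-fetch accepts an http.Agent; https-proxy-agent tunnels via CONNECT
        options.agent = new HttpsProxyAgent(process.env.PROXY);
      }
      const response = await fetch(url, options);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      return response.json();
    }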
package/src/types/llm.ts CHANGED
@@ -1,5 +1,6 @@
  // src/types/llm.ts
  import { ChatOllama } from '@langchain/ollama';
+ import { ChatDeepSeek } from '@langchain/deepseek';
  import { ChatAnthropic } from '@langchain/anthropic';
  import { ChatMistralAI } from '@langchain/mistralai';
  import { ChatBedrockConverse } from '@langchain/aws';
@@ -12,6 +13,7 @@ import type { ChatOpenAIFields, OpenAIChatInput, AzureOpenAIInput } from '@langc
  import type { BedrockChatFields } from '@langchain/community/chat_models/bedrock/web';
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
+ import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
  import type { ChatBedrockConverseInput } from '@langchain/aws';
  import type { ChatMistralAIInput } from '@langchain/mistralai';
  import type { StructuredTool } from '@langchain/core/tools';
@@ -41,32 +43,35 @@ export type VertexAIClientOptions = ChatVertexAIInput;
  export type BedrockClientOptions = BedrockChatFields;
  export type BedrockConverseClientOptions = ChatBedrockConverseInput;
  export type GoogleClientOptions = GoogleGenerativeAIChatInput;
+ export type DeepSeekClientOptions = ChatDeepSeekCallOptions;

- export type ClientOptions = OpenAIClientOptions | AzureClientOptions | OllamaClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockClientOptions | BedrockConverseClientOptions | GoogleClientOptions;
+ export type ClientOptions = OpenAIClientOptions | AzureClientOptions | OllamaClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockClientOptions | BedrockConverseClientOptions | GoogleClientOptions | DeepSeekClientOptions;

  export type LLMConfig = {
    provider: Providers;
  } & ClientOptions;

  export type ProviderOptionsMap = {
-   [Providers.OPENAI]: OpenAIClientOptions;
    [Providers.AZURE]: AzureClientOptions;
+   [Providers.OPENAI]: OpenAIClientOptions;
    [Providers.OLLAMA]: OllamaClientOptions;
+   [Providers.GOOGLE]: GoogleClientOptions;
+   [Providers.VERTEXAI]: VertexAIClientOptions;
+   [Providers.DEEPSEEK]: DeepSeekClientOptions;
    [Providers.ANTHROPIC]: AnthropicClientOptions;
    [Providers.MISTRALAI]: MistralAIClientOptions;
-   [Providers.VERTEXAI]: VertexAIClientOptions;
    [Providers.BEDROCK_LEGACY]: BedrockClientOptions;
    [Providers.BEDROCK]: BedrockConverseClientOptions;
-   [Providers.GOOGLE]: GoogleClientOptions;
  };

  export type ChatModelMap = {
    [Providers.OPENAI]: ChatOpenAI;
    [Providers.OLLAMA]: ChatOllama;
    [Providers.AZURE]: AzureChatOpenAI;
+   [Providers.DEEPSEEK]: ChatDeepSeek;
+   [Providers.VERTEXAI]: ChatVertexAI;
    [Providers.ANTHROPIC]: ChatAnthropic;
    [Providers.MISTRALAI]: ChatMistralAI;
-   [Providers.VERTEXAI]: ChatVertexAI;
    [Providers.BEDROCK_LEGACY]: BedrockChat;
    [Providers.BEDROCK]: ChatBedrockConverse;
    [Providers.GOOGLE]: ChatGoogleGenerativeAI;
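
With DeepSeek threaded through both ProviderOptionsMap and ChatModelMap, provider selection stays typed end to end. An illustrative signature (hypothetical helper, not exported by the package):

    import type * as t from '@/types';

    // The indexed maps pair a provider key with its options type and its
    // concrete chat model class at compile time.
    declare function createChatModel<P extends keyof t.ChatModelMap>(
      provider: P,
      options: t.ProviderOptionsMap[P],
    ): t.ChatModelMap[P];

    // createChatModel(Providers.DEEPSEEK, { ... }) would be typed as ChatDeepSeek.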
package/src/types/stream.ts CHANGED
@@ -3,6 +3,7 @@ import type OpenAITypes from 'openai';
  import type { MessageContentImageUrl, MessageContentText, ToolMessage, BaseMessage } from '@langchain/core/messages';
  import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
  import type { LLMResult, Generation } from '@langchain/core/outputs';
+ import type { ToolEndEvent } from '@/types/tools';
  import { StepTypes, ContentTypes, GraphEvents } from '@/common/enum';

  export type HandleLLMEnd = (output: LLMResult, runId: string, parentRunId?: string, tags?: string[]) => void;
@@ -239,4 +240,16 @@ export type SplitStreamHandlers = Partial<{
    [GraphEvents.ON_RUN_STEP]: ({ event, data}: { event: GraphEvents, data: RunStep }) => void;
    [GraphEvents.ON_MESSAGE_DELTA]: ({ event, data}: { event: GraphEvents, data: MessageDeltaEvent }) => void;
    [GraphEvents.ON_REASONING_DELTA]: ({ event, data}: { event: GraphEvents, data: ReasoningDeltaEvent }) => void;
- }>
+ }>
+
+ export type ContentAggregator = ({ event, data }: {
+   event: GraphEvents;
+   data: RunStep | MessageDeltaEvent | RunStepDeltaEvent | {
+     result: ToolEndEvent;
+   };
+ }) => void;
+ export type ContentAggregatorResult = {
+   stepMap: Map<string, RunStep | undefined>;
+   contentParts: Array<MessageContentComplex | undefined>;
+   aggregateContent: ContentAggregator;
+ };
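
These types formalize the shape the new specs consume from createContentAggregator, as in reasoning.test.ts above:

    import { createContentAggregator } from '@/stream';

    // contentParts is filled in place as events are folded in
    const { contentParts, aggregateContent } = createContentAggregator();
    // inside a custom event handler:
    //   aggregateContent({ event, data: messageDeltaData });
    // after the run, contentParts holds the ordered text/think/tool_call parts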
package/src/utils/llmConfig.ts CHANGED
@@ -5,12 +5,22 @@ import type * as t from '@/types';
  export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
    [Providers.OPENAI]: {
      provider: Providers.OPENAI,
-     model: 'gpt-4o',
+     model: 'gpt-4o-mini',
      temperature: 0.7,
      streaming: true,
      streamUsage: true,
      // disableStreaming: true,
    },
+   alibaba: {
+     provider: Providers.OPENAI,
+     streaming: true,
+     streamUsage: true,
+     model: 'qwen-max',
+     openAIApiKey: process.env.ALIBABA_API_KEY,
+     configuration: {
+       baseURL: 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
+     },
+   },
    [Providers.AZURE]: {
      provider: Providers.AZURE,
      temperature: 0.7,
@@ -29,6 +39,12 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
      streamUsage: true,
      baseUrl: 'http://host.docker.internal:11434'
    },
+   [Providers.DEEPSEEK]: {
+     provider: Providers.DEEPSEEK,
+     model: 'deepseek-reasoner',
+     streaming: true,
+     streamUsage: true,
+   },
    [Providers.ANTHROPIC]: {
      provider: Providers.ANTHROPIC,
      model: 'claude-3-5-sonnet-20240620',
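
The new specs select these presets through getLLMConfig, whose usage appears in reasoning.test.ts above (the return shape is inferred from that call site):

    import { getLLMConfig } from '@/utils/llmConfig';
    import { Providers } from '@/common';

    // Looks up the preset registered under the provider key, e.g. the
    // gpt-4o-mini entry shown in this diff.
    const llmConfig = getLLMConfig(Providers.OPENAI);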