@librechat/agents 2.0.5 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/dist/cjs/common/enum.cjs +1 -0
  2. package/dist/cjs/common/enum.cjs.map +1 -1
  3. package/dist/cjs/events.cjs +10 -0
  4. package/dist/cjs/events.cjs.map +1 -1
  5. package/dist/cjs/graphs/Graph.cjs +27 -1
  6. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  7. package/dist/cjs/llm/anthropic/llm.cjs +1 -3
  8. package/dist/cjs/llm/anthropic/llm.cjs.map +1 -1
  9. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  10. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  11. package/dist/cjs/llm/fake.cjs +55 -0
  12. package/dist/cjs/llm/fake.cjs.map +1 -0
  13. package/dist/cjs/llm/providers.cjs +7 -5
  14. package/dist/cjs/llm/providers.cjs.map +1 -1
  15. package/dist/cjs/llm/text.cjs.map +1 -1
  16. package/dist/cjs/messages.cjs.map +1 -1
  17. package/dist/cjs/run.cjs.map +1 -1
  18. package/dist/cjs/splitStream.cjs.map +1 -1
  19. package/dist/cjs/stream.cjs +93 -55
  20. package/dist/cjs/stream.cjs.map +1 -1
  21. package/dist/cjs/tools/CodeExecutor.cjs +8 -2
  22. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  23. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  24. package/dist/cjs/utils/graph.cjs.map +1 -1
  25. package/dist/cjs/utils/llm.cjs.map +1 -1
  26. package/dist/cjs/utils/misc.cjs.map +1 -1
  27. package/dist/cjs/utils/run.cjs.map +1 -1
  28. package/dist/cjs/utils/title.cjs.map +1 -1
  29. package/dist/esm/common/enum.mjs +1 -0
  30. package/dist/esm/common/enum.mjs.map +1 -1
  31. package/dist/esm/events.mjs +10 -0
  32. package/dist/esm/events.mjs.map +1 -1
  33. package/dist/esm/graphs/Graph.mjs +28 -2
  34. package/dist/esm/graphs/Graph.mjs.map +1 -1
  35. package/dist/esm/llm/anthropic/llm.mjs +1 -3
  36. package/dist/esm/llm/anthropic/llm.mjs.map +1 -1
  37. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  38. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  39. package/dist/esm/llm/fake.mjs +52 -0
  40. package/dist/esm/llm/fake.mjs.map +1 -0
  41. package/dist/esm/llm/providers.mjs +8 -6
  42. package/dist/esm/llm/providers.mjs.map +1 -1
  43. package/dist/esm/llm/text.mjs.map +1 -1
  44. package/dist/esm/messages.mjs.map +1 -1
  45. package/dist/esm/run.mjs.map +1 -1
  46. package/dist/esm/splitStream.mjs.map +1 -1
  47. package/dist/esm/stream.mjs +94 -56
  48. package/dist/esm/stream.mjs.map +1 -1
  49. package/dist/esm/tools/CodeExecutor.mjs +9 -3
  50. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  51. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  52. package/dist/esm/utils/graph.mjs.map +1 -1
  53. package/dist/esm/utils/llm.mjs.map +1 -1
  54. package/dist/esm/utils/misc.mjs.map +1 -1
  55. package/dist/esm/utils/run.mjs.map +1 -1
  56. package/dist/esm/utils/title.mjs.map +1 -1
  57. package/dist/types/common/enum.d.ts +2 -1
  58. package/dist/types/events.d.ts +4 -1
  59. package/dist/types/graphs/Graph.d.ts +8 -1
  60. package/dist/types/llm/fake.d.ts +21 -0
  61. package/dist/types/specs/spec.utils.d.ts +1 -0
  62. package/dist/types/stream.d.ts +9 -13
  63. package/dist/types/types/llm.d.ts +10 -5
  64. package/dist/types/types/stream.d.ts +12 -0
  65. package/package.json +15 -26
  66. package/src/common/enum.ts +1 -0
  67. package/src/events.ts +13 -1
  68. package/src/graphs/Graph.ts +31 -2
  69. package/src/llm/fake.ts +83 -0
  70. package/src/llm/providers.ts +7 -5
  71. package/src/scripts/simple.ts +28 -14
  72. package/src/specs/anthropic.simple.test.ts +204 -0
  73. package/src/specs/openai.simple.test.ts +204 -0
  74. package/src/specs/reasoning.test.ts +165 -0
  75. package/src/specs/spec.utils.ts +3 -0
  76. package/src/stream.ts +100 -72
  77. package/src/tools/CodeExecutor.ts +8 -2
  78. package/src/types/llm.ts +10 -5
  79. package/src/types/stream.ts +14 -1
  80. package/src/utils/llmConfig.ts +7 -1
package/src/specs/reasoning.test.ts ADDED
@@ -0,0 +1,165 @@
+ /* eslint-disable no-console */
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ // src/scripts/cli.test.ts
+ import { config } from 'dotenv';
+ config();
+ import { HumanMessage, BaseMessage, MessageContentText } from '@langchain/core/messages';
+ import type { RunnableConfig } from '@langchain/core/runnables';
+ import type { StandardGraph } from '@/graphs';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { capitalizeFirstLetter } from './spec.utils';
+ import { GraphEvents, Providers } from '@/common';
+ import { getLLMConfig } from '@/utils/llmConfig';
+ import { getArgs } from '@/scripts/args';
+ import { Run } from '@/run';
+
+ const reasoningText = `<think>
+ Okay, the user is Jo from New York. I should start by greeting them by name. Let's keep it friendly and open-ended. Maybe mention the weather in New York to make it personal. Then offer help with something specific like plans or questions. Need to keep it concise and welcoming. Check for any typos. Alright, that should work.
+ </think>
+ Hi Jo! 🌆 How's everything in New York today? Whether you need recommendations for the city, help with a task, or just want to chat, I'm here for it. What's on your mind? 😊`;
+
+ const provider = 'Reasoning LLM';
+ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
+   jest.setTimeout(30000);
+   let run: Run<t.IState>;
+   let contentParts: t.MessageContentComplex[];
+   let conversationHistory: BaseMessage[];
+   let aggregateContent: t.ContentAggregator;
+   let runSteps: Set<string>;
+
+   const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+     configurable: {
+       thread_id: 'conversation-num-1',
+     },
+     streamMode: 'values',
+     version: 'v2' as const,
+     callbacks: [{
+       async handleCustomEvent(event, data, metadata): Promise<void> {
+         if (event !== GraphEvents.ON_MESSAGE_DELTA) {
+           return;
+         }
+         const messageDeltaData = data as t.MessageDeltaEvent;
+
+         // Wait until we see the run step (with timeout for safety)
+         const maxAttempts = 50; // 5 seconds total
+         let attempts = 0;
+         while (!runSteps.has(messageDeltaData.id) && attempts < maxAttempts) {
+           await new Promise(resolve => setTimeout(resolve, 100));
+           attempts++;
+         }
+
+         if (!runSteps.has(messageDeltaData.id)) {
+           console.warn(`Timeout waiting for run step: ${messageDeltaData.id}`);
+         }
+
+         onMessageDeltaSpy(event, data, metadata, run.Graph);
+         aggregateContent({ event, data: messageDeltaData });
+       },
+     }],
+   };
+
+   beforeEach(async () => {
+     conversationHistory = [];
+     const { contentParts: parts, aggregateContent: ac } = createContentAggregator();
+     aggregateContent = ac;
+     runSteps = new Set();
+     contentParts = parts as t.MessageContentComplex[];
+   });
+
+   afterEach(() => {
+     runSteps.clear();
+   });
+
+   const onReasoningDeltaSpy = jest.fn();
+   const onMessageDeltaSpy = jest.fn();
+   const onRunStepSpy = jest.fn();
+
+   afterAll(() => {
+     onReasoningDeltaSpy.mockReset();
+     onMessageDeltaSpy.mockReset();
+     onRunStepSpy.mockReset();
+   });
+
+   const setupCustomHandlers = (): Record<string | GraphEvents, t.EventHandler> => ({
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+         aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent; } });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData, metadata, graph): void => {
+         const runStepData = data as t.RunStep;
+         runSteps.add(runStepData.id);
+
+         onRunStepSpy(event, runStepData, metadata, graph);
+         aggregateContent({ event, data: runStepData });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       }
+     },
+     [GraphEvents.ON_REASONING_DELTA]: {
+       handle: (event: GraphEvents.ON_REASONING_DELTA, data: t.StreamEventData, metadata, graph): void => {
+         onReasoningDeltaSpy(event, data, metadata, graph);
+         aggregateContent({ event, data: data as t.ReasoningDeltaEvent });
+       }
+     },
+   });
+
+   test(`${capitalizeFirstLetter(provider)}: should process a simple reasoning message`, async () => {
+     const { userName, location } = await getArgs();
+     const llmConfig = getLLMConfig(Providers.OPENAI);
+     const customHandlers = setupCustomHandlers();
+
+     run = await Run.create<t.IState>({
+       runId: 'test-run-id',
+       graphConfig: {
+         type: 'standard',
+         llmConfig,
+         instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+         additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+       },
+       returnContent: true,
+       customHandlers,
+     });
+
+     run.Graph?.overrideTestModel([reasoningText], 2);
+
+     const userMessage = 'hi';
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     await run.processStream(inputs, config);
+     expect(contentParts).toBeDefined();
+     expect(contentParts.length).toBe(2);
+     const reasoningContent = reasoningText.match(/<think>(.*)<\/think>/s)?.[0];
+     const content = reasoningText.split(/<\/think>/)[1];
+     expect((contentParts[0] as t.ReasoningContentText).think).toBe(reasoningContent);
+     expect((contentParts[1] as MessageContentText).text).toBe(content);
+
+     const finalMessages = run.getRunMessages();
+     expect(finalMessages).toBeDefined();
+     conversationHistory.push(...finalMessages ?? []);
+     expect(conversationHistory.length).toBeGreaterThan(1);
+
+     expect(onMessageDeltaSpy).toHaveBeenCalled();
+     expect(onMessageDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+     expect((onMessageDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+     expect(onReasoningDeltaSpy).toHaveBeenCalled();
+     expect(onReasoningDeltaSpy.mock.calls.length).toBeGreaterThan(1);
+     expect((onReasoningDeltaSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+     expect(onRunStepSpy).toHaveBeenCalled();
+     expect(onRunStepSpy.mock.calls.length).toBeGreaterThan(0);
+     expect((onRunStepSpy.mock.calls[0][3] as StandardGraph).provider).toBeDefined();
+
+   });
+ });
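For reference, the assertions above derive their expected values by splitting the fixture on its think tags. A minimal standalone sketch of that split, using the same regexes as the test (the sample string is illustrative):

    // contentParts[0].think: the full <think>...</think> block, tags included.
    // contentParts[1].text: everything after the closing tag.
    const sample = '<think>\nchain of thought\n</think>\nfinal answer';
    const think = sample.match(/<think>(.*)<\/think>/s)?.[0];
    const text = sample.split(/<\/think>/)[1];
    console.log(think); // '<think>\nchain of thought\n</think>'
    console.log(text);  // '\nfinal answer'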
package/src/specs/spec.utils.ts ADDED
@@ -0,0 +1,3 @@
+ export function capitalizeFirstLetter(string: string): string {
+   return string.charAt(0).toUpperCase() + string.slice(1);
+ }
package/src/stream.ts CHANGED
@@ -1,7 +1,7 @@
  // src/stream.ts
  import { nanoid } from 'nanoid';
  import type { AIMessageChunk } from '@langchain/core/messages';
- import type { ToolCall } from '@langchain/core/messages/tool';
+ import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
  import type { Graph } from '@/graphs';
  import type * as t from '@/types';
  import { StepTypes, ContentTypes, GraphEvents, ToolCallTypes } from '@/common';
@@ -107,27 +107,25 @@ export class ChatModelStreamHandler implements t.EventHandler {
    if (!graph) {
      throw new Error('Graph not found');
    }
-
-   const chunk = data.chunk as AIMessageChunk | undefined;
-   const content = chunk?.content;
-
    if (!graph.config) {
      throw new Error('Config not found in graph');
    }
-
-   if (!chunk) {
+   if (!data.chunk) {
      console.warn(`No chunk found in ${event} event`);
      return;
    }

-   let hasToolCalls = false;
-   const hasToolCallChunks = (chunk.tool_call_chunks && chunk.tool_call_chunks.length > 0) ?? false;
+   const chunk = data.chunk as Partial<AIMessageChunk>;
+   const content = (chunk.additional_kwargs?.[graph.reasoningKey] as string | undefined) ?? chunk.content;
+   this.handleReasoning(chunk, graph);

+   let hasToolCalls = false;
    if (chunk.tool_calls && chunk.tool_calls.length > 0 && chunk.tool_calls.every((tc) => tc.id)) {
      hasToolCalls = true;
      handleToolCalls(chunk.tool_calls, metadata, graph);
    }

+   const hasToolCallChunks = (chunk.tool_call_chunks && chunk.tool_call_chunks.length > 0) ?? false;
    const isEmptyContent = typeof content === 'undefined' || !content.length || typeof content === 'string' && !content;
    const isEmptyChunk = isEmptyContent && !hasToolCallChunks;
    const chunkId = chunk.id ?? '';
@@ -151,51 +149,7 @@ export class ChatModelStreamHandler implements t.EventHandler {
      && chunk.tool_call_chunks
      && chunk.tool_call_chunks.length
      && typeof chunk.tool_call_chunks[0]?.index === 'number') {
-     const prevStepId = graph.getStepIdByKey(stepKey, graph.contentData.length - 1);
-     const prevRunStep = graph.getRunStep(prevStepId);
-     const _stepId = graph.getStepIdByKey(stepKey, prevRunStep?.index);
-     /** Edge Case: Tool Call Run Step or `tool_call_ids` never dispatched */
-     const tool_calls: ToolCall[] | undefined =
-       prevStepId && prevRunStep && prevRunStep.type === StepTypes.MESSAGE_CREATION
-         ? []
-         : undefined;
-     /** Edge Case: `id` and `name` fields cannot be empty strings */
-     for (const toolCallChunk of chunk.tool_call_chunks) {
-       if (toolCallChunk.name === '') {
-         toolCallChunk.name = undefined;
-       }
-       if (toolCallChunk.id === '') {
-         toolCallChunk.id = undefined;
-       } else if (tool_calls != null && toolCallChunk.id != null && toolCallChunk.name != null) {
-         tool_calls.push({
-           args: {},
-           id: toolCallChunk.id,
-           name: toolCallChunk.name,
-           type: ToolCallTypes.TOOL_CALL,
-         });
-       }
-     }
-
-     let stepId: string = _stepId;
-     const alreadyDispatched = prevRunStep?.type === StepTypes.MESSAGE_CREATION && graph.messageStepHasToolCalls.has(prevStepId);
-     if (!alreadyDispatched && tool_calls?.length === chunk.tool_call_chunks.length) {
-       graph.dispatchMessageDelta(prevStepId, {
-         content: [{
-           type: 'text',
-           text: '',
-           tool_call_ids: tool_calls.map((tc) => tc.id ?? ''),
-         }],
-       });
-       graph.messageStepHasToolCalls.set(prevStepId, true);
-       stepId = graph.dispatchRunStep(stepKey, {
-         type: StepTypes.TOOL_CALLS,
-         tool_calls,
-       });
-     }
-     graph.dispatchRunStepDelta(stepId, {
-       type: StepTypes.TOOL_CALLS,
-       tool_calls: chunk.tool_call_chunks,
-     });
+     this.handleToolCallChunks({ graph, stepKey, toolCallChunks: chunk.tool_call_chunks });
    }

    if (isEmptyContent) {
@@ -240,32 +194,106 @@ hasToolCallChunks: ${hasToolCallChunks}
    } else if (hasToolCallChunks && (chunk.tool_call_chunks?.some((tc) => tc.args === content) ?? false)) {
      return;
    } else if (typeof content === 'string') {
+     if (graph.currentTokenType === ContentTypes.TEXT) {
+       graph.dispatchMessageDelta(stepId, {
+         content: [{
+           type: ContentTypes.TEXT,
+           text: content,
+         }],
+       });
+     } else {
+       graph.dispatchReasoningDelta(stepId, {
+         content: [{
+           type: ContentTypes.THINK,
+           think: content,
+         }],
+       });
+     }
+   } else if (content.every((c) => c.type?.startsWith(ContentTypes.TEXT))) {
      graph.dispatchMessageDelta(stepId, {
+       content,
+     });
+   }
+ }
+ handleToolCallChunks = ({
+   graph,
+   stepKey,
+   toolCallChunks,
+ }: {
+   graph: Graph;
+   stepKey: string;
+   toolCallChunks: ToolCallChunk[],
+ }): void => {
+   const prevStepId = graph.getStepIdByKey(stepKey, graph.contentData.length - 1);
+   const prevRunStep = graph.getRunStep(prevStepId);
+   const _stepId = graph.getStepIdByKey(stepKey, prevRunStep?.index);
+   /** Edge Case: Tool Call Run Step or `tool_call_ids` never dispatched */
+   const tool_calls: ToolCall[] | undefined =
+     prevStepId && prevRunStep && prevRunStep.type === StepTypes.MESSAGE_CREATION
+       ? []
+       : undefined;
+   /** Edge Case: `id` and `name` fields cannot be empty strings */
+   for (const toolCallChunk of toolCallChunks) {
+     if (toolCallChunk.name === '') {
+       toolCallChunk.name = undefined;
+     }
+     if (toolCallChunk.id === '') {
+       toolCallChunk.id = undefined;
+     } else if (tool_calls != null && toolCallChunk.id != null && toolCallChunk.name != null) {
+       tool_calls.push({
+         args: {},
+         id: toolCallChunk.id,
+         name: toolCallChunk.name,
+         type: ToolCallTypes.TOOL_CALL,
+       });
+     }
+   }
+
+   let stepId: string = _stepId;
+   const alreadyDispatched = prevRunStep?.type === StepTypes.MESSAGE_CREATION && graph.messageStepHasToolCalls.has(prevStepId);
+   if (!alreadyDispatched && tool_calls?.length === toolCallChunks.length) {
+     graph.dispatchMessageDelta(prevStepId, {
        content: [{
-         type: 'text',
-         text: content,
+         type: ContentTypes.TEXT,
+         text: '',
+         tool_call_ids: tool_calls.map((tc) => tc.id ?? ''),
        }],
      });
-   } else if (content.every((c) => c.type?.startsWith('text'))) {
-     graph.dispatchMessageDelta(stepId, {
-       content,
+     graph.messageStepHasToolCalls.set(prevStepId, true);
+     stepId = graph.dispatchRunStep(stepKey, {
+       type: StepTypes.TOOL_CALLS,
+       tool_calls,
      });
    }
+   graph.dispatchRunStepDelta(stepId, {
+     type: StepTypes.TOOL_CALLS,
+     tool_calls: toolCallChunks,
+   });
+ };
+ handleReasoning(chunk: Partial<AIMessageChunk>, graph: Graph): void {
+   const reasoning_content = chunk.additional_kwargs?.[graph.reasoningKey] as string | undefined;
+   if (reasoning_content != null && reasoning_content && (chunk.content == null || chunk.content === '')) {
+     graph.currentTokenType = ContentTypes.THINK;
+     graph.tokenTypeSwitch = 'reasoning';
+     return;
+   } else if (graph.tokenTypeSwitch === 'reasoning' && graph.currentTokenType !== ContentTypes.TEXT && chunk.content != null && chunk.content !== '') {
+     graph.currentTokenType = ContentTypes.TEXT;
+     graph.tokenTypeSwitch = 'content';
+   } else if (chunk.content != null && typeof chunk.content === 'string' && chunk.content.includes('<think>')) {
+     graph.currentTokenType = ContentTypes.THINK;
+     graph.tokenTypeSwitch = 'content';
+   } else if (graph.lastToken != null && graph.lastToken.includes('</think>')) {
+     graph.currentTokenType = ContentTypes.TEXT;
+     graph.tokenTypeSwitch = 'content';
+   }
+   if (typeof chunk.content !== 'string') {
+     return;
+   }
+   graph.lastToken = chunk.content;
  }
  }

- export type ContentAggregatorResult = {
-   stepMap: Map<string, t.RunStep | undefined>;
-   contentParts: Array<t.MessageContentComplex | undefined>;
-   aggregateContent: ({ event, data }: {
-     event: GraphEvents;
-     data: t.RunStep | t.MessageDeltaEvent | t.RunStepDeltaEvent | {
-       result: t.ToolEndEvent;
-     };
-   }) => void
- };
-
- export function createContentAggregator(): ContentAggregatorResult {
+ export function createContentAggregator(): t.ContentAggregatorResult {
    const contentParts: Array<t.MessageContentComplex | undefined> = [];
    const stepMap = new Map<string, t.RunStep>();
    const toolCallIdMap = new Map<string, string>();
@@ -389,7 +417,7 @@ export function createContentAggregator(): ContentAggregatorResult {
    const reasoningDelta = data as t.ReasoningDeltaEvent;
    const runStep = stepMap.get(reasoningDelta.id);
    if (!runStep) {
-     console.warn('No run step or runId found for message delta event');
+     console.warn('No run step or runId found for reasoning delta event');
      return;
    }

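The new handleReasoning method above is a small token-type state machine: reasoning either arrives in a dedicated additional_kwargs field (keyed by graph.reasoningKey) or inline between <think> tags, and graph.currentTokenType flips between THINK and TEXT so the handler knows whether to dispatch reasoning deltas or message deltas. A simplified standalone sketch of the same switching rules (MiniGraph is a stand-in for illustration, not the package's Graph):

    type TokenType = 'think' | 'text';
    interface MiniGraph {
      currentTokenType: TokenType;
      tokenTypeSwitch: 'reasoning' | 'content';
      lastToken?: string;
    }

    function classifyToken(graph: MiniGraph, content: string, reasoning?: string): void {
      if (reasoning && !content) {
        // Dedicated reasoning field (e.g. DeepSeek-style): stream is in THINK mode.
        graph.currentTokenType = 'think';
        graph.tokenTypeSwitch = 'reasoning';
        return;
      }
      if (graph.tokenTypeSwitch === 'reasoning' && content) {
        // The reasoning field stopped and regular content resumed.
        graph.currentTokenType = 'text';
        graph.tokenTypeSwitch = 'content';
      } else if (content.includes('<think>')) {
        // Inline reasoning delimited by tags within the content stream.
        graph.currentTokenType = 'think';
        graph.tokenTypeSwitch = 'content';
      } else if (graph.lastToken?.includes('</think>')) {
        // The previous token closed the think block; back to plain text.
        graph.currentTokenType = 'text';
        graph.tokenTypeSwitch = 'content';
      }
      graph.lastToken = content;
    }

    // '<think>' flips to THINK; the token after '</think>' flips back to TEXT.
    const g: MiniGraph = { currentTokenType: 'text', tokenTypeSwitch: 'content' };
    ['<think>', 'pondering', '</think>', 'answer'].forEach((tok) => classifyToken(g, tok));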
package/src/tools/CodeExecutor.ts CHANGED
@@ -1,5 +1,7 @@
  import { z } from 'zod';
  import { config } from 'dotenv';
+ import fetch, { RequestInit } from 'node-fetch';
+ import { HttpsProxyAgent } from 'https-proxy-agent';
  import { tool, DynamicStructuredTool } from '@langchain/core/tools';
  import { getEnvironmentVariable } from '@langchain/core/utils/env';
  import type * as t from '@/types';
@@ -71,7 +73,7 @@ Usage:
    };

    try {
-     const response = await fetch(EXEC_ENDPOINT, {
+     const fetchOptions: RequestInit = {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
@@ -79,8 +81,12 @@ Usage:
          'X-API-Key': apiKey,
        },
        body: JSON.stringify(postData),
-     });
+     };

+     if (process.env.PROXY != null && process.env.PROXY !== '') {
+       fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+     }
+     const response = await fetch(EXEC_ENDPOINT, fetchOptions);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
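The CodeExecutor change makes the executor proxy-aware: request options are built first, and a node-fetch agent is attached only when a PROXY environment variable is set. The same pattern in isolation (the URL and payload here are placeholders, not the package's EXEC_ENDPOINT):

    import fetch, { RequestInit } from 'node-fetch';
    import { HttpsProxyAgent } from 'https-proxy-agent';

    async function postJson(url: string, payload: unknown): Promise<unknown> {
      const fetchOptions: RequestInit = {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
      };
      // Route through an HTTPS proxy only when one is configured.
      if (process.env.PROXY != null && process.env.PROXY !== '') {
        fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
      }
      const response = await fetch(url, fetchOptions);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      return response.json();
    }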
package/src/types/llm.ts CHANGED
@@ -1,5 +1,6 @@
  // src/types/llm.ts
  import { ChatOllama } from '@langchain/ollama';
+ import { ChatDeepSeek } from '@langchain/deepseek';
  import { ChatAnthropic } from '@langchain/anthropic';
  import { ChatMistralAI } from '@langchain/mistralai';
  import { ChatBedrockConverse } from '@langchain/aws';
@@ -12,6 +13,7 @@ import type { ChatOpenAIFields, OpenAIChatInput, AzureOpenAIInput } from '@langc
  import type { BedrockChatFields } from '@langchain/community/chat_models/bedrock/web';
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
+ import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
  import type { ChatBedrockConverseInput } from '@langchain/aws';
  import type { ChatMistralAIInput } from '@langchain/mistralai';
  import type { StructuredTool } from '@langchain/core/tools';
@@ -41,32 +43,35 @@ export type VertexAIClientOptions = ChatVertexAIInput;
  export type BedrockClientOptions = BedrockChatFields;
  export type BedrockConverseClientOptions = ChatBedrockConverseInput;
  export type GoogleClientOptions = GoogleGenerativeAIChatInput;
+ export type DeepSeekClientOptions = ChatDeepSeekCallOptions;

- export type ClientOptions = OpenAIClientOptions | AzureClientOptions | OllamaClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockClientOptions | BedrockConverseClientOptions | GoogleClientOptions;
+ export type ClientOptions = OpenAIClientOptions | AzureClientOptions | OllamaClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockClientOptions | BedrockConverseClientOptions | GoogleClientOptions | DeepSeekClientOptions;

  export type LLMConfig = {
    provider: Providers;
  } & ClientOptions;

  export type ProviderOptionsMap = {
-   [Providers.OPENAI]: OpenAIClientOptions;
    [Providers.AZURE]: AzureClientOptions;
+   [Providers.OPENAI]: OpenAIClientOptions;
    [Providers.OLLAMA]: OllamaClientOptions;
+   [Providers.GOOGLE]: GoogleClientOptions;
+   [Providers.VERTEXAI]: VertexAIClientOptions;
+   [Providers.DEEPSEEK]: DeepSeekClientOptions;
    [Providers.ANTHROPIC]: AnthropicClientOptions;
    [Providers.MISTRALAI]: MistralAIClientOptions;
-   [Providers.VERTEXAI]: VertexAIClientOptions;
    [Providers.BEDROCK_LEGACY]: BedrockClientOptions;
    [Providers.BEDROCK]: BedrockConverseClientOptions;
-   [Providers.GOOGLE]: GoogleClientOptions;
  };

  export type ChatModelMap = {
    [Providers.OPENAI]: ChatOpenAI;
    [Providers.OLLAMA]: ChatOllama;
    [Providers.AZURE]: AzureChatOpenAI;
+   [Providers.DEEPSEEK]: ChatDeepSeek;
+   [Providers.VERTEXAI]: ChatVertexAI;
    [Providers.ANTHROPIC]: ChatAnthropic;
    [Providers.MISTRALAI]: ChatMistralAI;
-   [Providers.VERTEXAI]: ChatVertexAI;
    [Providers.BEDROCK_LEGACY]: BedrockChat;
    [Providers.BEDROCK]: ChatBedrockConverse;
    [Providers.GOOGLE]: ChatGoogleGenerativeAI;
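Because ProviderOptionsMap and ChatModelMap are keyed by the same Providers enum, a factory can tie each provider key to matching options and model types with one generic signature; adding DeepSeek to only one of the two maps would fail to type-check at the construction site. A type-level sketch of that pairing (a reduced, illustrative subset, not the package's actual factory):

    // Reduced subset for illustration; the real maps cover all providers.
    enum MiniProviders { OPENAI = 'openai', DEEPSEEK = 'deepseek' }

    type MiniOptionsMap = {
      [MiniProviders.OPENAI]: { model: string; temperature?: number };
      [MiniProviders.DEEPSEEK]: { model: string };
    };
    type MiniModelMap = {
      [MiniProviders.OPENAI]: { invoke(input: string): Promise<string> };
      [MiniProviders.DEEPSEEK]: { invoke(input: string): Promise<string> };
    };

    // One signature covers every provider key with the right options/model pair.
    declare function createModel<P extends MiniProviders>(
      provider: P,
      options: MiniOptionsMap[P],
    ): MiniModelMap[P];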
package/src/types/stream.ts CHANGED
@@ -3,6 +3,7 @@ import type OpenAITypes from 'openai';
  import type { MessageContentImageUrl, MessageContentText, ToolMessage, BaseMessage } from '@langchain/core/messages';
  import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
  import type { LLMResult, Generation } from '@langchain/core/outputs';
+ import type { ToolEndEvent } from '@/types/tools';
  import { StepTypes, ContentTypes, GraphEvents } from '@/common/enum';

  export type HandleLLMEnd = (output: LLMResult, runId: string, parentRunId?: string, tags?: string[]) => void;
@@ -239,4 +240,16 @@ export type SplitStreamHandlers = Partial<{
    [GraphEvents.ON_RUN_STEP]: ({ event, data}: { event: GraphEvents, data: RunStep }) => void;
    [GraphEvents.ON_MESSAGE_DELTA]: ({ event, data}: { event: GraphEvents, data: MessageDeltaEvent }) => void;
    [GraphEvents.ON_REASONING_DELTA]: ({ event, data}: { event: GraphEvents, data: ReasoningDeltaEvent }) => void;
- }>
+ }>
+
+ export type ContentAggregator = ({ event, data }: {
+   event: GraphEvents;
+   data: RunStep | MessageDeltaEvent | RunStepDeltaEvent | {
+     result: ToolEndEvent;
+   };
+ }) => void;
+ export type ContentAggregatorResult = {
+   stepMap: Map<string, RunStep | undefined>;
+   contentParts: Array<MessageContentComplex | undefined>;
+   aggregateContent: ContentAggregator;
+ };
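The extracted ContentAggregator type makes the aggregator's accepted payloads explicit. A sketch of calls matching that union (the declared values are placeholders for illustration):

    import type * as t from '@/types';
    import { GraphEvents } from '@/common';
    import { createContentAggregator } from '@/stream';

    declare const runStep: t.RunStep;                // placeholder event payloads
    declare const messageDelta: t.MessageDeltaEvent;
    declare const toolEnd: t.ToolEndEvent;

    const { aggregateContent, contentParts } = createContentAggregator();
    aggregateContent({ event: GraphEvents.ON_RUN_STEP, data: runStep });
    aggregateContent({ event: GraphEvents.ON_MESSAGE_DELTA, data: messageDelta });
    aggregateContent({ event: GraphEvents.ON_RUN_STEP_COMPLETED, data: { result: toolEnd } });
    // contentParts fills with the aggregated message/reasoning/tool content parts.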
package/src/utils/llmConfig.ts CHANGED
@@ -5,7 +5,7 @@ import type * as t from '@/types';
  export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
    [Providers.OPENAI]: {
      provider: Providers.OPENAI,
-     model: 'gpt-4o',
+     model: 'gpt-4o-mini',
      temperature: 0.7,
      streaming: true,
      streamUsage: true,
@@ -39,6 +39,12 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
      streamUsage: true,
      baseUrl: 'http://host.docker.internal:11434'
    },
+   [Providers.DEEPSEEK]: {
+     provider: Providers.DEEPSEEK,
+     model: 'deepseek-reasoner',
+     streaming: true,
+     streamUsage: true,
+   },
    [Providers.ANTHROPIC]: {
      provider: Providers.ANTHROPIC,
      model: 'claude-3-5-sonnet-20240620',