@librechat/agents 2.2.6 → 2.2.8

This diff shows the changes between publicly available package versions as published to their respective registries, and is provided for informational purposes only.
package/src/llm/fake.ts CHANGED
@@ -1,7 +1,9 @@
- import type { BaseMessage } from '@langchain/core/messages';
- import type { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { AIMessageChunk } from '@langchain/core/messages';
  import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+ import type { BaseMessage } from '@langchain/core/messages';
  import { FakeListChatModel } from '@langchain/core/utils/testing';
+ import { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';

  type SplitStrategy = {
    type: 'regex' | 'fixed';
@@ -10,20 +12,25 @@ type SplitStrategy = {

  export class FakeChatModel extends FakeListChatModel {
    private splitStrategy: SplitStrategy;
+   private toolCalls: ToolCall[] = [];
+   private addedToolCalls: boolean = false;

    constructor({
      responses,
      sleep,
      emitCustomEvent,
-     splitStrategy = { type: 'regex', value: /(?<=\s+)|(?=\s+)/ }
+     splitStrategy = { type: 'regex', value: /(?<=\s+)|(?=\s+)/ },
+     toolCalls = []
    }: {
      responses: string[];
      sleep?: number;
      emitCustomEvent?: boolean;
      splitStrategy?: SplitStrategy;
+     toolCalls?: ToolCall[];
    }) {
      super({ responses, sleep, emitCustomEvent });
      this.splitStrategy = splitStrategy;
+     this.toolCalls = toolCalls;
    }

    private splitText(text: string): string[] {
@@ -38,6 +45,26 @@ export class FakeChatModel extends FakeListChatModel {
        return chunks;
      }
    }
+   _createResponseChunk(text: string, tool_call_chunks?: ToolCallChunk[]): ChatGenerationChunk {
+     return new ChatGenerationChunk({
+       text,
+       generationInfo: {},
+       message: new AIMessageChunk({
+         content: text,
+         tool_call_chunks,
+         additional_kwargs: tool_call_chunks ? {
+           tool_calls: tool_call_chunks.map((toolCall) => ({
+             index: toolCall.index ?? 0,
+             id: toolCall.id ?? '',
+             type: 'function',
+             function: {
+               name: toolCall.name ?? '',
+               arguments: toolCall.args ?? '',
+             },
+           })),
+         } : undefined,
+       })});
+   }

    async *_streamResponseChunks(
      _messages: BaseMessage[],
@@ -54,7 +81,6 @@ export class FakeChatModel extends FakeListChatModel {
      }

      const chunks = this.splitText(response);
-
      for await (const chunk of chunks) {
        await this._sleepIfRequested();

@@ -62,22 +88,46 @@ export class FakeChatModel extends FakeListChatModel {
          throw new Error(options.thrownErrorString);
        }

-       const responseChunk = this._createResponseChunk(chunk);
+       const responseChunk = super._createResponseChunk(chunk);
        yield responseChunk;
        void runManager?.handleLLMNewToken(chunk);
      }
+
+     await this._sleepIfRequested();
+     if (this.toolCalls.length > 0 && !this.addedToolCalls) {
+       this.addedToolCalls = true;
+       const toolCallChunks = this.toolCalls.map((toolCall) => {
+         return {
+           name: toolCall.name,
+           args: JSON.stringify(toolCall.args),
+           id: toolCall.id,
+           type: 'tool_call_chunk',
+         } as ToolCallChunk;
+       });
+       const responseChunk = this._createResponseChunk('', toolCallChunks);
+       yield responseChunk;
+       void runManager?.handleLLMNewToken('');
+     }
    }
  }

- export function createFakeStreamingLLM(
+ export function createFakeStreamingLLM({
+   responses,
+   sleep,
+   splitStrategy,
+   toolCalls,
+ }: {
    responses: string[],
    sleep?: number,
-   splitStrategy?: SplitStrategy
+   splitStrategy?: SplitStrategy,
+   toolCalls?: ToolCall[]
+ }
  ): FakeChatModel {
    return new FakeChatModel({
      sleep,
      responses,
      emitCustomEvent: true,
      splitStrategy,
+     toolCalls,
    });
- }
+ }
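
Note: createFakeStreamingLLM now takes a single options object instead of positional arguments, and the fake model emits one final chunk carrying any configured toolCalls as tool_call_chunks after the text chunks finish streaming (the addedToolCalls flag prevents re-emission). A minimal usage sketch, with values borrowed from the test added further below; the import path is an assumption, not part of this diff:

  import { createFakeStreamingLLM } from '@/llm/fake'; // path assumed

  const fakeModel = createFakeStreamingLLM({
    responses: ['Let me try calling the tool'],
    sleep: 5,
    toolCalls: [
      // surfaced as a tool_call_chunk once the text has streamed
      { name: 'errorTool', args: { input: 'test input' }, id: 'call_test123', type: 'tool_call' },
    ],
  });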
@@ -65,8 +65,8 @@ async function testCodeExecution(): Promise<void> {
    graphConfig: {
      type: 'standard',
      llmConfig,
-     // tools: [fetchRandomImageTool],
-     tools: [fetchRandomImageURL],
+     tools: [fetchRandomImageTool],
+     // tools: [fetchRandomImageURL],
      instructions: 'You are a friendly AI assistant with internet capabilities. Always address the user by their name.',
      additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
    },
@@ -0,0 +1,157 @@
+ /* eslint-disable no-console */
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ import { z } from 'zod';
+ import { config } from 'dotenv';
+ config();
+ import { tool } from '@langchain/core/tools';
+ import { ToolCall } from '@langchain/core/messages/tool';
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type { RunnableConfig } from '@langchain/core/runnables';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
+ import { GraphEvents, Providers } from '@/common';
+ import { getLLMConfig } from '@/utils/llmConfig';
+ import { getArgs } from '@/scripts/args';
+ import { StandardGraph } from '@/graphs';
+ import { Run } from '@/run';
+
+ const errorTool = tool(
+   async () => {
+     throw new Error('this is a test error I threw on purpose');
+   },
+   {
+     name: 'errorTool',
+     description: 'A tool that always throws an error',
+     schema: z.object({ input: z.string().optional() }),
+   }
+ );
+
+ describe('Tool Error Handling Tests', () => {
+   jest.setTimeout(30000);
+   let run: Run<t.IState>;
+   let contentParts: t.MessageContentComplex[];
+   let conversationHistory: BaseMessage[];
+   let aggregateContent: t.ContentAggregator;
+   let handleToolCallErrorSpy: jest.SpyInstance;
+
+   const config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string } = {
+     configurable: {
+       thread_id: 'conversation-num-1',
+     },
+     streamMode: 'values',
+     version: 'v2' as const,
+   };
+
+   beforeEach(async () => {
+     conversationHistory = [];
+     const { contentParts: parts, aggregateContent: ac } = createContentAggregator();
+     aggregateContent = ac;
+     contentParts = parts as t.MessageContentComplex[];
+     handleToolCallErrorSpy = jest.spyOn(StandardGraph.prototype, 'handleToolCallError');
+   });
+
+   afterEach(() => {
+     handleToolCallErrorSpy.mockRestore();
+   });
+
+   const onMessageDeltaSpy = jest.fn();
+   const onRunStepSpy = jest.fn();
+   const onRunStepCompletedSpy = jest.fn();
+
+   afterAll(() => {
+     onMessageDeltaSpy.mockReset();
+     onRunStepSpy.mockReset();
+     onRunStepCompletedSpy.mockReset();
+   });
+
+   const setupCustomHandlers = (): Record<string | GraphEvents, t.EventHandler> => ({
+     [GraphEvents.TOOL_END]: new ToolEndHandler(),
+     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+         if ((data.result as t.MessageContentComplex)?.['type'] === 'tool_call') {
+           run.Graph?.overrideTestModel(['Looks like there was an error calling the tool.'], 5);
+         }
+         onRunStepCompletedSpy(event, data);
+         aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent; } });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData, metadata, graph): void => {
+         const runStepData = data as t.RunStep;
+         onRunStepSpy(event, runStepData, metadata, graph);
+         aggregateContent({ event, data: runStepData });
+       }
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       }
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData, metadata, graph): void => {
+         onMessageDeltaSpy(event, data, metadata, graph);
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       }
+     },
+   });
+
+   test('should handle tool call errors correctly', async () => {
+     const { userName, location } = await getArgs();
+     const llmConfig = getLLMConfig(Providers.OPENAI);
+     const customHandlers = setupCustomHandlers();
+
+     // Create the run instance
+     run = await Run.create<t.IState>({
+       runId: 'test-run-id',
+       graphConfig: {
+         type: 'standard',
+         llmConfig,
+         tools: [errorTool],
+         instructions: 'You are a helpful AI assistant.',
+         additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+       },
+       returnContent: true,
+       customHandlers,
+     });
+
+     const toolCalls: ToolCall[] = [
+       {
+         name: 'errorTool',
+         args: {
+           input: 'test input',
+         },
+         id: 'call_test123',
+         type: 'tool_call',
+       }
+     ];
+
+     const firstResponse = 'Let me try calling the tool';
+     run.Graph?.overrideTestModel([firstResponse], 5, toolCalls);
+
+     const userMessage = 'Use the error tool';
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     await run.processStream(inputs, config);
+
+     // Verify handleToolCallError was called
+     expect(handleToolCallErrorSpy).toHaveBeenCalled();
+
+     // Find the tool call content part
+     const toolCallPart = contentParts.find(part =>
+       part?.type === 'tool_call'
+     ) as t.ToolCallContent | undefined;
+
+     // Verify the error message in contentParts
+     expect(toolCallPart).toBeDefined();
+     expect(toolCallPart?.tool_call?.args).toEqual(JSON.stringify(toolCalls[0].args));
+     expect(toolCallPart?.tool_call?.output).toContain('Error processing tool');
+     expect(toolCallPart?.tool_call?.output).toContain('this is a test error I threw on purpose');
+   });
+ });
@@ -14,12 +14,14 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
    private loadRuntimeTools?: t.ToolRefGenerator;
    handleToolErrors = true;
    toolCallStepIds?: Map<string, string>;
+   errorHandler?: t.ToolNodeConstructorParams['errorHandler'];

    constructor({
      tools,
      toolMap,
      name,
      tags,
+     errorHandler,
      toolCallStepIds,
      handleToolErrors,
      loadRuntimeTools,
@@ -30,6 +32,7 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
      this.toolCallStepIds = toolCallStepIds;
      this.handleToolErrors = handleToolErrors ?? this.handleToolErrors;
      this.loadRuntimeTools = loadRuntimeTools;
+     this.errorHandler = errorHandler;
    }

    // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -83,6 +86,12 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
        if (isGraphInterrupt(e)) {
          throw e;
        }
+       this.errorHandler?.({
+         error: e,
+         id: call.id!,
+         name: call.name,
+         input: call.args,
+       }, config?.metadata);
        return new ToolMessage({
          content: `Error: ${e.message}\n Please fix your mistakes.`,
          name: call.name,
@@ -93,6 +93,11 @@ export type MessageCreationDetails = {
  };

  export type ToolEndData = { input: string | Record<string, unknown>, output?: ToolMessage };
+ export type ToolErrorData = {
+   id: string,
+   name: string,
+   error?: Error,
+ } & Pick<ToolEndData, 'input'>;
  export type ToolEndCallback = (data: ToolEndData, metadata?: Record<string, unknown>) => void;

  export type ProcessedToolCall = {
@@ -2,7 +2,8 @@
  import type { RunnableToolLike } from '@langchain/core/runnables';
  import type { StructuredToolInterface } from '@langchain/core/tools';
  import type { ToolCall } from '@langchain/core/messages/tool';
- import { ContentTypes, EnvVar } from '@/common';
+ import type { ToolErrorData } from './stream';
+ import { EnvVar } from '@/common';

  /** Replacement type for `import type { ToolCall } from '@langchain/core/messages/tool'` in order to have stringified args typed */
  export type CustomToolCall = {
@@ -29,6 +30,7 @@ export type ToolNodeOptions = {
    handleToolErrors?: boolean;
    loadRuntimeTools?: ToolRefGenerator;
    toolCallStepIds?: Map<string, string>;
+   errorHandler?: (data: ToolErrorData, metadata?: Record<string, unknown>) => void;
  };

  export type ToolNodeConstructorParams = ToolRefs & ToolNodeOptions;
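
Note: the new errorHandler option is invoked from ToolNode's catch path, after graph interrupts are rethrown and before the fallback "Error: ... Please fix your mistakes." ToolMessage is returned. A minimal wiring sketch; the tools/toolMap setup is assumed and not part of this diff, only the errorHandler shape comes from the types above:

  const toolNode = new ToolNode({
    tools,   // assumed: the same tool refs used elsewhere in the graph setup
    toolMap, // assumed
    errorHandler: ({ id, name, input, error }, metadata) => {
      // e.g. forward tool failures to logging/telemetry
      console.warn(`tool ${name} (${id}) failed: ${error?.message}`, { input, metadata });
    },
  });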