@librechat/agents 2.3.93 → 2.3.95

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -74,7 +74,9 @@ describe('formatAgentMessages with tools parameter', () => {
 
     // The content should be a string representation of both messages
     expect(typeof result.messages[1].content).toBe('string');
-    expect(result.messages[1].content).toEqual('AI: Let me check the weather for you.\nTool: check_weather, Sunny, 75°F');
+    expect(result.messages[1].content).toEqual(
+      'AI: Let me check the weather for you.\nTool: check_weather, Sunny, 75°F'
+    );
   });
 
   it('should convert tool messages to string when tool is not in the allowed set', () => {
@@ -113,7 +115,9 @@ describe('formatAgentMessages with tools parameter', () => {
 
     // The content should be a string representation of both messages
     expect(typeof result.messages[1].content).toBe('string');
-    expect(result.messages[1].content).toEqual('AI: Let me check the weather for you.\nTool: check_weather, Sunny, 75°F');
+    expect(result.messages[1].content).toEqual(
+      'AI: Let me check the weather for you.\nTool: check_weather, Sunny, 75°F'
+    );
   });
 
   it('should not convert tool messages when tool is in the allowed set', () => {
@@ -154,7 +158,10 @@ describe('formatAgentMessages with tools parameter', () => {
 
   it('should handle multiple tool calls with mixed allowed/disallowed tools', () => {
     const payload: TPayload = [
-      { role: 'user', content: 'Tell me about the weather and calculate something' },
+      {
+        role: 'user',
+        content: 'Tell me about the weather and calculate something',
+      },
       {
         role: 'assistant',
         content: [
@@ -202,9 +209,13 @@ describe('formatAgentMessages with tools parameter', () => {
 
     // The content should include all parts
     expect(typeof result.messages[1].content).toBe('string');
-    expect(result.messages[1].content).toContain('Let me check the weather first.');
+    expect(result.messages[1].content).toContain(
+      'Let me check the weather first.'
+    );
     expect(result.messages[1].content).toContain('Sunny, 75°F');
-    expect(result.messages[1].content).toContain('Now let me calculate something for you.');
+    expect(result.messages[1].content).toContain(
+      'Now let me calculate something for you.'
+    );
     expect(result.messages[1].content).toContain('2');
   });
 
@@ -233,14 +244,18 @@ describe('formatAgentMessages with tools parameter', () => {
     ];
 
     const indexTokenCountMap = {
-      0: 10, // 10 tokens for user message
-      1: 40, // 40 tokens for assistant message with tool call
+      0: 10, // 10 tokens for user message
+      1: 40, // 40 tokens for assistant message with tool call
     };
 
     // Provide a set of allowed tools that doesn't include 'check_weather'
     const allowedTools = new Set(['search', 'calculator']);
 
-    const result = formatAgentMessages(payload, indexTokenCountMap, allowedTools);
+    const result = formatAgentMessages(
+      payload,
+      indexTokenCountMap,
+      allowedTools
+    );
 
     // Should have 2 messages and 2 entries in the token count map
     expect(result.messages).toHaveLength(2);
@@ -253,6 +268,39 @@ describe('formatAgentMessages with tools parameter', () => {
     expect(result.indexTokenCountMap?.[1]).toBe(40);
   });
 
+  it('should heal invalid tool call structure when converting to string', () => {
+    const payload: TPayload = [
+      {
+        role: 'assistant',
+        content: [
+          {
+            type: ContentTypes.TOOL_CALL,
+            tool_call: {
+              id: 'tool_1',
+              name: 'check_weather',
+              args: '{"location":"New York"}',
+              output: 'Sunny, 75°F',
+            },
+          },
+        ],
+      },
+    ];
+
+    // Provide a set of allowed tools that doesn't include 'check_weather'
+    const allowedTools = new Set(['search', 'calculator']);
+
+    const result = formatAgentMessages(payload, undefined, allowedTools);
+
+    // Should convert to a single AIMessage with string content
+    expect(result.messages).toHaveLength(1);
+    expect(result.messages[0]).toBeInstanceOf(AIMessage);
+
+    // The content should be a string representation of the tool message
+    expect(typeof result.messages[0].content).toBe('string');
+    expect(result.messages[0].content).toContain('check_weather');
+    expect(result.messages[0].content).toContain('Sunny, 75°F');
+  });
+
   it('should handle complex sequences with multiple tool calls', () => {
     const payload: TPayload = [
       { role: 'user', content: 'Help me with a complex task' },
@@ -313,7 +361,10 @@ describe('formatAgentMessages with tools parameter', () => {
           },
         ],
       },
-      { role: 'assistant', content: 'Here\'s your answer based on all that information.' },
+      {
+        role: 'assistant',
+        content: 'Here\'s your answer based on all that information.',
+      },
     ];
 
     // Allow search and calculator but not check_weather
@@ -328,7 +379,7 @@ describe('formatAgentMessages with tools parameter', () => {
 
     // Check the types of messages
     expect(result.messages[0]).toBeInstanceOf(HumanMessage);
-    expect(result.messages[1]).toBeInstanceOf(AIMessage);  // Search message
+    expect(result.messages[1]).toBeInstanceOf(AIMessage); // Search message
     expect(result.messages[2]).toBeInstanceOf(ToolMessage); // Search tool response
     expect(result.messages[3]).toBeInstanceOf(AIMessage); // Converted weather+calculator message
     expect(result.messages[4]).toBeInstanceOf(AIMessage); // Final message
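
The test changes above are mostly Prettier-style reflow plus one new case: when a disallowed tool call carries an inline `output` with no paired tool message (an "invalid" structure), formatAgentMessages heals it by flattening everything into a single string-content AIMessage. A minimal sketch of the call pattern these tests exercise; the import paths are assumptions, only the call shapes and payload fields come from the test code in this diff:

// Sketch, assuming these names are exported from the package root.
import { AIMessage } from '@langchain/core/messages';
import { formatAgentMessages, ContentTypes } from '@librechat/agents';
import type { TPayload } from '@librechat/agents';

const payload: TPayload = [
  { role: 'user', content: 'What is the weather?' },
  {
    role: 'assistant',
    content: [
      {
        type: ContentTypes.TOOL_CALL,
        tool_call: {
          id: 'tool_1',
          name: 'check_weather',
          args: '{"location":"New York"}',
          output: 'Sunny, 75°F',
        },
      },
    ],
  },
];

// 'check_weather' is not in the allowed set, so its tool call is flattened
// into a plain-string AIMessage instead of an AIMessage/ToolMessage pair.
const allowedTools = new Set(['search', 'calculator']);
const result = formatAgentMessages(payload, undefined, allowedTools);

console.log(result.messages[1] instanceof AIMessage); // true
console.log(typeof result.messages[1].content); // 'string'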
package/src/run.ts CHANGED
@@ -3,7 +3,10 @@ import { zodToJsonSchema } from 'zod-to-json-schema';
 import { PromptTemplate } from '@langchain/core/prompts';
 import { AzureChatOpenAI, ChatOpenAI } from '@langchain/openai';
 import { SystemMessage } from '@langchain/core/messages';
-import type { BaseMessage, MessageContentComplex } from '@langchain/core/messages';
+import type {
+  BaseMessage,
+  MessageContentComplex,
+} from '@langchain/core/messages';
 import type { ClientCallbacks, SystemCallbacks } from '@/graphs/Graph';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
@@ -37,7 +40,9 @@ export class Run<T extends t.BaseGraphState> {
     const handlerRegistry = new HandlerRegistry();
 
     if (config.customHandlers) {
-      for (const [eventType, handler] of Object.entries(config.customHandlers)) {
+      for (const [eventType, handler] of Object.entries(
+        config.customHandlers
+      )) {
         handlerRegistry.register(eventType, handler);
       }
     }
@@ -50,7 +55,9 @@ export class Run<T extends t.BaseGraphState> {
 
     if (config.graphConfig.type === 'standard' || !config.graphConfig.type) {
       this.provider = config.graphConfig.llmConfig.provider;
-      this.graphRunnable = this.createStandardGraph(config.graphConfig) as unknown as t.CompiledWorkflow<T, Partial<T>, string>;
+      this.graphRunnable = this.createStandardGraph(
+        config.graphConfig
+      ) as unknown as t.CompiledWorkflow<T, Partial<T>, string>;
       if (this.Graph) {
         this.Graph.handlerRegistry = handlerRegistry;
       }
@@ -59,7 +66,9 @@ export class Run<T extends t.BaseGraphState> {
     this.returnContent = config.returnContent ?? false;
   }
 
-  private createStandardGraph(config: t.StandardGraphConfig): t.CompiledWorkflow<t.IState, Partial<t.IState>, string> {
+  private createStandardGraph(
+    config: t.StandardGraphConfig
+  ): t.CompiledWorkflow<t.IState, Partial<t.IState>, string> {
     const { llmConfig, tools = [], ...graphInput } = config;
     const { provider, ...clientOptions } = llmConfig;
 
@@ -74,13 +83,17 @@ export class Run<T extends t.BaseGraphState> {
     return standardGraph.createWorkflow();
   }
 
-  static async create<T extends t.BaseGraphState>(config: t.RunConfig): Promise<Run<T>> {
+  static async create<T extends t.BaseGraphState>(
+    config: t.RunConfig
+  ): Promise<Run<T>> {
     return new Run<T>(config);
   }
 
   getRunMessages(): BaseMessage[] | undefined {
     if (!this.Graph) {
-      throw new Error('Graph not initialized. Make sure to use Run.create() to instantiate the Run.');
+      throw new Error(
+        'Graph not initialized. Make sure to use Run.create() to instantiate the Run.'
+      );
     }
     return this.Graph.getRunMessages();
   }
@@ -88,13 +101,17 @@ export class Run<T extends t.BaseGraphState> {
   async processStream(
     inputs: t.IState,
     config: Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string },
-    streamOptions?: t.EventStreamOptions,
+    streamOptions?: t.EventStreamOptions
   ): Promise<MessageContentComplex[] | undefined> {
     if (!this.graphRunnable) {
-      throw new Error('Run not initialized. Make sure to use Run.create() to instantiate the Run.');
+      throw new Error(
+        'Run not initialized. Make sure to use Run.create() to instantiate the Run.'
+      );
    }
     if (!this.Graph) {
-      throw new Error('Graph not initialized. Make sure to use Run.create() to instantiate the Run.');
+      throw new Error(
+        'Graph not initialized. Make sure to use Run.create() to instantiate the Run.'
+      );
     }
 
     this.Graph.resetValues(streamOptions?.keepContent);
@@ -102,30 +119,46 @@ export class Run<T extends t.BaseGraphState> {
     const hasTools = this.Graph.tools ? this.Graph.tools.length > 0 : false;
     if (streamOptions?.callbacks) {
       /* TODO: conflicts with callback manager */
-      const callbacks = config.callbacks as t.ProvidedCallbacks ?? [];
-      config.callbacks = callbacks.concat(this.getCallbacks(streamOptions.callbacks));
+      const callbacks = (config.callbacks as t.ProvidedCallbacks) ?? [];
+      config.callbacks = callbacks.concat(
+        this.getCallbacks(streamOptions.callbacks)
+      );
     }
 
     if (!this.id) {
       throw new Error('Run ID not provided');
     }
 
-    const tokenCounter = streamOptions?.tokenCounter ?? (streamOptions?.indexTokenCountMap ? await createTokenCounter() : undefined);
-    const toolTokens = tokenCounter ? (this.Graph.tools?.reduce((acc, tool) => {
-      if (!tool.schema) {
-        return acc;
-      }
+    const tokenCounter =
+      streamOptions?.tokenCounter ??
+      (streamOptions?.indexTokenCountMap
+        ? await createTokenCounter()
+        : undefined);
+    const toolTokens = tokenCounter
+      ? (this.Graph.tools?.reduce((acc, tool) => {
+          if (!(tool as Partial<t.GenericTool>).schema) {
+            return acc;
+          }
 
-      const jsonSchema = zodToJsonSchema(tool.schema.describe(tool.description ?? ''), tool.name);
-      return acc + tokenCounter(new SystemMessage(JSON.stringify(jsonSchema)));
-    }, 0) ?? 0) : 0;
+          const jsonSchema = zodToJsonSchema(
+            tool.schema.describe(tool.description ?? ''),
+            tool.name
+          );
+          return (
+            acc + tokenCounter(new SystemMessage(JSON.stringify(jsonSchema)))
+          );
+        }, 0) ?? 0)
+      : 0;
     let instructionTokens = toolTokens;
     if (this.Graph.systemMessage && tokenCounter) {
       instructionTokens += tokenCounter(this.Graph.systemMessage);
     }
     const tokenMap = streamOptions?.indexTokenCountMap ?? {};
-    if (this.Graph.systemMessage && instructionTokens > 0) {
-      this.Graph.indexTokenCountMap = shiftIndexTokenCountMap(tokenMap, instructionTokens);
+    if (this.Graph.systemMessage && instructionTokens > 0) {
+      this.Graph.indexTokenCountMap = shiftIndexTokenCountMap(
+        tokenMap,
+        instructionTokens
+      );
     } else if (instructionTokens > 0) {
       tokenMap[0] = tokenMap[0] + instructionTokens;
       this.Graph.indexTokenCountMap = tokenMap;
@@ -137,7 +170,10 @@ export class Run<T extends t.BaseGraphState> {
     this.Graph.tokenCounter = tokenCounter;
 
     config.run_id = this.id;
-    config.configurable = Object.assign(config.configurable ?? {}, { run_id: this.id, provider: this.provider });
+    config.configurable = Object.assign(config.configurable ?? {}, {
+      run_id: this.id,
+      provider: this.provider,
+    });
 
     const stream = this.graphRunnable.streamEvents(inputs, config);
 
@@ -145,7 +181,11 @@ export class Run<T extends t.BaseGraphState> {
       const { data, name, metadata, ...info } = event;
 
       let eventName: t.EventName = info.event;
-      if (hasTools && manualToolStreamProviders.has(provider) && eventName === GraphEvents.CHAT_MODEL_STREAM) {
+      if (
+        hasTools &&
+        manualToolStreamProviders.has(provider) &&
+        eventName === GraphEvents.CHAT_MODEL_STREAM
+      ) {
         /* Skipping CHAT_MODEL_STREAM event due to double-call edge case */
         continue;
       }
@@ -179,9 +219,18 @@ export class Run<T extends t.BaseGraphState> {
 
   getCallbacks(clientCallbacks: ClientCallbacks): SystemCallbacks {
     return {
-      [Callback.TOOL_ERROR]: this.createSystemCallback(clientCallbacks, Callback.TOOL_ERROR),
-      [Callback.TOOL_START]: this.createSystemCallback(clientCallbacks, Callback.TOOL_START),
-      [Callback.TOOL_END]: this.createSystemCallback(clientCallbacks, Callback.TOOL_END),
+      [Callback.TOOL_ERROR]: this.createSystemCallback(
+        clientCallbacks,
+        Callback.TOOL_ERROR
+      ),
+      [Callback.TOOL_START]: this.createSystemCallback(
+        clientCallbacks,
+        Callback.TOOL_START
+      ),
+      [Callback.TOOL_END]: this.createSystemCallback(
+        clientCallbacks,
+        Callback.TOOL_END
+      ),
     };
   }
 
@@ -192,35 +241,55 @@ export class Run<T extends t.BaseGraphState> {
     clientOptions,
     chainOptions,
     skipLanguage,
-  } : {
-    inputText: string;
-    contentParts: (t.MessageContentComplex | undefined)[];
-    titlePrompt?: string;
-    skipLanguage?: boolean;
-    clientOptions?: t.ClientOptions;
-    chainOptions?: Partial<RunnableConfig> | undefined;
-  }): Promise<{ language: string; title: string }> {
-    const convoTemplate = PromptTemplate.fromTemplate('User: {input}\nAI: {output}');
-    const response = contentParts.map((part) => {
-      if (part?.type === 'text') return part.text;
-      return '';
-    }).join('\n');
-    const convo = (await convoTemplate.invoke({ input: inputText, output: response })).value;
+  }: t.RunTitleOptions): Promise<{ language: string; title: string }> {
+    const convoTemplate = PromptTemplate.fromTemplate(
+      'User: {input}\nAI: {output}'
+    );
+    const response = contentParts
+      .map((part) => {
+        if (part?.type === 'text') return part.text;
+        return '';
+      })
+      .join('\n');
+    const convo = (
+      await convoTemplate.invoke({ input: inputText, output: response })
+    ).value;
     const model = this.Graph?.getNewModel({
       clientOptions,
-      omitOriginalOptions: ['streaming', 'stream', 'thinking', 'maxTokens', 'maxOutputTokens', 'additionalModelRequestFields'],
+      omitOriginalOptions: new Set([
+        'clientOptions',
+        'streaming',
+        'stream',
+        'thinking',
+        'maxTokens',
+        'maxOutputTokens',
+        'additionalModelRequestFields',
+      ]),
     });
     if (!model) {
       return { language: '', title: '' };
     }
-    if (isOpenAILike(this.provider) && (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)) {
-      model.temperature = (clientOptions as t.OpenAIClientOptions | undefined)?.temperature as number;
-      model.topP = (clientOptions as t.OpenAIClientOptions | undefined)?.topP as number;
-      model.frequencyPenalty = (clientOptions as t.OpenAIClientOptions | undefined)?.frequencyPenalty as number;
-      model.presencePenalty = (clientOptions as t.OpenAIClientOptions | undefined)?.presencePenalty as number;
-      model.n = (clientOptions as t.OpenAIClientOptions | undefined)?.n as number;
+    if (
+      isOpenAILike(this.provider) &&
+      (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)
+    ) {
+      model.temperature = (clientOptions as t.OpenAIClientOptions | undefined)
+        ?.temperature as number;
+      model.topP = (clientOptions as t.OpenAIClientOptions | undefined)
+        ?.topP as number;
+      model.frequencyPenalty = (
+        clientOptions as t.OpenAIClientOptions | undefined
+      )?.frequencyPenalty as number;
+      model.presencePenalty = (
+        clientOptions as t.OpenAIClientOptions | undefined
+      )?.presencePenalty as number;
+      model.n = (clientOptions as t.OpenAIClientOptions | undefined)
        ?.n as number;
     }
     const chain = await createTitleRunnable(model, titlePrompt);
-    return await chain.invoke({ convo, inputText, skipLanguage }, chainOptions) as { language: string; title: string };
+    return (await chain.invoke(
+      { convo, inputText, skipLanguage },
+      chainOptions
+    )) as { language: string; title: string };
  }
 }
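
Besides the reformatting, run.ts changes substance in three places: generateTitle now types its options as the shared t.RunTitleOptions (defined in package/src/types/run.ts below), getNewModel's omitOriginalOptions becomes a Set that additionally drops 'clientOptions', and the tool-token loop guards tool.schema through a Partial<t.GenericTool> cast. A hedged sketch of the new generateTitle call shape; the option fields come from the type, while the surrounding scaffolding is illustrative:

import type * as t from '@/types';

// Sketch only: Run construction is elided and the run parameter is narrowed
// to just the method used here.
async function makeTitle(run: {
  generateTitle(
    options: t.RunTitleOptions
  ): Promise<{ language: string; title: string }>;
}): Promise<string> {
  const titleOptions: t.RunTitleOptions = {
    inputText: 'What is the weather in New York?',
    contentParts: [{ type: 'text', text: 'Sunny, 75°F' }],
    // skipLanguage is forwarded to the title chain via chain.invoke above.
    skipLanguage: true,
  };
  const { title } = await run.generateTitle(titleOptions);
  return title;
}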
@@ -5,10 +5,14 @@ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
 import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ToolEndHandler, ModelEndHandler, createMetadataAggregator } from '@/events';
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
-import { GraphEvents } from '@/common';
+import { GraphEvents, Providers } from '@/common';
 import { Run } from '@/run';
 
 const conversationHistory: BaseMessage[] = [];
@@ -23,49 +27,79 @@ async function testStandardStreaming(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         // console.dir(data, { depth: null });
-        aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
-      }
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_REASONING_DELTA]: {
-      handle: (event: GraphEvents.ON_REASONING_DELTA, data: t.StreamEventData): void => {
+      handle: (
+        event: GraphEvents.ON_REASONING_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_REASONING_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.ReasoningDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (_event: string, data: t.StreamEventData, metadata?: Record<string, unknown>): void => {
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         // console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
   const llmConfig = getLLMConfig(provider);
+  if (provider === Providers.ANTHROPIC) {
+    (llmConfig as t.AnthropicClientOptions).clientOptions = {
+      defaultHeaders: {
+        'anthropic-beta':
+          'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
+      },
+    };
+  }
 
   const run = await Run.create<t.IState>({
     runId: 'test-run-id',
@@ -74,7 +108,8 @@ async function testStandardStreaming(): Promise<void> {
       llmConfig,
       // tools: [new TavilySearchResults()],
       reasoningKey: 'reasoning',
-      instructions: 'You are a friendly AI assistant. Always address the user by their name.',
+      instructions:
+        'You are a friendly AI assistant. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
     },
     returnContent: true,
@@ -107,18 +142,26 @@ async function testStandardStreaming(): Promise<void> {
   // console.dir(finalContentParts, { depth: null });
   console.log('\n\n====================\n\n');
   console.dir(contentParts, { depth: null });
-  // const { handleLLMEnd, collected } = createMetadataAggregator();
-  // const titleResult = await run.generateTitle({
-  //   inputText: userMessage,
-  //   contentParts,
-  //   chainOptions: {
-  //     callbacks: [{
-  //       handleLLMEnd,
-  //     }],
-  //   },
-  // });
-  // console.log('Generated Title:', titleResult);
-  // console.log('Collected metadata:', collected);
+  const { handleLLMEnd, collected } = createMetadataAggregator();
+  const titleOptions: t.RunTitleOptions = {
+    inputText: userMessage,
+    contentParts,
+    chainOptions: {
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
+    },
+  };
+  if (provider === Providers.ANTHROPIC) {
+    titleOptions.clientOptions = {
+      model: 'claude-3-5-haiku-latest',
+    };
+  }
  const titleResult = await run.generateTitle(titleOptions);
  console.log('Generated Title:', titleResult);
  console.log('Collected metadata:', collected);
 }
 
 process.on('unhandledRejection', (reason, promise) => {
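
The demo's two functional changes: Anthropic runs now opt into beta features through clientOptions.defaultHeaders, and the previously commented-out title generation is re-enabled, with a cheaper override model for Anthropic. A condensed, hedged sketch assembling those pieces; getLLMConfig's exact signature and any graphConfig fields beyond those shown in the demo are assumptions:

import type * as t from '@/types';
import { Providers } from '@/common';
import { Run } from '@/run';
import { getLLMConfig } from '@/utils/llmConfig';

async function createAnthropicRun(): Promise<Run<t.IState>> {
  const llmConfig = getLLMConfig(Providers.ANTHROPIC);
  // Beta opt-in as in the demo; the header value is verbatim from the diff.
  (llmConfig as t.AnthropicClientOptions).clientOptions = {
    defaultHeaders: {
      'anthropic-beta':
        'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
    },
  };
  return Run.create<t.IState>({
    runId: 'test-run-id',
    graphConfig: {
      type: 'standard', // default branch in the Run constructor
      llmConfig,
      instructions: 'You are a friendly AI assistant.',
    },
    returnContent: true,
  });
}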
package/src/types/run.ts CHANGED
@@ -2,8 +2,13 @@
 import type * as z from 'zod';
 import type { BaseMessage } from '@langchain/core/messages';
 import type { StructuredTool } from '@langchain/core/tools';
-import type { BaseCallbackHandler, CallbackHandlerMethods } from '@langchain/core/callbacks/base';
+import type { RunnableConfig } from '@langchain/core/runnables';
+import type {
+  BaseCallbackHandler,
+  CallbackHandlerMethods,
+} from '@langchain/core/callbacks/base';
 import type * as graph from '@/graphs/Graph';
+import type * as s from '@/types/stream';
 import type * as e from '@/common/enum';
 import type * as g from '@/types/graph';
 import type * as l from '@/types/llm';
@@ -19,6 +24,15 @@ export type BaseGraphConfig = {
 export type StandardGraphConfig = BaseGraphConfig &
   Omit<g.StandardGraphInput, 'provider' | 'clientOptions'>;
 
+export type RunTitleOptions = {
+  inputText: string;
+  contentParts: (s.MessageContentComplex | undefined)[];
+  titlePrompt?: string;
+  skipLanguage?: boolean;
+  clientOptions?: l.ClientOptions;
+  chainOptions?: Partial<RunnableConfig> | undefined;
+};
+
 export interface AgentStateChannels {
   messages: BaseMessage[];
   next: string;
@@ -48,12 +62,17 @@ export type TaskManagerGraphConfig = {
 
 export type RunConfig = {
   runId: string;
-  graphConfig: StandardGraphConfig | CollaborativeGraphConfig | TaskManagerGraphConfig;
+  graphConfig:
+    | StandardGraphConfig
+    | CollaborativeGraphConfig
+    | TaskManagerGraphConfig;
   customHandlers?: Record<string, g.EventHandler>;
   returnContent?: boolean;
 };
 
-export type ProvidedCallbacks = (BaseCallbackHandler | CallbackHandlerMethods)[] | undefined;
+export type ProvidedCallbacks =
+  | (BaseCallbackHandler | CallbackHandlerMethods)[]
+  | undefined;
 
 export type TokenCounter = (message: BaseMessage) => number;
 export type EventStreamOptions = {
@@ -63,4 +82,4 @@ export type EventStreamOptions = {
   maxContextTokens?: number;
   tokenCounter?: TokenCounter;
   indexTokenCountMap?: Record<string, number>;
-}
+};
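
For reference, a minimal sketch of how the EventStreamOptions fields above feed processStream's token accounting in package/src/run.ts: supplying indexTokenCountMap without a tokenCounter makes processStream create one via createTokenCounter(), and instruction/tool tokens are then folded in via shiftIndexTokenCountMap. The run parameter is narrowed for illustration; the real processStream also accepts Partial<RunnableConfig>:

import type * as t from '@/types';

async function streamWithBudget(
  run: {
    processStream(
      inputs: t.IState,
      config: { version: 'v1' | 'v2'; run_id?: string },
      streamOptions?: t.EventStreamOptions
    ): Promise<unknown>;
  },
  inputs: t.IState
): Promise<void> {
  const streamOptions: t.EventStreamOptions = {
    maxContextTokens: 8192, // illustrative budget
    indexTokenCountMap: { 0: 10, 1: 40 }, // per-message token counts by index
  };
  await run.processStream(inputs, { version: 'v2' }, streamOptions);
}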