@librechat/agents 3.0.0-rc11 → 3.0.0-rc13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/dist/cjs/agents/AgentContext.cjs +6 -2
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  3. package/dist/cjs/graphs/Graph.cjs +16 -1
  4. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  5. package/dist/cjs/instrumentation.cjs +21 -0
  6. package/dist/cjs/instrumentation.cjs.map +1 -0
  7. package/dist/cjs/llm/openai/index.cjs +0 -4
  8. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  9. package/dist/cjs/main.cjs +5 -1
  10. package/dist/cjs/main.cjs.map +1 -1
  11. package/dist/cjs/messages/cache.cjs +49 -0
  12. package/dist/cjs/messages/cache.cjs.map +1 -0
  13. package/dist/cjs/messages/content.cjs +53 -0
  14. package/dist/cjs/messages/content.cjs.map +1 -0
  15. package/dist/cjs/messages/format.cjs +0 -27
  16. package/dist/cjs/messages/format.cjs.map +1 -1
  17. package/dist/cjs/run.cjs +57 -5
  18. package/dist/cjs/run.cjs.map +1 -1
  19. package/dist/cjs/stream.cjs +7 -0
  20. package/dist/cjs/stream.cjs.map +1 -1
  21. package/dist/cjs/utils/misc.cjs +8 -1
  22. package/dist/cjs/utils/misc.cjs.map +1 -1
  23. package/dist/cjs/utils/title.cjs +54 -25
  24. package/dist/cjs/utils/title.cjs.map +1 -1
  25. package/dist/esm/agents/AgentContext.mjs +6 -2
  26. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  27. package/dist/esm/graphs/Graph.mjs +16 -1
  28. package/dist/esm/graphs/Graph.mjs.map +1 -1
  29. package/dist/esm/instrumentation.mjs +19 -0
  30. package/dist/esm/instrumentation.mjs.map +1 -0
  31. package/dist/esm/llm/openai/index.mjs +0 -4
  32. package/dist/esm/llm/openai/index.mjs.map +1 -1
  33. package/dist/esm/main.mjs +4 -2
  34. package/dist/esm/main.mjs.map +1 -1
  35. package/dist/esm/messages/cache.mjs +47 -0
  36. package/dist/esm/messages/cache.mjs.map +1 -0
  37. package/dist/esm/messages/content.mjs +51 -0
  38. package/dist/esm/messages/content.mjs.map +1 -0
  39. package/dist/esm/messages/format.mjs +1 -27
  40. package/dist/esm/messages/format.mjs.map +1 -1
  41. package/dist/esm/run.mjs +57 -5
  42. package/dist/esm/run.mjs.map +1 -1
  43. package/dist/esm/stream.mjs +7 -0
  44. package/dist/esm/stream.mjs.map +1 -1
  45. package/dist/esm/utils/misc.mjs +8 -2
  46. package/dist/esm/utils/misc.mjs.map +1 -1
  47. package/dist/esm/utils/title.mjs +54 -25
  48. package/dist/esm/utils/title.mjs.map +1 -1
  49. package/dist/types/agents/AgentContext.d.ts +4 -1
  50. package/dist/types/instrumentation.d.ts +1 -0
  51. package/dist/types/messages/cache.d.ts +8 -0
  52. package/dist/types/messages/content.d.ts +7 -0
  53. package/dist/types/messages/format.d.ts +1 -7
  54. package/dist/types/messages/index.d.ts +2 -0
  55. package/dist/types/run.d.ts +2 -1
  56. package/dist/types/types/graph.d.ts +2 -0
  57. package/dist/types/types/messages.d.ts +4 -0
  58. package/dist/types/utils/misc.d.ts +1 -0
  59. package/package.json +5 -1
  60. package/src/agents/AgentContext.ts +8 -0
  61. package/src/graphs/Graph.ts +23 -1
  62. package/src/instrumentation.ts +22 -0
  63. package/src/llm/anthropic/llm.spec.ts +1 -1
  64. package/src/llm/openai/index.ts +0 -5
  65. package/src/messages/cache.test.ts +262 -0
  66. package/src/messages/cache.ts +56 -0
  67. package/src/messages/content.test.ts +362 -0
  68. package/src/messages/content.ts +63 -0
  69. package/src/messages/format.ts +0 -34
  70. package/src/messages/index.ts +2 -0
  71. package/src/run.ts +82 -10
  72. package/src/scripts/ant_web_search.ts +1 -1
  73. package/src/scripts/handoff-test.ts +1 -1
  74. package/src/scripts/multi-agent-chain.ts +4 -4
  75. package/src/scripts/multi-agent-conditional.ts +4 -4
  76. package/src/scripts/multi-agent-document-review-chain.ts +4 -4
  77. package/src/scripts/multi-agent-parallel.ts +10 -8
  78. package/src/scripts/multi-agent-sequence.ts +3 -3
  79. package/src/scripts/multi-agent-supervisor.ts +5 -3
  80. package/src/scripts/multi-agent-test.ts +2 -2
  81. package/src/scripts/simple.ts +8 -0
  82. package/src/scripts/test-custom-prompt-key.ts +4 -4
  83. package/src/scripts/test-handoff-input.ts +3 -3
  84. package/src/scripts/test-multi-agent-list-handoff.ts +2 -2
  85. package/src/stream.ts +9 -2
  86. package/src/types/graph.ts +2 -0
  87. package/src/types/messages.ts +4 -0
  88. package/src/utils/misc.ts +33 -21
  89. package/src/utils/title.ts +80 -40
@@ -0,0 +1,362 @@
1
+ import {
2
+ HumanMessage,
3
+ AIMessage,
4
+ SystemMessage,
5
+ } from '@langchain/core/messages';
6
+ import { formatContentStrings } from './content';
7
+ import { ContentTypes } from '@/common';
8
+
9
+ describe('formatContentStrings', () => {
10
+ describe('Human messages', () => {
11
+ it('should convert human message with all text blocks to string', () => {
12
+ const messages = [
13
+ new HumanMessage({
14
+ content: [
15
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
16
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
17
+ ],
18
+ }),
19
+ ];
20
+
21
+ const result = formatContentStrings(messages);
22
+
23
+ expect(result).toHaveLength(1);
24
+ expect(result[0].content).toBe('Hello\nWorld');
25
+ });
26
+
27
+ it('should not convert human message with mixed content types (text + image)', () => {
28
+ const messages = [
29
+ new HumanMessage({
30
+ content: [
31
+ { type: ContentTypes.TEXT, text: 'what do you see' },
32
+ {
33
+ type: 'image_url',
34
+ image_url: {
35
+ url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
36
+ detail: 'auto',
37
+ },
38
+ },
39
+ ],
40
+ }),
41
+ ];
42
+
43
+ const result = formatContentStrings(messages);
44
+
45
+ expect(result).toHaveLength(1);
46
+ expect(result[0].content).toEqual([
47
+ { type: ContentTypes.TEXT, text: 'what do you see' },
48
+ {
49
+ type: 'image_url',
50
+ image_url: {
51
+ url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
52
+ detail: 'auto',
53
+ },
54
+ },
55
+ ]);
56
+ });
57
+
58
+ it('should leave string content unchanged', () => {
59
+ const messages = [
60
+ new HumanMessage({
61
+ content: 'Hello World',
62
+ }),
63
+ ];
64
+
65
+ const result = formatContentStrings(messages);
66
+
67
+ expect(result).toHaveLength(1);
68
+ expect(result[0].content).toBe('Hello World');
69
+ });
70
+
71
+ it('should handle empty text blocks', () => {
72
+ const messages = [
73
+ new HumanMessage({
74
+ content: [
75
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
76
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: '' },
77
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
78
+ ],
79
+ }),
80
+ ];
81
+
82
+ const result = formatContentStrings(messages);
83
+
84
+ expect(result).toHaveLength(1);
85
+ expect(result[0].content).toBe('Hello\n\nWorld');
86
+ });
87
+
88
+ it('should handle null/undefined text values', () => {
89
+ const messages = [
90
+ new HumanMessage({
91
+ content: [
92
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
93
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: null },
94
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: undefined },
95
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
96
+ ],
97
+ }),
98
+ ];
99
+
100
+ const result = formatContentStrings(messages);
101
+
102
+ expect(result).toHaveLength(1);
103
+ expect(result[0].content).toBe('Hello\n\n\nWorld');
104
+ });
105
+ });
106
+
107
+ describe('AI messages', () => {
108
+ it('should convert AI message with all text blocks to string', () => {
109
+ const messages = [
110
+ new AIMessage({
111
+ content: [
112
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
113
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
114
+ ],
115
+ }),
116
+ ];
117
+
118
+ const result = formatContentStrings(messages);
119
+
120
+ expect(result).toHaveLength(1);
121
+ expect(result[0].content).toBe('Hello\nWorld');
122
+ expect(result[0].getType()).toBe('ai');
123
+ });
124
+
125
+ it('should not convert AI message with mixed content types', () => {
126
+ const messages = [
127
+ new AIMessage({
128
+ content: [
129
+ {
130
+ type: ContentTypes.TEXT,
131
+ [ContentTypes.TEXT]: 'Here is an image',
132
+ },
133
+ {
134
+ type: ContentTypes.TOOL_CALL,
135
+ tool_call: { name: 'generate_image' },
136
+ },
137
+ ],
138
+ }),
139
+ ];
140
+
141
+ const result = formatContentStrings(messages);
142
+
143
+ expect(result).toHaveLength(1);
144
+ expect(result[0].content).toEqual([
145
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here is an image' },
146
+ { type: ContentTypes.TOOL_CALL, tool_call: { name: 'generate_image' } },
147
+ ]);
148
+ });
149
+ });
150
+
151
+ describe('System messages', () => {
152
+ it('should convert System message with all text blocks to string', () => {
153
+ const messages = [
154
+ new SystemMessage({
155
+ content: [
156
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
157
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
158
+ ],
159
+ }),
160
+ ];
161
+
162
+ const result = formatContentStrings(messages);
163
+
164
+ expect(result).toHaveLength(1);
165
+ expect(result[0].content).toBe('System\nMessage');
166
+ expect(result[0].getType()).toBe('system');
167
+ });
168
+ });
169
+
170
+ describe('Mixed message types', () => {
171
+ it('should process all valid message types in mixed array', () => {
172
+ const messages = [
173
+ new HumanMessage({
174
+ content: [
175
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Human' },
176
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
177
+ ],
178
+ }),
179
+ new AIMessage({
180
+ content: [
181
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'AI' },
182
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Response' },
183
+ ],
184
+ }),
185
+ new SystemMessage({
186
+ content: [
187
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
188
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Prompt' },
189
+ ],
190
+ }),
191
+ ];
192
+
193
+ const result = formatContentStrings(messages);
194
+
195
+ expect(result).toHaveLength(3);
196
+ // All messages should be converted
197
+ expect(result[0].content).toBe('Human\nMessage');
198
+ expect(result[0].getType()).toBe('human');
199
+
200
+ expect(result[1].content).toBe('AI\nResponse');
201
+ expect(result[1].getType()).toBe('ai');
202
+
203
+ expect(result[2].content).toBe('System\nPrompt');
204
+ expect(result[2].getType()).toBe('system');
205
+ });
206
+ });
207
+
208
+ describe('Edge cases', () => {
209
+ it('should handle empty array', () => {
210
+ const result = formatContentStrings([]);
211
+ expect(result).toEqual([]);
212
+ });
213
+
214
+ it('should handle messages with non-array content', () => {
215
+ const messages = [
216
+ new HumanMessage({
217
+ content: 'This is a string content',
218
+ }),
219
+ ];
220
+
221
+ const result = formatContentStrings(messages);
222
+
223
+ expect(result).toHaveLength(1);
224
+ expect(result[0].content).toBe('This is a string content');
225
+ });
226
+
227
+ it('should trim the final concatenated string', () => {
228
+ const messages = [
229
+ new HumanMessage({
230
+ content: [
231
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' Hello ' },
232
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' World ' },
233
+ ],
234
+ }),
235
+ ];
236
+
237
+ const result = formatContentStrings(messages);
238
+
239
+ expect(result).toHaveLength(1);
240
+ expect(result[0].content).toBe('Hello \n World');
241
+ });
242
+ });
243
+
244
+ describe('Real-world scenarios', () => {
245
+ it('should handle the exact scenario from the issue', () => {
246
+ const messages = [
247
+ new HumanMessage({
248
+ content: [
249
+ {
250
+ type: 'text',
251
+ text: 'hi there',
252
+ },
253
+ ],
254
+ }),
255
+ new AIMessage({
256
+ content: [
257
+ {
258
+ type: 'text',
259
+ text: 'Hi Danny! How can I help you today?',
260
+ },
261
+ ],
262
+ }),
263
+ new HumanMessage({
264
+ content: [
265
+ {
266
+ type: 'text',
267
+ text: 'what do you see',
268
+ },
269
+ {
270
+ type: 'image_url',
271
+ image_url: {
272
+ url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
273
+ detail: 'auto',
274
+ },
275
+ },
276
+ ],
277
+ }),
278
+ ];
279
+
280
+ const result = formatContentStrings(messages);
281
+
282
+ expect(result).toHaveLength(3);
283
+
284
+ // First human message (all text) should be converted
285
+ expect(result[0].content).toBe('hi there');
286
+ expect(result[0].getType()).toBe('human');
287
+
288
+ // AI message (all text) should now also be converted
289
+ expect(result[1].content).toBe('Hi Danny! How can I help you today?');
290
+ expect(result[1].getType()).toBe('ai');
291
+
292
+ // Third message (mixed content) should remain unchanged
293
+ expect(result[2].content).toEqual([
294
+ {
295
+ type: 'text',
296
+ text: 'what do you see',
297
+ },
298
+ {
299
+ type: 'image_url',
300
+ image_url: {
301
+ url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
302
+ detail: 'auto',
303
+ },
304
+ },
305
+ ]);
306
+ });
307
+
308
+ it('should handle messages with tool calls', () => {
309
+ const messages = [
310
+ new HumanMessage({
311
+ content: [
312
+ {
313
+ type: ContentTypes.TEXT,
314
+ [ContentTypes.TEXT]: 'Please use the calculator',
315
+ },
316
+ {
317
+ type: ContentTypes.TOOL_CALL,
318
+ tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
319
+ },
320
+ ],
321
+ }),
322
+ new AIMessage({
323
+ content: [
324
+ {
325
+ type: ContentTypes.TEXT,
326
+ [ContentTypes.TEXT]: 'I will calculate that for you',
327
+ },
328
+ {
329
+ type: ContentTypes.TOOL_CALL,
330
+ tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
331
+ },
332
+ ],
333
+ }),
334
+ ];
335
+
336
+ const result = formatContentStrings(messages);
337
+
338
+ expect(result).toHaveLength(2);
339
+ // Should not convert because not all blocks are text
340
+ expect(result[0].content).toEqual([
341
+ {
342
+ type: ContentTypes.TEXT,
343
+ [ContentTypes.TEXT]: 'Please use the calculator',
344
+ },
345
+ {
346
+ type: ContentTypes.TOOL_CALL,
347
+ tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
348
+ },
349
+ ]);
350
+ expect(result[1].content).toEqual([
351
+ {
352
+ type: ContentTypes.TEXT,
353
+ [ContentTypes.TEXT]: 'I will calculate that for you',
354
+ },
355
+ {
356
+ type: ContentTypes.TOOL_CALL,
357
+ tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
358
+ },
359
+ ]);
360
+ });
361
+ });
362
+ });
@@ -0,0 +1,63 @@
1
+ import { ContentTypes } from '@/common';
2
+ import type { BaseMessage } from '@langchain/core/messages';
3
+
4
+ /**
5
+ * Formats an array of messages for LangChain, making sure all content fields are strings
6
+ * @param {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} payload - The array of messages to format.
7
+ * @returns {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
8
+ */
9
+ export const formatContentStrings = (
10
+ payload: Array<BaseMessage>
11
+ ): Array<BaseMessage> => {
12
+ // Create a new array to store the processed messages
13
+ const result: Array<BaseMessage> = [];
14
+
15
+ for (const message of payload) {
16
+ const messageType = message.getType();
17
+ const isValidMessage =
18
+ messageType === 'human' ||
19
+ messageType === 'ai' ||
20
+ messageType === 'system';
21
+
22
+ if (!isValidMessage) {
23
+ result.push(message);
24
+ continue;
25
+ }
26
+
27
+ // If content is already a string, add as-is
28
+ if (typeof message.content === 'string') {
29
+ result.push(message);
30
+ continue;
31
+ }
32
+
33
+ // If content is not an array, add as-is
34
+ if (!Array.isArray(message.content)) {
35
+ result.push(message);
36
+ continue;
37
+ }
38
+
39
+ // Check if all content blocks are text type
40
+ const allTextBlocks = message.content.every(
41
+ (block) => block.type === ContentTypes.TEXT
42
+ );
43
+
44
+ // Only convert to string if all blocks are text type
45
+ if (!allTextBlocks) {
46
+ result.push(message);
47
+ continue;
48
+ }
49
+
50
+ // Reduce text types to a single string
51
+ const content = message.content.reduce((acc, curr) => {
52
+ if (curr.type === ContentTypes.TEXT) {
53
+ return `${acc}${curr[ContentTypes.TEXT] || ''}\n`;
54
+ }
55
+ return acc;
56
+ }, '');
57
+
58
+ message.content = content.trim();
59
+ result.push(message);
60
+ }
61
+
62
+ return result;
63
+ };
@@ -565,40 +565,6 @@ export const formatAgentMessages = (
565
565
  };
566
566
  };
567
567
 
568
- /**
569
- * Formats an array of messages for LangChain, making sure all content fields are strings
570
- * @param payload - The array of messages to format.
571
- * @returns - The array of formatted LangChain messages, including ToolMessages for tool calls.
572
- */
573
- export const formatContentStrings = (
574
- payload: Array<BaseMessage>
575
- ): Array<BaseMessage> => {
576
- // Create a copy of the payload to avoid modifying the original
577
- const result = [...payload];
578
-
579
- for (const message of result) {
580
- if (typeof message.content === 'string') {
581
- continue;
582
- }
583
-
584
- if (!Array.isArray(message.content)) {
585
- continue;
586
- }
587
-
588
- // Reduce text types to a single string, ignore all other types
589
- const content = message.content.reduce((acc, curr) => {
590
- if (curr.type === ContentTypes.TEXT) {
591
- return `${acc}${curr[ContentTypes.TEXT] || ''}\n`;
592
- }
593
- return acc;
594
- }, '');
595
-
596
- message.content = content.trim();
597
- }
598
-
599
- return result;
600
- };
601
-
602
568
  /**
603
569
  * Adds a value at key 0 for system messages and shifts all key indices by one in an indexTokenCountMap.
604
570
  * This is useful when adding a system message at the beginning of a conversation.
@@ -2,3 +2,5 @@ export * from './core';
2
2
  export * from './ids';
3
3
  export * from './prune';
4
4
  export * from './format';
5
+ export * from './cache';
6
+ export * from './content';
package/src/run.ts CHANGED
@@ -1,10 +1,14 @@
1
1
  // src/run.ts
2
+ import './instrumentation';
3
+ import { CallbackHandler } from '@langfuse/langchain';
2
4
  import { PromptTemplate } from '@langchain/core/prompts';
5
+ import { RunnableLambda } from '@langchain/core/runnables';
3
6
  import { AzureChatOpenAI, ChatOpenAI } from '@langchain/openai';
4
7
  import type {
5
- BaseMessage,
6
8
  MessageContentComplex,
9
+ BaseMessage,
7
10
  } from '@langchain/core/messages';
11
+ import type { StringPromptValue } from '@langchain/core/prompt_values';
8
12
  import type { RunnableConfig } from '@langchain/core/runnables';
9
13
  import type * as t from '@/types';
10
14
  import {
@@ -17,6 +21,7 @@ import { createTokenCounter } from '@/utils/tokens';
17
21
  import { StandardGraph } from '@/graphs/Graph';
18
22
  import { HandlerRegistry } from '@/events';
19
23
  import { isOpenAILike } from '@/utils/llm';
24
+ import { isPresent } from '@/utils/misc';
20
25
 
21
26
  export const defaultOmitOptions = new Set([
22
27
  'stream',
@@ -239,6 +244,27 @@ export class Run<_T extends t.BaseGraphState> {
239
244
  [Callback.CUSTOM_EVENT]: customEventCallback,
240
245
  });
241
246
 
247
+ if (
248
+ isPresent(process.env.LANGFUSE_SECRET_KEY) &&
249
+ isPresent(process.env.LANGFUSE_PUBLIC_KEY) &&
250
+ isPresent(process.env.LANGFUSE_BASE_URL)
251
+ ) {
252
+ const userId = config.configurable?.user_id;
253
+ const sessionId = config.configurable?.thread_id;
254
+ const traceMetadata = {
255
+ messageId: this.id,
256
+ parentMessageId: config.configurable?.requestBody?.parentMessageId,
257
+ };
258
+ const handler = new CallbackHandler({
259
+ userId,
260
+ sessionId,
261
+ traceMetadata,
262
+ });
263
+ config.callbacks = (
264
+ (config.callbacks as t.ProvidedCallbacks) ?? []
265
+ ).concat([handler]);
266
+ }
267
+
242
268
  if (!this.id) {
243
269
  throw new Error('Run ID not provided');
244
270
  }
@@ -313,18 +339,38 @@ export class Run<_T extends t.BaseGraphState> {
313
339
  titleMethod = TitleMethod.COMPLETION,
314
340
  titlePromptTemplate,
315
341
  }: t.RunTitleOptions): Promise<{ language?: string; title?: string }> {
342
+ if (
343
+ chainOptions != null &&
344
+ isPresent(process.env.LANGFUSE_SECRET_KEY) &&
345
+ isPresent(process.env.LANGFUSE_PUBLIC_KEY) &&
346
+ isPresent(process.env.LANGFUSE_BASE_URL)
347
+ ) {
348
+ const userId = chainOptions.configurable?.user_id;
349
+ const sessionId = chainOptions.configurable?.thread_id;
350
+ const traceMetadata = {
351
+ messageId: 'title-' + this.id,
352
+ };
353
+ const handler = new CallbackHandler({
354
+ userId,
355
+ sessionId,
356
+ traceMetadata,
357
+ });
358
+ chainOptions.callbacks = (
359
+ (chainOptions.callbacks as t.ProvidedCallbacks) ?? []
360
+ ).concat([handler]);
361
+ }
362
+
316
363
  const convoTemplate = PromptTemplate.fromTemplate(
317
364
  titlePromptTemplate ?? 'User: {input}\nAI: {output}'
318
365
  );
366
+
319
367
  const response = contentParts
320
368
  .map((part) => {
321
369
  if (part?.type === 'text') return part.text;
322
370
  return '';
323
371
  })
324
372
  .join('\n');
325
- const convo = (
326
- await convoTemplate.invoke({ input: inputText, output: response })
327
- ).value;
373
+
328
374
  const model = this.Graph?.getNewModel({
329
375
  provider,
330
376
  clientOptions,
@@ -349,25 +395,51 @@ export class Run<_T extends t.BaseGraphState> {
349
395
  model.n = (clientOptions as t.OpenAIClientOptions | undefined)
350
396
  ?.n as number;
351
397
  }
352
- const chain =
398
+
399
+ const convoToTitleInput = new RunnableLambda({
400
+ func: (
401
+ promptValue: StringPromptValue
402
+ ): { convo: string; inputText: string; skipLanguage?: boolean } => ({
403
+ convo: promptValue.value,
404
+ inputText,
405
+ skipLanguage,
406
+ }),
407
+ }).withConfig({ runName: 'ConvoTransform' });
408
+
409
+ const titleChain =
353
410
  titleMethod === TitleMethod.COMPLETION
354
411
  ? await createCompletionTitleRunnable(model, titlePrompt)
355
412
  : await createTitleRunnable(model, titlePrompt);
413
+
414
+ /** Pipes `convoTemplate` -> `transformer` -> `titleChain` */
415
+ const fullChain = convoTemplate
416
+ .withConfig({ runName: 'ConvoTemplate' })
417
+ .pipe(convoToTitleInput)
418
+ .pipe(titleChain)
419
+ .withConfig({ runName: 'TitleChain' });
420
+
356
421
  const invokeConfig = Object.assign({}, chainOptions, {
357
422
  run_id: this.id,
358
423
  runId: this.id,
359
424
  });
425
+
360
426
  try {
361
- return await chain.invoke(
362
- { convo, inputText, skipLanguage },
427
+ return await fullChain.invoke(
428
+ { input: inputText, output: response },
363
429
  invokeConfig
364
430
  );
365
431
  } catch (_e) {
366
432
  // Fallback: strip callbacks to avoid EventStream tracer errors in certain environments
433
+ // But preserve langfuse handler if it exists
434
+ const langfuseHandler = (invokeConfig.callbacks as t.ProvidedCallbacks)?.find(
435
+ (cb) => cb instanceof CallbackHandler
436
+ );
367
437
  const { callbacks: _cb, ...rest } = invokeConfig;
368
- const safeConfig = Object.assign({}, rest, { callbacks: [] });
369
- return await chain.invoke(
370
- { convo, inputText, skipLanguage },
438
+ const safeConfig = Object.assign({}, rest, {
439
+ callbacks: langfuseHandler ? [langfuseHandler] : [],
440
+ });
441
+ return await fullChain.invoke(
442
+ { input: inputText, output: response },
371
443
  safeConfig as Partial<RunnableConfig>
372
444
  );
373
445
  }
@@ -80,7 +80,7 @@ async function testStandardStreaming(): Promise<void> {
80
80
  const llmConfig = getLLMConfig(
81
81
  Providers.ANTHROPIC
82
82
  ) as t.AnthropicClientOptions & t.SharedLLMConfig;
83
- llmConfig.model = 'claude-3-5-sonnet-latest';
83
+ llmConfig.model = 'claude-haiku-4-5';
84
84
 
85
85
  const run = await Run.create<t.IState>({
86
86
  runId: 'test-run-id',
@@ -92,7 +92,7 @@ const transferToFlightAssistant = createHandoffTool({
92
92
  });
93
93
 
94
94
  const llm = new ChatAnthropic({
95
- modelName: 'claude-3-5-sonnet-latest',
95
+ modelName: 'claude-haiku-4-5',
96
96
  apiKey: process.env.ANTHROPIC_API_KEY,
97
97
  });
98
98
 
@@ -71,7 +71,7 @@ async function testSequentialAgentChain() {
71
71
  agentId: 'researcher',
72
72
  provider: Providers.ANTHROPIC,
73
73
  clientOptions: {
74
- modelName: 'claude-3-5-sonnet-latest',
74
+ modelName: 'claude-haiku-4-5',
75
75
  apiKey: process.env.ANTHROPIC_API_KEY,
76
76
  },
77
77
  instructions: `You are a Research Agent specializing in gathering initial information.
@@ -88,7 +88,7 @@ async function testSequentialAgentChain() {
88
88
  agentId: 'analyst',
89
89
  provider: Providers.ANTHROPIC,
90
90
  clientOptions: {
91
- modelName: 'claude-3-5-sonnet-latest',
91
+ modelName: 'claude-haiku-4-5',
92
92
  apiKey: process.env.ANTHROPIC_API_KEY,
93
93
  },
94
94
  instructions: `You are an Analysis Agent that builds upon research findings.
@@ -105,7 +105,7 @@ async function testSequentialAgentChain() {
105
105
  agentId: 'reviewer',
106
106
  provider: Providers.ANTHROPIC,
107
107
  clientOptions: {
108
- modelName: 'claude-3-5-sonnet-latest',
108
+ modelName: 'claude-haiku-4-5',
109
109
  apiKey: process.env.ANTHROPIC_API_KEY,
110
110
  },
111
111
  instructions: `You are a Critical Review Agent that evaluates the work done so far.
@@ -122,7 +122,7 @@ async function testSequentialAgentChain() {
122
122
  agentId: 'summarizer',
123
123
  provider: Providers.ANTHROPIC,
124
124
  clientOptions: {
125
- modelName: 'claude-3-5-sonnet-latest',
125
+ modelName: 'claude-haiku-4-5',
126
126
  apiKey: process.env.ANTHROPIC_API_KEY,
127
127
  },
128
128
  instructions: `You are a Summary Agent that creates the final comprehensive output.