@librechat/agents 2.4.45 → 2.4.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,617 @@
+ 'use strict';
+
+ var messages = require('@langchain/core/messages');
+ var outputs = require('@langchain/core/outputs');
+ var openai_tools = require('@langchain/core/output_parsers/openai_tools');
+
+ function extractGenericMessageCustomRole(message) {
+     if (message.role !== 'system' &&
+         message.role !== 'developer' &&
+         message.role !== 'assistant' &&
+         message.role !== 'user' &&
+         message.role !== 'function' &&
+         message.role !== 'tool') {
+         console.warn(`Unknown message role: ${message.role}`);
+     }
+     return message.role;
+ }
+ function messageToOpenAIRole(message) {
+     const type = message._getType();
+     switch (type) {
+         case 'system':
+             return 'system';
+         case 'ai':
+             return 'assistant';
+         case 'human':
+             return 'user';
+         case 'function':
+             return 'function';
+         case 'tool':
+             return 'tool';
+         case 'generic': {
+             if (!messages.ChatMessage.isInstance(message))
+                 throw new Error('Invalid generic chat message');
+             return extractGenericMessageCustomRole(message);
+         }
+         default:
+             throw new Error(`Unknown message type: ${type}`);
+     }
+ }
+ const completionsApiContentBlockConverter = {
+     providerName: 'ChatOpenAI',
+     fromStandardTextBlock(block) {
+         return { type: 'text', text: block.text };
+     },
+     fromStandardImageBlock(block) {
+         if (block.source_type === 'url') {
+             return {
+                 type: 'image_url',
+                 image_url: {
+                     url: block.url,
+                     ...(block.metadata?.detail
+                         ? { detail: block.metadata.detail }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             const url = `data:${block.mime_type ?? ''};base64,${block.data}`;
+             return {
+                 type: 'image_url',
+                 image_url: {
+                     url,
+                     ...(block.metadata?.detail
+                         ? { detail: block.metadata.detail }
+                         : {}),
+                 },
+             };
+         }
+         throw new Error(`Image content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+     fromStandardAudioBlock(block) {
+         if (block.source_type === 'url') {
+             const data = messages.parseBase64DataUrl({ dataUrl: block.url });
+             if (!data) {
+                 throw new Error(`URL audio blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
+             }
+             const rawMimeType = data.mime_type || block.mime_type || '';
+             let mimeType;
+             try {
+                 mimeType = messages.parseMimeType(rawMimeType);
+             }
+             catch {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             if (mimeType.type !== 'audio' ||
+                 (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')) {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             return {
+                 type: 'input_audio',
+                 input_audio: {
+                     format: mimeType.subtype,
+                     data: data.data,
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             let mimeType;
+             try {
+                 mimeType = messages.parseMimeType(block.mime_type ?? '');
+             }
+             catch {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             if (mimeType.type !== 'audio' ||
+                 (mimeType.subtype !== 'wav' && mimeType.subtype !== 'mp3')) {
+                 throw new Error(`Audio blocks with source_type ${block.source_type} must have mime type of audio/wav or audio/mp3`);
+             }
+             return {
+                 type: 'input_audio',
+                 input_audio: {
+                     format: mimeType.subtype,
+                     data: block.data,
+                 },
+             };
+         }
+         throw new Error(`Audio content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+     fromStandardFileBlock(block) {
+         if (block.source_type === 'url') {
+             const data = messages.parseBase64DataUrl({ dataUrl: block.url });
+             if (!data) {
+                 throw new Error(`URL file blocks with source_type ${block.source_type} must be formatted as a data URL for ChatOpenAI`);
+             }
+             return {
+                 type: 'file',
+                 file: {
+                     file_data: block.url, // formatted as base64 data URL
+                     ...(block.metadata?.filename || block.metadata?.name
+                         ? {
+                             filename: (block.metadata.filename ||
+                                 block.metadata.name),
+                         }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'base64') {
+             return {
+                 type: 'file',
+                 file: {
+                     file_data: `data:${block.mime_type ?? ''};base64,${block.data}`,
+                     ...(block.metadata?.filename ||
+                         block.metadata?.name ||
+                         block.metadata?.title
+                         ? {
+                             filename: (block.metadata.filename ||
+                                 block.metadata.name ||
+                                 block.metadata.title),
+                         }
+                         : {}),
+                 },
+             };
+         }
+         if (block.source_type === 'id') {
+             return {
+                 type: 'file',
+                 file: {
+                     file_id: block.id,
+                 },
+             };
+         }
+         throw new Error(`File content blocks with source_type ${block.source_type} are not supported for ChatOpenAI`);
+     },
+ };
+ const _FUNCTION_CALL_IDS_MAP_KEY = '__openai_function_call_ids__';
+ function _convertReasoningSummaryToOpenAIResponsesParams(reasoning) {
+     // combine summary parts that have the same index and then remove the indexes
+     const summary = (reasoning.summary.length > 1
+         ? reasoning.summary.reduce((acc, curr) => {
+             const last = acc.at(-1);
+             if (last.index === curr.index) {
+                 last.text += curr.text;
+             }
+             else {
+                 acc.push(curr);
+             }
+             return acc;
+         }, [{ ...reasoning.summary[0] }])
+         : reasoning.summary).map((s) => Object.fromEntries(Object.entries(s).filter(([k]) => k !== 'index')));
+     return {
+         ...reasoning,
+         summary,
+     };
+ }
+ function _convertMessagesToOpenAIResponsesParams(messages$1, model, zdrEnabled) {
+     return messages$1.flatMap((lcMsg) => {
+         const additional_kwargs = lcMsg.additional_kwargs;
+         let role = messageToOpenAIRole(lcMsg);
+         if (role === 'system' && isReasoningModel(model))
+             role = 'developer';
+         if (role === 'function') {
+             throw new Error('Function messages are not supported in Responses API');
+         }
+         if (role === 'tool') {
+             const toolMessage = lcMsg;
+             // Handle computer call output
+             if (additional_kwargs.type === 'computer_call_output') {
+                 const output = (() => {
+                     if (typeof toolMessage.content === 'string') {
+                         return {
+                             type: 'computer_screenshot',
+                             image_url: toolMessage.content,
+                         };
+                     }
+                     if (Array.isArray(toolMessage.content)) {
+                         const oaiScreenshot = toolMessage.content.find((i) => i.type === 'computer_screenshot');
+                         if (oaiScreenshot)
+                             return oaiScreenshot;
+                         const lcImage = toolMessage.content.find((i) => i.type === 'image_url');
+                         if (lcImage) {
+                             return {
+                                 type: 'computer_screenshot',
+                                 image_url: typeof lcImage.image_url === 'string'
+                                     ? lcImage.image_url
+                                     : lcImage.image_url.url,
+                             };
+                         }
+                     }
+                     throw new Error('Invalid computer call output');
+                 })();
+                 return {
+                     type: 'computer_call_output',
+                     output,
+                     call_id: toolMessage.tool_call_id,
+                 };
+             }
+             return {
+                 type: 'function_call_output',
+                 call_id: toolMessage.tool_call_id,
+                 id: toolMessage.id?.startsWith('fc_') ? toolMessage.id : undefined,
+                 output: typeof toolMessage.content !== 'string'
+                     ? JSON.stringify(toolMessage.content)
+                     : toolMessage.content,
+             };
+         }
+         if (role === 'assistant') {
+             // if we have the original response items, just reuse them
+             if (!zdrEnabled &&
+                 lcMsg.response_metadata.output != null &&
+                 Array.isArray(lcMsg.response_metadata.output) &&
+                 lcMsg.response_metadata.output.length > 0 &&
+                 lcMsg.response_metadata.output.every((item) => 'type' in item)) {
+                 return lcMsg.response_metadata.output;
+             }
+             // otherwise, try to reconstruct the response from what we have
+             const input = [];
+             // reasoning items
+             if (additional_kwargs.reasoning && !zdrEnabled) {
+                 const reasoningItem = _convertReasoningSummaryToOpenAIResponsesParams(additional_kwargs.reasoning);
+                 input.push(reasoningItem);
+             }
+             // ai content
+             let { content } = lcMsg;
+             if (additional_kwargs.refusal) {
+                 if (typeof content === 'string') {
+                     content = [{ type: 'output_text', text: content, annotations: [] }];
+                 }
+                 content = [
+                     ...content,
+                     { type: 'refusal', refusal: additional_kwargs.refusal },
+                 ];
+             }
+             input.push({
+                 type: 'message',
+                 role: 'assistant',
+                 ...(lcMsg.id && !zdrEnabled ? { id: lcMsg.id } : {}),
+                 content: typeof content === 'string'
+                     ? content
+                     : content.flatMap((item) => {
+                         if (item.type === 'text') {
+                             return {
+                                 type: 'output_text',
+                                 text: item.text,
+                                 // @ts-expect-error TODO: add types for `annotations`
+                                 annotations: item.annotations ?? [],
+                             };
+                         }
+                         if (item.type === 'output_text' || item.type === 'refusal') {
+                             return item;
+                         }
+                         return [];
+                     }),
+             });
+             const functionCallIds = additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY];
+             if (messages.isAIMessage(lcMsg) && !!lcMsg.tool_calls?.length) {
+                 input.push(...lcMsg.tool_calls.map((toolCall) => ({
+                     type: 'function_call',
+                     name: toolCall.name,
+                     arguments: JSON.stringify(toolCall.args),
+                     call_id: toolCall.id,
+                     ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
+                 })));
+             }
+             else if (additional_kwargs.tool_calls) {
+                 input.push(...additional_kwargs.tool_calls.map((toolCall) => ({
+                     type: 'function_call',
+                     name: toolCall.function.name,
+                     call_id: toolCall.id,
+                     arguments: toolCall.function.arguments,
+                     ...(zdrEnabled ? { id: functionCallIds?.[toolCall.id] } : {}),
+                 })));
+             }
+             const toolOutputs = lcMsg.response_metadata.output.length
+                 ? lcMsg.response_metadata.output
+                 : additional_kwargs.tool_outputs;
+             const fallthroughCallTypes = [
+                 'computer_call',
+                 /** @ts-ignore */
+                 'mcp_call',
+                 /** @ts-ignore */
+                 'code_interpreter_call',
+                 /** @ts-ignore */
+                 'image_generation_call',
+             ];
+             if (toolOutputs != null) {
+                 const castToolOutputs = toolOutputs;
+                 const fallthroughCalls = castToolOutputs.filter((item) => fallthroughCallTypes.includes(item.type));
+                 if (fallthroughCalls.length > 0)
+                     input.push(...fallthroughCalls);
+             }
+             return input;
+         }
+         if (role === 'user' || role === 'system' || role === 'developer') {
+             if (typeof lcMsg.content === 'string') {
+                 return { type: 'message', role, content: lcMsg.content };
+             }
+             const messages$1 = [];
+             const content = lcMsg.content.flatMap((item) => {
+                 if (item.type === 'mcp_approval_response') {
+                     messages$1.push({
+                         // @ts-ignore
+                         type: 'mcp_approval_response',
+                         approval_request_id: item.approval_request_id,
+                         approve: item.approve,
+                     });
+                 }
+                 if (messages.isDataContentBlock(item)) {
+                     return messages.convertToProviderContentBlock(item, completionsApiContentBlockConverter);
+                 }
+                 if (item.type === 'text') {
+                     return {
+                         type: 'input_text',
+                         text: item.text,
+                     };
+                 }
+                 if (item.type === 'image_url') {
+                     return {
+                         type: 'input_image',
+                         image_url: typeof item.image_url === 'string'
+                             ? item.image_url
+                             : item.image_url.url,
+                         detail: typeof item.image_url === 'string'
+                             ? 'auto'
+                             : item.image_url.detail,
+                     };
+                 }
+                 if (item.type === 'input_text' ||
+                     item.type === 'input_image' ||
+                     item.type === 'input_file') {
+                     return item;
+                 }
+                 return [];
+             });
+             if (content.length > 0) {
+                 messages$1.push({ type: 'message', role, content });
+             }
+             return messages$1;
+         }
+         console.warn(`Unsupported role found when converting to OpenAI Responses API: ${role}`);
+         return [];
+     });
+ }
+ function isReasoningModel(model) {
+     return model != null && model && /^o\d/.test(model);
+ }
+ function _convertOpenAIResponsesMessageToBaseMessage(response) {
+     if (response.error) {
+         // TODO: add support for `addLangChainErrorFields`
+         const error = new Error(response.error.message);
+         error.name = response.error.code;
+         throw error;
+     }
+     let messageId;
+     const content = [];
+     const tool_calls = [];
+     const invalid_tool_calls = [];
+     const response_metadata = {
+         model: response.model,
+         created_at: response.created_at,
+         id: response.id,
+         incomplete_details: response.incomplete_details,
+         metadata: response.metadata,
+         object: response.object,
+         status: response.status,
+         user: response.user,
+         service_tier: response.service_tier,
+         // for compatibility with chat completion calls.
+         model_name: response.model,
+     };
+     const additional_kwargs = {};
+     for (const item of response.output) {
+         if (item.type === 'message') {
+             messageId = item.id;
+             content.push(...item.content.flatMap((part) => {
+                 if (part.type === 'output_text') {
+                     if ('parsed' in part && part.parsed != null) {
+                         additional_kwargs.parsed = part.parsed;
+                     }
+                     return {
+                         type: 'text',
+                         text: part.text,
+                         annotations: part.annotations,
+                     };
+                 }
+                 if (part.type === 'refusal') {
+                     additional_kwargs.refusal = part.refusal;
+                     return [];
+                 }
+                 return part;
+             }));
+         }
+         else if (item.type === 'function_call') {
+             const fnAdapter = {
+                 function: { name: item.name, arguments: item.arguments },
+                 id: item.call_id,
+             };
+             try {
+                 tool_calls.push(openai_tools.parseToolCall(fnAdapter, { returnId: true }));
+             }
+             catch (e) {
+                 let errMessage;
+                 if (typeof e === 'object' &&
+                     e != null &&
+                     'message' in e &&
+                     typeof e.message === 'string') {
+                     errMessage = e.message;
+                 }
+                 invalid_tool_calls.push(openai_tools.makeInvalidToolCall(fnAdapter, errMessage));
+             }
+             additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] ??= {};
+             if (item.id) {
+                 additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][item.call_id] = item.id;
+             }
+         }
+         else if (item.type === 'reasoning') {
+             additional_kwargs.reasoning = item;
+         }
+         else {
+             additional_kwargs.tool_outputs ??= [];
+             additional_kwargs.tool_outputs.push(item);
+         }
+     }
+     return new messages.AIMessage({
+         id: messageId,
+         content,
+         tool_calls,
+         invalid_tool_calls,
+         usage_metadata: response.usage,
+         additional_kwargs,
+         response_metadata,
+     });
+ }
+ function _convertOpenAIResponsesDeltaToBaseMessageChunk(chunk) {
+     const content = [];
+     let generationInfo = {};
+     let usage_metadata;
+     const tool_call_chunks = [];
+     const response_metadata = {};
+     const additional_kwargs = {};
+     let id;
+     if (chunk.type === 'response.output_text.delta') {
+         content.push({
+             type: 'text',
+             text: chunk.delta,
+             index: chunk.content_index,
+         });
+         /** @ts-ignore */
+     }
+     else if (chunk.type === 'response.output_text_annotation.added') {
+         content.push({
+             type: 'text',
+             text: '',
+             /** @ts-ignore */
+             annotations: [chunk.annotation],
+             /** @ts-ignore */
+             index: chunk.content_index,
+         });
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         chunk.item.type === 'message') {
+         id = chunk.item.id;
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         chunk.item.type === 'function_call') {
+         tool_call_chunks.push({
+             type: 'tool_call_chunk',
+             name: chunk.item.name,
+             args: chunk.item.arguments,
+             id: chunk.item.call_id,
+             index: chunk.output_index,
+         });
+         additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
+             [chunk.item.call_id]: chunk.item.id,
+         };
+     }
+     else if (chunk.type === 'response.output_item.done' &&
+         [
+             'web_search_call',
+             'file_search_call',
+             'computer_call',
+             'code_interpreter_call',
+             'mcp_call',
+             'mcp_list_tools',
+             'mcp_approval_request',
+             'image_generation_call',
+         ].includes(chunk.item.type)) {
+         additional_kwargs.tool_outputs = [chunk.item];
+     }
+     else if (chunk.type === 'response.created') {
+         response_metadata.id = chunk.response.id;
+         response_metadata.model_name = chunk.response.model;
+         response_metadata.model = chunk.response.model;
+     }
+     else if (chunk.type === 'response.completed') {
+         const msg = _convertOpenAIResponsesMessageToBaseMessage(chunk.response);
+         usage_metadata = chunk.response.usage;
+         if (chunk.response.text?.format?.type === 'json_schema') {
+             additional_kwargs.parsed ??= JSON.parse(msg.text);
+         }
+         for (const [key, value] of Object.entries(chunk.response)) {
+             if (key !== 'id')
+                 response_metadata[key] = value;
+         }
+     }
+     else if (chunk.type === 'response.function_call_arguments.delta') {
+         tool_call_chunks.push({
+             type: 'tool_call_chunk',
+             args: chunk.delta,
+             index: chunk.output_index,
+         });
+     }
+     else if (chunk.type === 'response.web_search_call.completed' ||
+         chunk.type === 'response.file_search_call.completed') {
+         generationInfo = {
+             tool_outputs: {
+                 id: chunk.item_id,
+                 type: chunk.type.replace('response.', '').replace('.completed', ''),
+                 status: 'completed',
+             },
+         };
+     }
+     else if (chunk.type === 'response.refusal.done') {
+         additional_kwargs.refusal = chunk.refusal;
+     }
+     else if (chunk.type === 'response.output_item.added' &&
+         'item' in chunk &&
+         chunk.item.type === 'reasoning') {
+         const summary = chunk
+             .item.summary
+             ? chunk.item.summary.map((s, index) => ({
+                 ...s,
+                 index,
+             }))
+             : undefined;
+         additional_kwargs.reasoning = {
+             // We only capture ID in the first chunk or else the concatenated result of all chunks will
+             // have an ID field that is repeated once per chunk. There is special handling for the `type`
+             // field that prevents this, however.
+             id: chunk.item.id,
+             type: chunk.item.type,
+             ...(summary ? { summary } : {}),
+         };
+     }
+     else if (chunk.type === 'response.reasoning_summary_part.added') {
+         additional_kwargs.reasoning = {
+             type: 'reasoning',
+             summary: [{ ...chunk.part, index: chunk.summary_index }],
+         };
+     }
+     else if (chunk.type === 'response.reasoning_summary_text.delta') {
+         additional_kwargs.reasoning = {
+             type: 'reasoning',
+             summary: [
+                 { text: chunk.delta, type: 'summary_text', index: chunk.summary_index },
+             ],
+         };
+         /** @ts-ignore */
+     }
+     else if (chunk.type === 'response.image_generation_call.partial_image') {
+         // noop/fixme: retaining partial images in a message chunk means that _all_
+         // partial images get kept in history, so we don't do anything here.
+         return null;
+     }
+     else {
+         return null;
+     }
+     return new outputs.ChatGenerationChunk({
+         // Legacy reasons, `onLLMNewToken` should pull this out
+         text: content.map((part) => part.text).join(''),
+         message: new messages.AIMessageChunk({
+             id,
+             content,
+             tool_call_chunks,
+             usage_metadata,
+             additional_kwargs,
+             response_metadata,
+         }),
+         generationInfo,
+     });
+ }
+
+ exports._convertMessagesToOpenAIResponsesParams = _convertMessagesToOpenAIResponsesParams;
+ exports._convertOpenAIResponsesDeltaToBaseMessageChunk = _convertOpenAIResponsesDeltaToBaseMessageChunk;
+ exports.isReasoningModel = isReasoningModel;
+ exports.messageToOpenAIRole = messageToOpenAIRole;
+ //# sourceMappingURL=index.cjs.map
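
For orientation, a minimal usage sketch of the exported converters. This is hypothetical: the require path './index.cjs' and the sample messages are assumptions for illustration, not part of the published package.

// Minimal sketch, assuming this bundle can be required from a local path.
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
const {
    messageToOpenAIRole,
    isReasoningModel,
    _convertMessagesToOpenAIResponsesParams,
} = require('./index.cjs'); // hypothetical path to the file shown above

const history = [
    new SystemMessage('You are a helpful assistant.'),
    new HumanMessage('Summarize the release notes.'),
];

console.log(messageToOpenAIRole(history[1])); // 'user'
console.log(isReasoningModel('o3-mini')); // true: the name starts with "o" followed by a digit

// Build Responses API input items; with a reasoning model, the system
// message is emitted with the 'developer' role, per the code above.
const input = _convertMessagesToOpenAIResponsesParams(history, 'o3-mini', false);
console.log(JSON.stringify(input, null, 2));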