@lobehub/chat 1.92.3 → 1.93.0

This diff reflects the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (90)
  1. package/CHANGELOG.md +25 -0
  2. package/README.md +8 -8
  3. package/README.zh-CN.md +8 -8
  4. package/changelog/v1.json +9 -0
  5. package/docs/development/database-schema.dbml +51 -1
  6. package/locales/ar/modelProvider.json +4 -0
  7. package/locales/ar/models.json +64 -34
  8. package/locales/ar/providers.json +3 -0
  9. package/locales/bg-BG/modelProvider.json +4 -0
  10. package/locales/bg-BG/models.json +64 -34
  11. package/locales/bg-BG/providers.json +3 -0
  12. package/locales/de-DE/modelProvider.json +4 -0
  13. package/locales/de-DE/models.json +64 -34
  14. package/locales/de-DE/providers.json +3 -0
  15. package/locales/en-US/modelProvider.json +4 -0
  16. package/locales/en-US/models.json +64 -34
  17. package/locales/en-US/providers.json +3 -0
  18. package/locales/es-ES/modelProvider.json +4 -0
  19. package/locales/es-ES/models.json +64 -34
  20. package/locales/es-ES/providers.json +3 -0
  21. package/locales/fa-IR/modelProvider.json +4 -0
  22. package/locales/fa-IR/models.json +64 -34
  23. package/locales/fa-IR/providers.json +3 -0
  24. package/locales/fr-FR/modelProvider.json +4 -0
  25. package/locales/fr-FR/models.json +64 -34
  26. package/locales/fr-FR/providers.json +3 -0
  27. package/locales/it-IT/modelProvider.json +4 -0
  28. package/locales/it-IT/models.json +64 -34
  29. package/locales/it-IT/providers.json +3 -0
  30. package/locales/ja-JP/modelProvider.json +4 -0
  31. package/locales/ja-JP/models.json +64 -34
  32. package/locales/ja-JP/providers.json +3 -0
  33. package/locales/ko-KR/modelProvider.json +4 -0
  34. package/locales/ko-KR/models.json +64 -34
  35. package/locales/ko-KR/providers.json +3 -0
  36. package/locales/nl-NL/modelProvider.json +4 -0
  37. package/locales/nl-NL/models.json +64 -34
  38. package/locales/nl-NL/providers.json +3 -0
  39. package/locales/pl-PL/modelProvider.json +4 -0
  40. package/locales/pl-PL/models.json +64 -34
  41. package/locales/pl-PL/providers.json +3 -0
  42. package/locales/pt-BR/modelProvider.json +4 -0
  43. package/locales/pt-BR/models.json +64 -34
  44. package/locales/pt-BR/providers.json +3 -0
  45. package/locales/ru-RU/modelProvider.json +4 -0
  46. package/locales/ru-RU/models.json +63 -33
  47. package/locales/ru-RU/providers.json +3 -0
  48. package/locales/tr-TR/modelProvider.json +4 -0
  49. package/locales/tr-TR/models.json +64 -34
  50. package/locales/tr-TR/providers.json +3 -0
  51. package/locales/vi-VN/modelProvider.json +4 -0
  52. package/locales/vi-VN/models.json +64 -34
  53. package/locales/vi-VN/providers.json +3 -0
  54. package/locales/zh-CN/modelProvider.json +4 -0
  55. package/locales/zh-CN/models.json +59 -29
  56. package/locales/zh-CN/providers.json +3 -0
  57. package/locales/zh-TW/modelProvider.json +4 -0
  58. package/locales/zh-TW/models.json +64 -34
  59. package/locales/zh-TW/providers.json +3 -0
  60. package/package.json +1 -1
  61. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +16 -0
  62. package/src/config/modelProviders/openai.ts +3 -1
  63. package/src/database/client/migrations.json +25 -0
  64. package/src/database/migrations/0025_add_provider_config.sql +1 -0
  65. package/src/database/migrations/meta/0025_snapshot.json +5703 -0
  66. package/src/database/migrations/meta/_journal.json +7 -0
  67. package/src/database/models/__tests__/aiProvider.test.ts +2 -0
  68. package/src/database/models/aiProvider.ts +5 -2
  69. package/src/database/repositories/tableViewer/index.test.ts +1 -1
  70. package/src/database/schemas/_helpers.ts +5 -1
  71. package/src/database/schemas/aiInfra.ts +5 -1
  72. package/src/libs/model-runtime/openai/index.ts +21 -2
  73. package/src/libs/model-runtime/types/chat.ts +6 -9
  74. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +79 -5
  75. package/src/libs/model-runtime/utils/openaiHelpers.test.ts +145 -1
  76. package/src/libs/model-runtime/utils/openaiHelpers.ts +59 -0
  77. package/src/libs/model-runtime/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap +193 -0
  78. package/src/libs/model-runtime/utils/streams/openai/index.ts +2 -0
  79. package/src/libs/model-runtime/utils/streams/{openai.test.ts → openai/openai.test.ts} +1 -1
  80. package/src/libs/model-runtime/utils/streams/{openai.ts → openai/openai.ts} +5 -5
  81. package/src/libs/model-runtime/utils/streams/openai/responsesStream.test.ts +826 -0
  82. package/src/libs/model-runtime/utils/streams/openai/responsesStream.ts +166 -0
  83. package/src/libs/model-runtime/utils/streams/protocol.ts +4 -1
  84. package/src/libs/model-runtime/utils/streams/utils.ts +20 -0
  85. package/src/libs/model-runtime/utils/usageConverter.ts +59 -0
  86. package/src/locales/default/modelProvider.ts +4 -0
  87. package/src/services/__tests__/chat.test.ts +27 -0
  88. package/src/services/chat.ts +8 -2
  89. package/src/store/aiInfra/slices/aiProvider/selectors.ts +11 -0
  90. package/src/types/aiProvider.ts +13 -1
package/src/database/migrations/meta/_journal.json
@@ -175,6 +175,13 @@
       "when": 1749301573666,
       "tag": "0024_add_rbac_tables",
       "breakpoints": true
+    },
+    {
+      "idx": 25,
+      "version": "7",
+      "when": 1749309388370,
+      "tag": "0025_add_provider_config",
+      "breakpoints": true
     }
   ],
   "version": "6"
package/src/database/models/__tests__/aiProvider.test.ts
@@ -361,12 +361,14 @@ describe('AiProviderModel', () => {
       const config = await aiProviderModel.getAiProviderRuntimeConfig(mockDecryptor);

       expect(config.provider1).toEqual({
+        config: {},
         fetchOnClient: true,
         keyVaults: { decryptedKey: 'value' },
         settings: { setting1: true },
       });

       expect(config.provider2).toEqual({
+        config: {},
         fetchOnClient: undefined,
         keyVaults: {},
         settings: {},
package/src/database/models/aiProvider.ts
@@ -119,6 +119,7 @@ export class AiProviderModel {

     const commonFields = {
       checkModel: value.checkModel,
+      config: value.config,
       fetchOnClient: value.fetchOnClient,
       keyVaults,
     };
@@ -129,11 +130,10 @@
         ...commonFields,
         id,
         source: this.getProviderSource(id),
-        updatedAt: new Date(),
         userId: this.userId,
       })
       .onConflictDoUpdate({
-        set: { ...commonFields, updatedAt: new Date() },
+        set: commonFields,
         target: [aiProviders.id, aiProviders.userId],
       });
   };
@@ -184,6 +184,7 @@ export class AiProviderModel {
     const query = this.db
       .select({
         checkModel: aiProviders.checkModel,
+        config: aiProviders.config,
         description: aiProviders.description,
         enabled: aiProviders.enabled,
         fetchOnClient: aiProviders.fetchOnClient,
@@ -236,6 +237,7 @@
   getAiProviderRuntimeConfig = async (decryptor?: DecryptUserKeyVaults) => {
     const result = await this.db
       .select({
+        config: aiProviders.config,
         fetchOnClient: aiProviders.fetchOnClient,
         id: aiProviders.id,
         keyVaults: aiProviders.keyVaults,
@@ -262,6 +264,7 @@
       }

       runtimeConfig[item.id] = {
+        config: item.config || {},
         fetchOnClient: typeof item.fetchOnClient === 'boolean' ? item.fetchOnClient : undefined,
         keyVaults,
         settings: !!builtin ? merge(builtin.settings, userSettings) : userSettings,
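
Note the dropped `updatedAt: new Date()` writes: as of this release, `updated_at` is stamped automatically by the schema-level `$onUpdate` hook introduced in the `_helpers.ts` hunk below, so the upsert no longer sets it by hand.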
package/src/database/repositories/tableViewer/index.test.ts
@@ -23,7 +23,7 @@ describe('TableViewerRepo', () => {
   it('should return all tables with counts', async () => {
     const result = await repo.getAllTables();

-    expect(result.length).toEqual(51);
+    expect(result.length).toEqual(55);
     expect(result[0]).toEqual({ name: 'agents', count: 0, type: 'BASE TABLE' });
   });

package/src/database/schemas/_helpers.ts
@@ -3,7 +3,11 @@ import { timestamp } from 'drizzle-orm/pg-core';
 export const timestamptz = (name: string) => timestamp(name, { withTimezone: true });

 export const createdAt = () => timestamptz('created_at').notNull().defaultNow();
-export const updatedAt = () => timestamptz('updated_at').notNull().defaultNow();
+export const updatedAt = () =>
+  timestamptz('updated_at')
+    .notNull()
+    .defaultNow()
+    .$onUpdate(() => new Date());
 export const accessedAt = () => timestamptz('accessed_at').notNull().defaultNow();

 // columns.helpers.ts
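
For context, a minimal sketch of what the `$onUpdate` hook buys callers (the `notes` table here is hypothetical, not from the repo): drizzle-orm evaluates the callback for every update built through the query builder, which is why the `aiProvider.ts` upsert above could drop its manual timestamps.

import { eq } from 'drizzle-orm';
import { drizzle } from 'drizzle-orm/node-postgres';
import { pgTable, text } from 'drizzle-orm/pg-core';
import { Pool } from 'pg';

import { createdAt, updatedAt } from '@/database/schemas/_helpers';

// hypothetical table built from the shared column helpers
const notes = pgTable('notes', {
  id: text('id').primaryKey(),
  body: text('body'),
  createdAt: createdAt(),
  updatedAt: updatedAt(), // defaultNow() on insert, $onUpdate(() => new Date()) on update
});

const db = drizzle(new Pool({ connectionString: process.env.DATABASE_URL }));

// no explicit updatedAt: the $onUpdate callback fills it in on every update
export const editNote = (id: string, body: string) =>
  db.update(notes).set({ body }).where(eq(notes.id, id));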
package/src/database/schemas/aiInfra.ts
@@ -3,7 +3,7 @@ import { boolean, integer, jsonb, pgTable, primaryKey, text, varchar } from 'drizzle-orm/pg-core';

 import { timestamps } from '@/database/schemas/_helpers';
 import { users } from '@/database/schemas/user';
-import { AiProviderSettings } from '@/types/aiProvider';
+import { AiProviderConfig, AiProviderSettings } from '@/types/aiProvider';

 export const aiProviders = pgTable(
   'ai_providers',
@@ -29,6 +29,10 @@ export const aiProviders = pgTable(
       .$defaultFn(() => ({}))
       .$type<AiProviderSettings>(),

+    config: jsonb('config')
+      .$defaultFn(() => ({}))
+      .$type<AiProviderConfig>(),
+
     ...timestamps,
   },
   (table) => [primaryKey({ columns: [table.id, table.userId] })],
package/src/libs/model-runtime/openai/index.ts
@@ -1,7 +1,7 @@
-import { ModelProvider } from '../types';
+import { ChatStreamPayload, ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { pruneReasoningPayload } from '../utils/openaiHelpers';
-import { processMultiProviderModelList } from '../utils/modelParse';

 export interface OpenAIModelCard {
   id: string;
@@ -15,6 +15,10 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
   handlePayload: (payload) => {
     const { model } = payload;

+    if (model === 'o1-pro') {
+      return { ...payload, apiMode: 'responses' } as ChatStreamPayload;
+    }
+
     if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
       return pruneReasoningPayload(payload) as any;
     }
@@ -42,6 +46,7 @@
   },
   debug: {
     chatCompletion: () => process.env.DEBUG_OPENAI_CHAT_COMPLETION === '1',
+    responses: () => process.env.DEBUG_OPENAI_RESPONSES === '1',
   },
   models: async ({ client }) => {
     const modelsPage = (await client.models.list()) as any;
@@ -51,4 +56,18 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
     return processMultiProviderModelList(modelList);
   },
   provider: ModelProvider.OpenAI,
+  responses: {
+    handlePayload: (payload: ChatStreamPayload) => {
+      const { model } = payload;
+      if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
+        if (!payload.reasoning) {
+          payload.reasoning = { summary: 'auto' };
+        } else {
+          payload.reasoning.summary = 'auto';
+        }
+      }
+
+      return { ...payload, stream: payload.stream ?? true };
+    },
+  },
 });
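
Taken together with the factory changes further down, the effect is that `o1-pro` requests are transparently rerouted. A hedged sketch of the call site (constructor options abbreviated; `chat()` returns a streaming `Response`):

import { LobeOpenAI } from '@/libs/model-runtime/openai';

const runtime = new LobeOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// handlePayload tags 'o1-pro' with apiMode: 'responses', so this call goes
// through client.responses.create() rather than chat.completions.create()
const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'o1-pro',
  temperature: 1,
});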
package/src/libs/model-runtime/types/chat.ts
@@ -67,6 +67,7 @@ export interface OpenAIChatMessage {
  * @title Chat Stream Payload
  */
 export interface ChatStreamPayload {
+  apiMode?: 'chatCompletion' | 'responses';
   /**
    * Enable context caching
    */
@@ -96,21 +97,17 @@
    * @title Number of texts to return
    */
   n?: number;
-  /**
-   * List of enabled plugins
-   */
-  plugins?: string[];
   /**
    * @title Penalty coefficient for the generated text, used to reduce topic drift
    * @default 0
    */
   presence_penalty?: number;
-
-  /**
-   * @default openai
-   */
   provider?: string;
-  responseMode?: 'streamText' | 'json';
+  reasoning?: {
+    effort?: string;
+    summary?: string;
+  };
+  responseMode?: 'stream' | 'json';
   /**
    * @title Whether to enable streaming requests
    * @default true
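
A payload literal exercising the new fields might look like this (values illustrative; `reasoning.summary` is also set automatically for reasoning models by the `responses.handlePayload` hook shown above):

import { ChatStreamPayload } from '@/libs/model-runtime/types';

const payload: ChatStreamPayload = {
  apiMode: 'responses', // opt in to the Responses API explicitly
  messages: [{ content: 'Explain this diff', role: 'user' }],
  model: 'o3',
  reasoning: { effort: 'medium', summary: 'auto' },
  temperature: 1,
};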
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts
@@ -8,9 +8,11 @@ import type { ChatModelCard } from '@/types/llm';

 import { LobeRuntimeAI } from '../../BaseAI';
 import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
-import type {
+import {
   ChatCompletionErrorPayload,
+  ChatCompletionTool,
   ChatMethodOptions,
+  ChatStreamCallbacks,
   ChatStreamPayload,
   Embeddings,
   EmbeddingsOptions,
@@ -20,14 +22,13 @@ import type {
   TextToSpeechOptions,
   TextToSpeechPayload,
 } from '../../types';
-import { ChatStreamCallbacks } from '../../types';
 import { AgentRuntimeError } from '../createError';
 import { debugResponse, debugStream } from '../debugStream';
 import { desensitizeUrl } from '../desensitizeUrl';
 import { handleOpenAIError } from '../handleOpenAIError';
-import { convertOpenAIMessages } from '../openaiHelpers';
+import { convertOpenAIMessages, convertOpenAIResponseInputs } from '../openaiHelpers';
 import { StreamingResponse } from '../response';
-import { OpenAIStream, OpenAIStreamOptions } from '../streams';
+import { OpenAIResponsesStream, OpenAIStream, OpenAIStreamOptions } from '../streams';

 // models containing the following keywords are not chat models, so we filter them out
 export const CHAT_MODELS_BLOCK_LIST = [
@@ -83,6 +84,7 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
   customClient?: CustomClientOptions<T>;
   debug?: {
     chatCompletion: () => boolean;
+    responses?: () => boolean;
   };
   errorType?: {
     bizError: ILobeAgentRuntimeErrorType;
@@ -94,6 +96,12 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
     transformModel?: (model: OpenAI.Model) => ChatModelCard;
   };
   provider: string;
+  responses?: {
+    handlePayload?: (
+      payload: ChatStreamPayload,
+      options: ConstructorOptions<T>,
+    ) => ChatStreamPayload;
+  };
 }

 /**
@@ -160,6 +168,7 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = any>({
   chatCompletion,
   models,
   customClient,
+  responses,
 }: OpenAICompatibleFactoryOptions<T>) => {
   const ErrorType = {
     bizError: errorType?.bizError || AgentRuntimeErrorType.ProviderBizError,
@@ -199,7 +208,15 @@
       this.id = options.id || provider;
     }

-    async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatMethodOptions) {
+    async chat(
+      { responseMode, apiMode, ...payload }: ChatStreamPayload,
+      options?: ChatMethodOptions,
+    ) {
+      // the new OpenAI Responses API
+      if (apiMode === 'responses') {
+        return this.handleResponseAPIMode(payload, options);
+      }
+
       try {
         const inputStartAt = Date.now();
         const postPayload = chatCompletion?.handlePayload
@@ -454,5 +471,62 @@
         provider: this.id as ModelProvider,
       });
     }
+
+    private async handleResponseAPIMode(
+      payload: ChatStreamPayload,
+      options?: ChatMethodOptions,
+    ): Promise<Response> {
+      const inputStartAt = Date.now();
+
+      const { messages, ...res } = responses?.handlePayload
+        ? (responses?.handlePayload(payload, this._options) as ChatStreamPayload)
+        : payload;
+
+      // remove penalty params
+      delete res.frequency_penalty;
+      delete res.presence_penalty;
+
+      const input = await convertOpenAIResponseInputs(messages as any);
+
+      const postPayload = {
+        ...res,
+        input,
+        store: false,
+        tools: payload.tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),
+      } as OpenAI.Responses.ResponseCreateParamsStreaming;
+
+      if (debug?.responses?.()) {
+        console.log('[requestPayload]');
+        console.log(JSON.stringify(postPayload), '\n');
+      }
+
+      const response = await this.client.responses.create(postPayload, {
+        headers: options?.requestHeaders,
+        signal: options?.signal,
+      });
+
+      const [prod, useForDebug] = response.tee();
+
+      if (debug?.responses?.()) {
+        const useForDebugStream =
+          useForDebug instanceof ReadableStream ? useForDebug : useForDebug.toReadableStream();
+
+        debugStream(useForDebugStream).catch(console.error);
+      }
+
+      const streamOptions: OpenAIStreamOptions = {
+        bizErrorTypeTransformer: chatCompletion?.handleStreamBizErrorType,
+        callbacks: options?.callback,
+        provider: this.id,
+      };
+
+      return StreamingResponse(OpenAIResponsesStream(prod, { ...streamOptions, inputStartAt }), {
+        headers: options?.headers,
+      });
+    }
+
+    private convertChatCompletionToolToResponseTool = (tool: ChatCompletionTool) => {
+      return { type: tool.type, ...tool.function };
+    };
   };
 };
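
`convertChatCompletionToolToResponseTool` simply flattens the Chat Completions nesting into the Responses tool shape. A standalone illustration of the same spread:

interface ChatCompletionTool {
  function: { description?: string; name: string; parameters?: object };
  type: 'function';
}

const toResponseTool = (tool: ChatCompletionTool) => ({ type: tool.type, ...tool.function });

toResponseTool({
  function: { name: 'get_weather', parameters: { type: 'object' } },
  type: 'function',
});
// → { type: 'function', name: 'get_weather', parameters: { type: 'object' } }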
package/src/libs/model-runtime/utils/openaiHelpers.test.ts
@@ -3,7 +3,11 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

 import { imageUrlToBase64 } from '@/utils/imageToBase64';

-import { convertMessageContent, convertOpenAIMessages } from './openaiHelpers';
+import {
+  convertMessageContent,
+  convertOpenAIMessages,
+  convertOpenAIResponseInputs,
+} from './openaiHelpers';
 import { parseDataUri } from './uriParser';

 // mock dependencies
@@ -144,3 +148,143 @@ describe('convertOpenAIMessages', () => {
     expect(Promise.all).toHaveBeenCalledTimes(2); // once for the messages array, once for the content array
   });
 });
+
+describe('convertOpenAIResponseInputs', () => {
+  it('should convert plain text messages correctly', async () => {
+    const messages: OpenAI.ChatCompletionMessageParam[] = [
+      { role: 'user', content: 'Hello' },
+      { role: 'assistant', content: 'Hi there!' },
+    ];
+
+    const result = await convertOpenAIResponseInputs(messages);
+
+    expect(result).toEqual([
+      { role: 'user', content: 'Hello' },
+      { role: 'assistant', content: 'Hi there!' },
+    ]);
+  });
+
+  it('should convert messages with tool calls correctly', async () => {
+    const messages: OpenAI.ChatCompletionMessageParam[] = [
+      {
+        role: 'assistant',
+        content: '',
+        tool_calls: [
+          {
+            id: 'call_123',
+            type: 'function',
+            function: {
+              name: 'test_function',
+              arguments: '{"key": "value"}',
+            },
+          },
+        ],
+      },
+    ];
+
+    const result = await convertOpenAIResponseInputs(messages);
+
+    expect(result).toEqual([
+      {
+        arguments: 'test_function',
+        call_id: 'call_123',
+        name: 'test_function',
+        type: 'function_call',
+      },
+    ]);
+  });
+
+  it('should convert tool response messages correctly', async () => {
+    const messages: OpenAI.ChatCompletionMessageParam[] = [
+      {
+        role: 'tool',
+        content: 'Function result',
+        tool_call_id: 'call_123',
+      },
+    ];
+
+    const result = await convertOpenAIResponseInputs(messages);
+
+    expect(result).toEqual([
+      {
+        call_id: 'call_123',
+        output: 'Function result',
+        type: 'function_call_output',
+      },
+    ]);
+  });
+
+  it('should convert messages containing images correctly', async () => {
+    const messages: OpenAI.ChatCompletionMessageParam[] = [
+      {
+        role: 'user',
+        content: [
+          { type: 'text', text: 'Here is an image' },
+          {
+            type: 'image_url',
+            image_url: {
+              url: 'data:image/jpeg;base64,test123',
+            },
+          },
+        ],
+      },
+    ];
+
+    const result = await convertOpenAIResponseInputs(messages);
+
+    expect(result).toEqual([
+      {
+        role: 'user',
+        content: [
+          { type: 'input_text', text: 'Here is an image' },
+          {
+            type: 'input_image',
+            image_url: 'data:image/jpeg;base64,test123',
+          },
+        ],
+      },
+    ]);
+  });
+
+  it('should handle mixed message sequences correctly', async () => {
+    const messages: OpenAI.ChatCompletionMessageParam[] = [
+      { role: 'user', content: 'I need help with a function' },
+      {
+        role: 'assistant',
+        content: '',
+        tool_calls: [
+          {
+            id: 'call_456',
+            type: 'function',
+            function: {
+              name: 'get_data',
+              arguments: '{}',
+            },
+          },
+        ],
+      },
+      {
+        role: 'tool',
+        content: '{"result": "success"}',
+        tool_call_id: 'call_456',
+      },
+    ];
+
+    const result = await convertOpenAIResponseInputs(messages);
+
+    expect(result).toEqual([
+      { role: 'user', content: 'I need help with a function' },
+      {
+        arguments: 'get_data',
+        call_id: 'call_456',
+        name: 'get_data',
+        type: 'function_call',
+      },
+      {
+        call_id: 'call_456',
+        output: '{"result": "success"}',
+        type: 'function_call_output',
+      },
+    ]);
+  });
+});
package/src/libs/model-runtime/utils/openaiHelpers.ts
@@ -41,6 +41,65 @@ export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessageParam[]) => {
   )) as OpenAI.ChatCompletionMessageParam[];
 };

+export const convertOpenAIResponseInputs = async (
+  messages: OpenAI.ChatCompletionMessageParam[],
+) => {
+  let input: OpenAI.Responses.ResponseInputItem[] = [];
+  await Promise.all(
+    messages.map(async (message) => {
+      // if the message is an assistant message with tool calls, transform it into function_call items
+      if (message.role === 'assistant' && message.tool_calls && message.tool_calls?.length > 0) {
+        message.tool_calls?.forEach((tool) => {
+          input.push({
+            arguments: tool.function.name,
+            call_id: tool.id,
+            name: tool.function.name,
+            type: 'function_call',
+          });
+        });
+
+        return;
+      }
+
+      if (message.role === 'tool') {
+        input.push({
+          call_id: message.tool_call_id,
+          output: message.content,
+          type: 'function_call_output',
+        } as OpenAI.Responses.ResponseFunctionToolCallOutputItem);
+
+        return;
+      }
+
+      // default item; image parts also need to be converted
+      const item = {
+        ...message,
+        content:
+          typeof message.content === 'string'
+            ? message.content
+            : await Promise.all(
+                (message.content || []).map(async (c) => {
+                  if (c.type === 'text') {
+                    return { ...c, type: 'input_text' };
+                  }
+
+                  const image = await convertMessageContent(c as OpenAI.ChatCompletionContentPart);
+                  return {
+                    image_url: (image as OpenAI.ChatCompletionContentPartImage).image_url?.url,
+                    type: 'input_image',
+                  };
+                }),
+              ),
+      } as OpenAI.Responses.ResponseInputItem;
+
+      input.push(item);
+    }),
+  );
+
+  return input;
+};
+
 export const pruneReasoningPayload = (payload: ChatStreamPayload) => {
   return {
     ...payload,
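
A quick usage sketch of the new converter, with the outputs the tests above pin down (note that, as released, the `function_call` item's `arguments` field carries `tool.function.name`):

import OpenAI from 'openai';

import { convertOpenAIResponseInputs } from '@/libs/model-runtime/utils/openaiHelpers';

const messages: OpenAI.ChatCompletionMessageParam[] = [
  { content: 'What is the weather in Berlin?', role: 'user' },
  {
    content: '',
    role: 'assistant',
    tool_calls: [
      {
        function: { arguments: '{"city":"Berlin"}', name: 'get_weather' },
        id: 'call_1',
        type: 'function',
      },
    ],
  },
  { content: '{"temp": 21}', role: 'tool', tool_call_id: 'call_1' },
];

const input = await convertOpenAIResponseInputs(messages);
// → [
//     { content: 'What is the weather in Berlin?', role: 'user' },
//     { arguments: 'get_weather', call_id: 'call_1', name: 'get_weather', type: 'function_call' },
//     { call_id: 'call_1', output: '{"temp": 21}', type: 'function_call_output' },
//   ]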