@lobehub/chat 1.92.2 → 1.93.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/CHANGELOG.md +58 -0
  2. package/README.md +8 -8
  3. package/README.zh-CN.md +8 -8
  4. package/changelog/v1.json +21 -0
  5. package/docs/development/database-schema.dbml +51 -1
  6. package/locales/ar/modelProvider.json +4 -0
  7. package/locales/ar/models.json +64 -34
  8. package/locales/ar/providers.json +3 -0
  9. package/locales/bg-BG/modelProvider.json +4 -0
  10. package/locales/bg-BG/models.json +64 -34
  11. package/locales/bg-BG/providers.json +3 -0
  12. package/locales/de-DE/modelProvider.json +4 -0
  13. package/locales/de-DE/models.json +64 -34
  14. package/locales/de-DE/providers.json +3 -0
  15. package/locales/en-US/modelProvider.json +4 -0
  16. package/locales/en-US/models.json +64 -34
  17. package/locales/en-US/providers.json +3 -0
  18. package/locales/es-ES/modelProvider.json +4 -0
  19. package/locales/es-ES/models.json +64 -34
  20. package/locales/es-ES/providers.json +3 -0
  21. package/locales/fa-IR/modelProvider.json +4 -0
  22. package/locales/fa-IR/models.json +64 -34
  23. package/locales/fa-IR/providers.json +3 -0
  24. package/locales/fr-FR/modelProvider.json +4 -0
  25. package/locales/fr-FR/models.json +64 -34
  26. package/locales/fr-FR/providers.json +3 -0
  27. package/locales/it-IT/modelProvider.json +4 -0
  28. package/locales/it-IT/models.json +64 -34
  29. package/locales/it-IT/providers.json +3 -0
  30. package/locales/ja-JP/modelProvider.json +4 -0
  31. package/locales/ja-JP/models.json +64 -34
  32. package/locales/ja-JP/providers.json +3 -0
  33. package/locales/ko-KR/modelProvider.json +4 -0
  34. package/locales/ko-KR/models.json +64 -34
  35. package/locales/ko-KR/providers.json +3 -0
  36. package/locales/nl-NL/modelProvider.json +4 -0
  37. package/locales/nl-NL/models.json +64 -34
  38. package/locales/nl-NL/providers.json +3 -0
  39. package/locales/pl-PL/modelProvider.json +4 -0
  40. package/locales/pl-PL/models.json +64 -34
  41. package/locales/pl-PL/providers.json +3 -0
  42. package/locales/pt-BR/modelProvider.json +4 -0
  43. package/locales/pt-BR/models.json +64 -34
  44. package/locales/pt-BR/providers.json +3 -0
  45. package/locales/ru-RU/modelProvider.json +4 -0
  46. package/locales/ru-RU/models.json +63 -33
  47. package/locales/ru-RU/providers.json +3 -0
  48. package/locales/tr-TR/modelProvider.json +4 -0
  49. package/locales/tr-TR/models.json +64 -34
  50. package/locales/tr-TR/providers.json +3 -0
  51. package/locales/vi-VN/modelProvider.json +4 -0
  52. package/locales/vi-VN/models.json +64 -34
  53. package/locales/vi-VN/providers.json +3 -0
  54. package/locales/zh-CN/modelProvider.json +4 -0
  55. package/locales/zh-CN/models.json +59 -29
  56. package/locales/zh-CN/providers.json +3 -0
  57. package/locales/zh-TW/modelProvider.json +4 -0
  58. package/locales/zh-TW/models.json +64 -34
  59. package/locales/zh-TW/providers.json +3 -0
  60. package/package.json +2 -2
  61. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +16 -0
  62. package/src/config/aiModels/openrouter.ts +44 -0
  63. package/src/config/modelProviders/openai.ts +3 -1
  64. package/src/database/client/migrations.json +25 -0
  65. package/src/database/migrations/0025_add_provider_config.sql +1 -0
  66. package/src/database/migrations/meta/0025_snapshot.json +5703 -0
  67. package/src/database/migrations/meta/_journal.json +7 -0
  68. package/src/database/models/__tests__/aiProvider.test.ts +2 -0
  69. package/src/database/models/aiProvider.ts +5 -2
  70. package/src/database/repositories/tableViewer/index.test.ts +1 -1
  71. package/src/database/schemas/_helpers.ts +5 -1
  72. package/src/database/schemas/aiInfra.ts +5 -1
  73. package/src/libs/model-runtime/openai/index.ts +21 -2
  74. package/src/libs/model-runtime/openrouter/index.ts +55 -43
  75. package/src/libs/model-runtime/types/chat.ts +6 -9
  76. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +79 -5
  77. package/src/libs/model-runtime/utils/openaiHelpers.test.ts +145 -1
  78. package/src/libs/model-runtime/utils/openaiHelpers.ts +59 -0
  79. package/src/libs/model-runtime/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap +193 -0
  80. package/src/libs/model-runtime/utils/streams/openai/index.ts +2 -0
  81. package/src/libs/model-runtime/utils/streams/{openai.test.ts → openai/openai.test.ts} +1 -1
  82. package/src/libs/model-runtime/utils/streams/{openai.ts → openai/openai.ts} +5 -5
  83. package/src/libs/model-runtime/utils/streams/openai/responsesStream.test.ts +826 -0
  84. package/src/libs/model-runtime/utils/streams/openai/responsesStream.ts +166 -0
  85. package/src/libs/model-runtime/utils/streams/protocol.ts +4 -1
  86. package/src/libs/model-runtime/utils/streams/utils.ts +20 -0
  87. package/src/libs/model-runtime/utils/usageConverter.ts +59 -0
  88. package/src/locales/default/modelProvider.ts +4 -0
  89. package/src/services/__tests__/chat.test.ts +27 -0
  90. package/src/services/chat.ts +8 -2
  91. package/src/services/file/ClientS3/index.test.ts +8 -8
  92. package/src/services/file/ClientS3/index.ts +2 -1
  93. package/src/store/aiInfra/slices/aiProvider/selectors.ts +11 -0
  94. package/src/types/aiProvider.ts +13 -1

package/src/libs/model-runtime/utils/openaiHelpers.ts
@@ -41,6 +41,65 @@ export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessa
    )) as OpenAI.ChatCompletionMessageParam[];
  };
 
+ export const convertOpenAIResponseInputs = async (
+   messages: OpenAI.ChatCompletionMessageParam[],
+ ) => {
+   let input: OpenAI.Responses.ResponseInputItem[] = [];
+   await Promise.all(
+     messages.map(async (message) => {
+       // if the message is an assistant message with tool calls, transform them into function_call items
+       if (message.role === 'assistant' && message.tool_calls && message.tool_calls?.length > 0) {
+         message.tool_calls?.forEach((tool) => {
+           input.push({
+             arguments: tool.function.arguments,
+             call_id: tool.id,
+             name: tool.function.name,
+             type: 'function_call',
+           });
+         });
+
+         return;
+       }
+
+       if (message.role === 'tool') {
+         input.push({
+           call_id: message.tool_call_id,
+           output: message.content,
+           type: 'function_call_output',
+         } as OpenAI.Responses.ResponseFunctionToolCallOutputItem);
+
+         return;
+       }
+
+       // default item
+       // also need handle image
+       const item = {
+         ...message,
+         content:
+           typeof message.content === 'string'
+             ? message.content
+             : await Promise.all(
+                 (message.content || []).map(async (c) => {
+                   if (c.type === 'text') {
+                     return { ...c, type: 'input_text' };
+                   }
+
+                   const image = await convertMessageContent(c as OpenAI.ChatCompletionContentPart);
+                   return {
+                     image_url: (image as OpenAI.ChatCompletionContentPartImage).image_url?.url,
+                     type: 'input_image',
+                   };
+                 }),
+               ),
+       } as OpenAI.Responses.ResponseInputItem;
+
+       input.push(item);
+     }),
+   );
+
+   return input;
+ };
+
  export const pruneReasoningPayload = (payload: ChatStreamPayload) => {
    return {
      ...payload,
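
For orientation, a rough usage sketch of the new helper (not part of the diff; the message values, the `search` tool name, and the import path are illustrative assumptions). It shows how Chat Completions style messages, including assistant tool calls and tool results, are expected to map onto Responses API input items:

```ts
import OpenAI from 'openai';

// assumes a module sitting next to openaiHelpers.ts
import { convertOpenAIResponseInputs } from './openaiHelpers';

const messages: OpenAI.ChatCompletionMessageParam[] = [
  { content: 'Which is larger, 9.1 or 9.92?', role: 'user' },
  {
    content: '',
    role: 'assistant',
    tool_calls: [
      {
        function: { arguments: '{"query":"9.1 vs 9.92"}', name: 'search' },
        id: 'call_1',
        type: 'function',
      },
    ],
  },
  { content: '["9.92 is larger"]', role: 'tool', tool_call_id: 'call_1' },
];

// Expected result: a `function_call` item (from the assistant tool call),
// a `function_call_output` item (from the tool message), and a plain message
// item whose string content is passed through; array content parts would
// become `input_text` / `input_image` parts.
const input = await convertOpenAIResponseInputs(messages);
```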

package/src/libs/model-runtime/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap
@@ -0,0 +1,193 @@
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+ exports[`OpenAIResponsesStream > Reasoning > summary 1`] = `
+ [
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: "in_progress"
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.in_progress","response":{"id":"resp_684313b89200819087f27686e0c822260b502bf083132d0d","object":"response","created_at":1749226424,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"o4-mini","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":"medium","summary":"detailed"},"service_tier":"auto","store":false,"temperature":1,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"a search service. Useful for when you need to answer questions about current events. Input should be a search query. Output is a JSON array of the query results","name":"lobe-web-browsing____search____builtin","parameters":{"properties":{"query":{"description":"The search query","type":"string"},"searchCategories":{"description":"The search categories you can set:","items":{"enum":["general","images","news","science","videos"],"type":"string"},"type":"array"},"searchEngines":{"description":"The search engines you can use:","items":{"enum":["google","bilibili","bing","duckduckgo","npm","pypi","github","arxiv","google scholar","z-library","reddit","imdb","brave","wikipedia","pinterest","unsplash","vimeo","youtube"],"type":"string"},"type":"array"},"searchTimeRange":{"description":"The time range you can set:","enum":["anytime","day","week","month","year"],"type":"string"}},"required":["query"],"type":"object"},"strict":true},{"type":"function","description":"A crawler can visit page content. Output is a JSON object of title, content, url and website","name":"lobe-web-browsing____crawlSinglePage____builtin","parameters":{"properties":{"url":{"description":"The url need to be crawled","type":"string"}},"required":["url"],"type":"object"},"strict":true},{"type":"function","description":"A crawler can visit multi pages. If need to visit multi website, use this one. Output is an array of JSON object of title, content, url and website","name":"lobe-web-browsing____crawlMultiPages____builtin","parameters":{"properties":{"urls":{"items":{"description":"The urls need to be crawled","type":"string"},"type":"array"}},"required":["urls"],"type":"object"},"strict":true}],"top_p":1,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"id":"rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d","type":"reasoning","summary":[]}
+
+ ",
+ "id: rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d
+ ",
+ "event: reasoning
+ ",
+ "data: ""
+
+ ",
+ "id: rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d
+ ",
+ "event: reasoning
+ ",
+ "data: "**Answering a"
+
+ ",
+ "id: rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d
+ ",
+ "event: reasoning
+ ",
+ "data: " numeric or 9.92"
+
+ ",
+ "id: rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d
+ ",
+ "event: reasoning
+ ",
+ "data: "."
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.reasoning_summary_text.done","item_id":"rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d","output_index":0,"summary_index":0,"text":"**Answering a numeric comparison**\\n\\nThe user is asking in Chinese which number is larger: 9.1 or 9.92. This is straightforward since 9.92 is clearly larger, as it's greater than 9.1. We can respond with \\"9.92大于9.1\\" without needing to search for more information. It's a simple comparison, but I could also add a little explanation, noting that 9.92 is indeed 0.82 more than 9.1. However, keeping it simple with \\"9.92 > 9.1\\" is perfectly fine!"}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.reasoning_summary_part.done","item_id":"rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d","output_index":0,"summary_index":0,"part":{"type":"summary_text","text":"**Answering a numeric comparison**\\n\\nThe user is asking in Chinese which number is larger: 9.1 or 9.92. This is straightforward since 9.92 is clearly larger, as it's greater than 9.1. We can respond with \\"9.92大于9.1\\" without needing to search for more information. It's a simple comparison, but I could also add a little explanation, noting that 9.92 is indeed 0.82 more than 9.1. However, keeping it simple with \\"9.92 > 9.1\\" is perfectly fine!"}}
+
+ ",
+ "id: rs_6843fe13e73c8190a49d9372ef8cd46f08c019075e7c8955
+ ",
+ "event: reasoning
+ ",
+ "data: "\\n"
+
+ ",
+ "id: rs_6843fe13e73c8190a49d9372ef8cd46f08c019075e7c8955
+ ",
+ "event: reasoning
+ ",
+ "data: "**Exploring a mathematical sequence**"
+
+ ",
+ "id: rs_6843fe13e73c8190a49d9372ef8cd46f08c019075e7c8955
+ ",
+ "event: reasoning
+ ",
+ "data: " analyzing"
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.output_item.done","output_index":0,"item":{"id":"rs_684313b9774481908ee856625f82fb8c0b502bf083132d0d","type":"reasoning","summary":[{"type":"summary_text","text":"**Answering a numeric comparison**\\n\\nThe user is asking in Chinese which number is larger: 9.1 or 9.92. This is straightforward since 9.92 is clearly larger, as it's greater than 9.1. We can respond with \\"9.92大于9.1\\" without needing to search for more information. It's a simple comparison, but I could also add a little explanation, noting that 9.92 is indeed 0.82 more than 9.1. However, keeping it simple with \\"9.92 > 9.1\\" is perfectly fine!"}]}}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","type":"message","status":"in_progress","content":[],"role":"assistant"}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.content_part.added","item_id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"text":""}}
+
+ ",
+ "id: msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d
+ ",
+ "event: text
+ ",
+ "data: "9.92 比 9.1 大。"
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.output_text.done","item_id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","output_index":1,"content_index":0,"text":"9.92 比 9.1 大。"}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.content_part.done","item_id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"text":"9.92 比 9.1 大。"}}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.output_item.done","output_index":1,"item":{"id":"msg_684313bee2c88190b0f4b09621ad7dc60b502bf083132d0d","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"text":"9.92 比 9.1 大。"}],"role":"assistant"}}
+
+ ",
+ "id: resp_684313b89200819087f27686e0c822260b502bf083132d0d
+ ",
+ "event: usage
+ ",
+ "data: {"inputCacheMissTokens":93,"inputCachedTokens":2298,"inputTextTokens":2391,"outputReasoningTokens":128,"outputTextTokens":16,"totalInputTokens":2391,"totalOutputTokens":144,"totalTokens":2535}
+
+ ",
+ ]
+ `;
+
+ exports[`OpenAIResponsesStream > should transform OpenAI stream to protocol stream 1`] = `
+ [
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: "in_progress"
+
+ ",
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.in_progress","response":{"id":"resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58","object":"response","created_at":1748925324,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"o4-mini","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":"medium","summary":null},"service_tier":"auto","store":false,"temperature":1,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"a search service. Useful for when you need to answer questions about current events. Input should be a search query. Output is a JSON array of the query results","name":"lobe-web-browsing____search____builtin","parameters":{"properties":{"query":{"description":"The search query","type":"string"},"searchCategories":{"description":"The search categories you can set:","items":{"enum":["general","images","news","science","videos"],"type":"string"},"type":"array"},"searchEngines":{"description":"The search engines you can use:","items":{"enum":["google","bilibili","bing","duckduckgo","npm","pypi","github","arxiv","google scholar","z-library","reddit","imdb","brave","wikipedia","pinterest","unsplash","vimeo","youtube"],"type":"string"},"type":"array"},"searchTimeRange":{"description":"The time range you can set:","enum":["anytime","day","week","month","year"],"type":"string"}},"required":["query"],"type":"object"},"strict":true},{"type":"function","description":"A crawler can visit page content. Output is a JSON object of title, content, url and website","name":"lobe-web-browsing____crawlSinglePage____builtin","parameters":{"properties":{"url":{"description":"The url need to be crawled","type":"string"}},"required":["url"],"type":"object"},"strict":true},{"type":"function","description":"A crawler can visit multi pages. If need to visit multi website, use this one. Output is an array of JSON object of title, content, url and website","name":"lobe-web-browsing____crawlMultiPages____builtin","parameters":{"properties":{"urls":{"items":{"description":"The urls need to be crawled","type":"string"},"type":"array"}},"required":["urls"],"type":"object"},"strict":true}],"top_p":1,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
+
+ ",
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: {"id":"rs_683e7bc80a9c81908f6e3d61ad63cc1e0cf93af363cdcf58","type":"reasoning","summary":[]}
+
+ ",
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: {"id":"msg_683e7bde8b0c8190970ab8c719c0fc1c0cf93af363cdcf58","type":"message","status":"in_progress","content":[],"role":"assistant"}
+
+ ",
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.content_part.added","item_id":"msg_683e7bde8b0c8190970ab8c719c0fc1c0cf93af363cdcf58","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"text":"Hello"}}
+
+ ",
+ "id: resp_683e7b8ca3308190b6837f20d2c015cd0cf93af363cdcf58
+ ",
+ "event: data
+ ",
+ "data: {"type":"response.content_part.added","item_id":"msg_683e7bde8b0c8190970ab8c719c0fc1c0cf93af363cdcf58","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"text":" world"}}
+
+ ",
+ ]
+ `;
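
The snapshots above show the wire format the new `OpenAIResponsesStream` emits: repeating SSE frames of an `id:` line, an `event:` line (`data`, `reasoning`, `text`, or `usage`), and a `data:` line carrying a JSON payload, separated by blank lines. A minimal consumer-side sketch, assuming exactly that triple layout; `ProtocolFrame` and `parseProtocolFrames` are illustrative names, not exports of this package:

```ts
interface ProtocolFrame {
  data: unknown;
  event: string; // 'data' | 'reasoning' | 'text' | 'usage' in the snapshots above
  id: string;
}

// Groups the emitted chunks back into frames; each frame arrives as the triple
// "id: ...\n", "event: ...\n", "data: ...\n\n" seen in the snapshot arrays.
const parseProtocolFrames = (chunks: string[]): ProtocolFrame[] => {
  const frames: ProtocolFrame[] = [];
  for (let i = 0; i + 2 < chunks.length; i += 3) {
    frames.push({
      data: JSON.parse(chunks[i + 2].replace(/^data: /, '')),
      event: chunks[i + 1].replace(/^event: /, '').trim(),
      id: chunks[i].replace(/^id: /, '').trim(),
    });
  }
  return frames;
};
```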

package/src/libs/model-runtime/utils/streams/openai/index.ts
@@ -0,0 +1,2 @@
+ export * from './openai';
+ export * from './responsesStream';
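
With the new barrel in place, both stream transformers are importable from a single path. A consumer-side sketch (the importing module is hypothetical; the path assumes it sits next to the `streams` directory):

```ts
import { OpenAIResponsesStream, OpenAIStream } from './streams/openai';
```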

package/src/libs/model-runtime/utils/streams/{openai.test.ts → openai/openai.test.ts}
@@ -2,8 +2,8 @@ import { describe, expect, it, vi } from 'vitest';
 
  import { AgentRuntimeErrorType } from '@/libs/model-runtime';
 
+ import { FIRST_CHUNK_ERROR_KEY } from '../protocol';
  import { OpenAIStream } from './openai';
- import { FIRST_CHUNK_ERROR_KEY } from './protocol';
 
  describe('OpenAIStream', () => {
    it('should transform OpenAI stream to protocol stream', async () => {

package/src/libs/model-runtime/utils/streams/{openai.ts → openai/openai.ts}
@@ -3,9 +3,9 @@ import type { Stream } from 'openai/streaming';
 
  import { ChatMessageError, CitationItem } from '@/types/message';
 
- import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
- import { ChatStreamCallbacks } from '../../types';
- import { convertUsage } from '../usageConverter';
+ import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../../error';
+ import { ChatStreamCallbacks } from '../../../types';
+ import { convertUsage } from '../../usageConverter';
  import {
    FIRST_CHUNK_ERROR_KEY,
    StreamContext,
@@ -18,9 +18,9 @@ import {
    createSSEProtocolTransformer,
    createTokenSpeedCalculator,
    generateToolCallId,
- } from './protocol';
+ } from '../protocol';
 
- export const transformOpenAIStream = (
+ const transformOpenAIStream = (
    chunk: OpenAI.ChatCompletionChunk,
    streamContext: StreamContext,
  ): StreamProtocolChunk | StreamProtocolChunk[] => {