@lobehub/chat 1.124.0 → 1.124.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. package/.env.example +5 -0
  2. package/.github/scripts/pr-comment.js +11 -2
  3. package/.github/workflows/desktop-pr-build.yml +86 -12
  4. package/.github/workflows/release-desktop-beta.yml +91 -20
  5. package/CHANGELOG.md +58 -0
  6. package/Dockerfile +2 -0
  7. package/Dockerfile.database +2 -0
  8. package/Dockerfile.pglite +2 -0
  9. package/apps/desktop/electron-builder.js +8 -4
  10. package/changelog/v1.json +21 -0
  11. package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
  12. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +20 -0
  13. package/locales/ar/chat.json +2 -0
  14. package/locales/bg-BG/chat.json +2 -0
  15. package/locales/de-DE/chat.json +2 -0
  16. package/locales/en-US/chat.json +2 -0
  17. package/locales/es-ES/chat.json +2 -0
  18. package/locales/fa-IR/chat.json +2 -0
  19. package/locales/fr-FR/chat.json +2 -0
  20. package/locales/it-IT/chat.json +2 -0
  21. package/locales/ja-JP/chat.json +2 -0
  22. package/locales/ko-KR/chat.json +2 -0
  23. package/locales/nl-NL/chat.json +2 -0
  24. package/locales/pl-PL/chat.json +2 -0
  25. package/locales/pt-BR/chat.json +2 -0
  26. package/locales/ru-RU/chat.json +2 -0
  27. package/locales/tr-TR/chat.json +2 -0
  28. package/locales/vi-VN/chat.json +2 -0
  29. package/locales/zh-CN/chat.json +2 -0
  30. package/locales/zh-CN/modelProvider.json +1 -1
  31. package/locales/zh-TW/chat.json +2 -0
  32. package/package.json +1 -1
  33. package/packages/const/src/hotkeys.ts +1 -1
  34. package/packages/const/src/index.ts +1 -0
  35. package/packages/const/src/settings/hotkey.ts +3 -2
  36. package/packages/const/src/trace.ts +1 -1
  37. package/packages/const/src/user.ts +1 -2
  38. package/packages/database/src/client/db.test.ts +19 -13
  39. package/packages/electron-server-ipc/src/ipcClient.test.ts +783 -1
  40. package/packages/file-loaders/src/loadFile.test.ts +61 -0
  41. package/packages/file-loaders/src/utils/isTextReadableFile.test.ts +43 -0
  42. package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
  43. package/packages/model-bank/src/aiModels/aihubmix.ts +38 -4
  44. package/packages/model-bank/src/aiModels/groq.ts +26 -8
  45. package/packages/model-bank/src/aiModels/hunyuan.ts +3 -3
  46. package/packages/model-bank/src/aiModels/modelscope.ts +13 -2
  47. package/packages/model-bank/src/aiModels/moonshot.ts +25 -5
  48. package/packages/model-bank/src/aiModels/novita.ts +40 -9
  49. package/packages/model-bank/src/aiModels/openrouter.ts +0 -13
  50. package/packages/model-bank/src/aiModels/qwen.ts +62 -1
  51. package/packages/model-bank/src/aiModels/siliconcloud.ts +20 -0
  52. package/packages/model-bank/src/aiModels/volcengine.ts +141 -15
  53. package/packages/model-runtime/package.json +2 -1
  54. package/packages/model-runtime/src/ai21/index.test.ts +2 -2
  55. package/packages/model-runtime/src/ai360/index.test.ts +2 -2
  56. package/packages/model-runtime/src/akashchat/index.test.ts +19 -0
  57. package/packages/model-runtime/src/anthropic/index.test.ts +1 -2
  58. package/packages/model-runtime/src/baichuan/index.test.ts +1 -2
  59. package/packages/model-runtime/src/bedrock/index.test.ts +1 -2
  60. package/packages/model-runtime/src/bfl/createImage.test.ts +1 -2
  61. package/packages/model-runtime/src/bfl/index.test.ts +1 -2
  62. package/packages/model-runtime/src/cloudflare/index.test.ts +1 -2
  63. package/packages/model-runtime/src/cohere/index.test.ts +19 -0
  64. package/packages/model-runtime/src/deepseek/index.test.ts +2 -2
  65. package/packages/model-runtime/src/fireworksai/index.test.ts +2 -2
  66. package/packages/model-runtime/src/giteeai/index.test.ts +2 -2
  67. package/packages/model-runtime/src/github/index.test.ts +2 -2
  68. package/packages/model-runtime/src/google/createImage.test.ts +1 -2
  69. package/packages/model-runtime/src/google/index.test.ts +1 -1
  70. package/packages/model-runtime/src/groq/index.test.ts +2 -3
  71. package/packages/model-runtime/src/huggingface/index.test.ts +40 -0
  72. package/packages/model-runtime/src/hunyuan/index.test.ts +2 -3
  73. package/packages/model-runtime/src/internlm/index.test.ts +2 -2
  74. package/packages/model-runtime/src/jina/index.test.ts +19 -0
  75. package/packages/model-runtime/src/lmstudio/index.test.ts +2 -2
  76. package/packages/model-runtime/src/minimax/index.test.ts +19 -0
  77. package/packages/model-runtime/src/mistral/index.test.ts +2 -3
  78. package/packages/model-runtime/src/modelscope/index.test.ts +19 -0
  79. package/packages/model-runtime/src/moonshot/index.test.ts +1 -2
  80. package/packages/model-runtime/src/nebius/index.test.ts +19 -0
  81. package/packages/model-runtime/src/newapi/index.test.ts +49 -42
  82. package/packages/model-runtime/src/newapi/index.ts +124 -143
  83. package/packages/model-runtime/src/novita/index.test.ts +3 -4
  84. package/packages/model-runtime/src/nvidia/index.test.ts +19 -0
  85. package/packages/model-runtime/src/openrouter/index.test.ts +2 -3
  86. package/packages/model-runtime/src/perplexity/index.test.ts +2 -3
  87. package/packages/model-runtime/src/ppio/index.test.ts +3 -4
  88. package/packages/model-runtime/src/qwen/index.test.ts +2 -2
  89. package/packages/model-runtime/src/sambanova/index.test.ts +19 -0
  90. package/packages/model-runtime/src/search1api/index.test.ts +19 -0
  91. package/packages/model-runtime/src/sensenova/index.test.ts +2 -2
  92. package/packages/model-runtime/src/spark/index.test.ts +2 -2
  93. package/packages/model-runtime/src/stepfun/index.test.ts +2 -2
  94. package/packages/model-runtime/src/taichu/index.test.ts +4 -5
  95. package/packages/model-runtime/src/tencentcloud/index.test.ts +1 -1
  96. package/packages/model-runtime/src/togetherai/index.test.ts +1 -2
  97. package/packages/model-runtime/src/upstage/index.test.ts +1 -2
  98. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +9 -7
  99. package/packages/model-runtime/src/utils/streams/anthropic.ts +2 -2
  100. package/packages/model-runtime/src/utils/streams/openai/openai.ts +20 -13
  101. package/packages/model-runtime/src/utils/streams/openai/responsesStream.test.ts +1 -2
  102. package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +2 -2
  103. package/packages/model-runtime/src/utils/streams/protocol.ts +2 -2
  104. package/packages/model-runtime/src/wenxin/index.test.ts +2 -3
  105. package/packages/model-runtime/src/xai/index.test.ts +2 -2
  106. package/packages/model-runtime/src/zeroone/index.test.ts +1 -2
  107. package/packages/model-runtime/src/zhipu/index.test.ts +2 -3
  108. package/packages/model-runtime/vitest.config.mts +0 -7
  109. package/packages/types/src/index.ts +2 -0
  110. package/packages/types/src/message/base.ts +1 -1
  111. package/packages/types/src/openai/chat.ts +2 -3
  112. package/packages/utils/package.json +2 -1
  113. package/packages/utils/src/_deprecated/parseModels.test.ts +1 -1
  114. package/packages/utils/src/_deprecated/parseModels.ts +1 -1
  115. package/packages/utils/src/client/topic.test.ts +1 -2
  116. package/packages/utils/src/client/topic.ts +1 -2
  117. package/packages/utils/src/electron/desktopRemoteRPCFetch.ts +1 -1
  118. package/packages/utils/src/fetch/fetchSSE.ts +7 -8
  119. package/packages/utils/src/fetch/parseError.ts +1 -3
  120. package/packages/utils/src/format.test.ts +1 -2
  121. package/packages/utils/src/index.ts +1 -0
  122. package/packages/utils/src/toolManifest.ts +1 -2
  123. package/packages/utils/src/trace.ts +1 -1
  124. package/packages/utils/vitest.config.mts +1 -1
  125. package/packages/web-crawler/src/__tests__/urlRules.test.ts +275 -0
  126. package/packages/web-crawler/src/crawImpl/__tests__/exa.test.ts +269 -0
  127. package/packages/web-crawler/src/crawImpl/__tests__/firecrawl.test.ts +284 -0
  128. package/packages/web-crawler/src/crawImpl/__tests__/naive.test.ts +234 -0
  129. package/packages/web-crawler/src/crawImpl/__tests__/tavily.test.ts +359 -0
  130. package/packages/web-crawler/src/utils/__tests__/errorType.test.ts +217 -0
  131. package/packages/web-crawler/vitest.config.mts +3 -0
  132. package/scripts/electronWorkflow/mergeMacReleaseFiles.ts +207 -0
  133. package/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx +1 -1
  134. package/src/components/Thinking/index.tsx +2 -3
  135. package/src/config/llm.ts +8 -0
  136. package/src/features/ChatInput/Desktop/index.tsx +16 -4
  137. package/src/features/ChatInput/StoreUpdater.tsx +2 -0
  138. package/src/libs/traces/index.ts +1 -1
  139. package/src/locales/default/chat.ts +1 -0
  140. package/src/locales/default/modelProvider.ts +1 -1
  141. package/src/server/modules/ModelRuntime/trace.ts +1 -2
  142. package/src/store/chat/slices/aiChat/actions/__tests__/cancel-functionality.test.ts +107 -0
  143. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChatV2.test.ts +352 -7
  144. package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +2 -1
  145. package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +0 -113
@@ -1,3 +1,4 @@
1
+ import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
1
2
  import urlJoin from 'url-join';
2
3
 
3
4
  import { createRouterRuntime } from '../RouterRuntime';
@@ -54,9 +55,6 @@ const getProviderFromOwnedBy = (ownedBy: string): string => {
54
55
  return 'openai';
55
56
  };
56
57
 
57
- // 全局的模型路由映射,在 models 函数执行后被填充
58
- let globalModelRouteMap: Map<string, string> = new Map();
59
-
60
58
  export const LobeNewAPIAI = createRouterRuntime({
61
59
  debug: {
62
60
  chatCompletion: () => process.env.DEBUG_NEWAPI_CHAT_COMPLETION === '1',
@@ -66,180 +64,163 @@ export const LobeNewAPIAI = createRouterRuntime({
66
64
  },
67
65
  id: ModelProvider.NewAPI,
68
66
  models: async ({ client: openAIClient }) => {
69
- // 每次调用 models 时清空并重建路由映射
70
- globalModelRouteMap.clear();
71
-
72
- // 获取基础 URL(移除末尾的 /v1)
73
- const baseURL = openAIClient.baseURL.replace(/\/v1\/?$/, '');
74
-
75
- const modelsPage = (await openAIClient.models.list()) as any;
76
- const modelList: NewAPIModelCard[] = modelsPage.data || [];
77
-
78
- // 尝试获取 pricing 信息以补充模型详细信息
79
- let pricingMap: Map<string, NewAPIPricing> = new Map();
80
- try {
81
- // 使用保存的 baseURL
82
- const pricingResponse = await fetch(`${baseURL}/api/pricing`, {
83
- headers: {
84
- Authorization: `Bearer ${openAIClient.apiKey}`,
85
- },
86
- });
87
-
88
- if (pricingResponse.ok) {
89
- const pricingData = await pricingResponse.json();
90
- if (pricingData.success && pricingData.data) {
91
- (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
92
- pricingMap.set(pricing.model_name, pricing);
93
- });
94
- }
67
+ // 获取基础 URL(移除末尾的 API 版本路径如 /v1、/v1beta 等)
68
+ const baseURL = openAIClient.baseURL.replace(/\/v\d+[a-z]*\/?$/, '');
69
+
70
+ const modelsPage = (await openAIClient.models.list()) as any;
71
+ const modelList: NewAPIModelCard[] = modelsPage.data || [];
72
+
73
+ // 尝试获取 pricing 信息以补充模型详细信息
74
+ let pricingMap: Map<string, NewAPIPricing> = new Map();
75
+ try {
76
+ // 使用保存的 baseURL
77
+ const pricingResponse = await fetch(`${baseURL}/api/pricing`, {
78
+ headers: {
79
+ Authorization: `Bearer ${openAIClient.apiKey}`,
80
+ },
81
+ });
82
+
83
+ if (pricingResponse.ok) {
84
+ const pricingData = await pricingResponse.json();
85
+ if (pricingData.success && pricingData.data) {
86
+ (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
87
+ pricingMap.set(pricing.model_name, pricing);
88
+ });
95
89
  }
96
- } catch (error) {
97
- // If fetching pricing information fails, continue using the basic model information
98
- console.debug('Failed to fetch NewAPI pricing info:', error);
99
90
  }
100
-
101
- // Process the model list: determine the provider for each model based on priority rules
102
- const enrichedModelList = modelList.map((model) => {
103
- let enhancedModel: any = { ...model };
104
-
105
- // 1. 添加 pricing 信息
106
- const pricing = pricingMap.get(model.id);
107
- if (pricing) {
108
- // NewAPI 的价格计算逻辑:
109
- // - quota_type: 0 表示按量计费(按 token),1 表示按次计费
110
- // - model_ratio: 相对于基础价格的倍率(基础价格 = $0.002/1K tokens)
111
- // - model_price: 直接指定的价格(优先使用)
112
- // - completion_ratio: 输出价格相对于输入价格的倍率
113
- //
114
- // LobeChat 需要的格式:美元/百万 token
115
-
116
- let inputPrice: number | undefined;
117
- let outputPrice: number | undefined;
118
-
119
- if (pricing.quota_type === 0) {
120
- // 按量计费
121
- if (pricing.model_price && pricing.model_price > 0) {
122
- // model_price is a direct price value; need to confirm its unit.
123
- // Assumption: model_price is the price per 1,000 tokens (i.e., $/1K tokens).
124
- // To convert to price per 1,000,000 tokens ($/1M tokens), multiply by 1,000,000 / 1,000 = 1,000.
125
- // Since the base price is $0.002/1K tokens, multiplying by 2 gives $2/1M tokens.
126
- // Therefore, inputPrice = model_price * 2 converts the price to $/1M tokens for LobeChat.
127
- inputPrice = pricing.model_price * 2;
128
- } else if (pricing.model_ratio) {
129
- // model_ratio × $0.002/1K = model_ratio × $2/1M
130
- inputPrice = pricing.model_ratio * 2; // 转换为 $/1M tokens
131
- }
132
-
133
- if (inputPrice !== undefined) {
134
- // 计算输出价格
135
- outputPrice = inputPrice * (pricing.completion_ratio || 1);
136
-
137
- enhancedModel.pricing = {
138
- input: inputPrice,
139
- output: outputPrice,
140
- };
141
- }
91
+ } catch (error) {
92
+ // If fetching pricing information fails, continue using the basic model information
93
+ console.debug('Failed to fetch NewAPI pricing info:', error);
94
+ }
95
+
96
+ // Process the model list: determine the provider for each model based on priority rules
97
+ const enrichedModelList = modelList.map((model) => {
98
+ let enhancedModel: any = { ...model };
99
+
100
+ // 1. 添加 pricing 信息
101
+ const pricing = pricingMap.get(model.id);
102
+ if (pricing) {
103
+ // NewAPI 的价格计算逻辑:
104
+ // - quota_type: 0 表示按量计费(按 token),1 表示按次计费
105
+ // - model_ratio: 相对于基础价格的倍率(基础价格 = $0.002/1K tokens)
106
+ // - model_price: 直接指定的价格(优先使用)
107
+ // - completion_ratio: 输出价格相对于输入价格的倍率
108
+ //
109
+ // LobeChat 需要的格式:美元/百万 token
110
+
111
+ let inputPrice: number | undefined;
112
+ let outputPrice: number | undefined;
113
+
114
+ if (pricing.quota_type === 0) {
115
+ // 按量计费
116
+ if (pricing.model_price && pricing.model_price > 0) {
117
+ // model_price is a direct price value; need to confirm its unit.
118
+ // Assumption: model_price is the price per 1,000 tokens (i.e., $/1K tokens).
119
+ // To convert to price per 1,000,000 tokens ($/1M tokens), multiply by 1,000,000 / 1,000 = 1,000.
120
+ // Since the base price is $0.002/1K tokens, multiplying by 2 gives $2/1M tokens.
121
+ // Therefore, inputPrice = model_price * 2 converts the price to $/1M tokens for LobeChat.
122
+ inputPrice = pricing.model_price * 2;
123
+ } else if (pricing.model_ratio) {
124
+ // model_ratio × $0.002/1K = model_ratio × $2/1M
125
+ inputPrice = pricing.model_ratio * 2; // 转换为 $/1M tokens
142
126
  }
143
- // quota_type === 1 按次计费暂不支持
144
- }
145
127
 
146
- // 2. 根据优先级处理 provider 信息并缓存路由
147
- let detectedProvider = 'openai'; // 默认
148
-
149
- // 优先级1:使用 supported_endpoint_types
150
- if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
151
- if (model.supported_endpoint_types.includes('anthropic')) {
152
- detectedProvider = 'anthropic';
153
- } else if (model.supported_endpoint_types.includes('gemini')) {
154
- detectedProvider = 'google';
155
- } else if (model.supported_endpoint_types.includes('xai')) {
156
- detectedProvider = 'xai';
128
+ if (inputPrice !== undefined) {
129
+ // 计算输出价格
130
+ outputPrice = inputPrice * (pricing.completion_ratio || 1);
131
+
132
+ enhancedModel.pricing = {
133
+ input: inputPrice,
134
+ output: outputPrice,
135
+ };
157
136
  }
158
137
  }
159
- // 优先级2:使用 owned_by 字段
160
- else if (model.owned_by) {
161
- detectedProvider = getProviderFromOwnedBy(model.owned_by);
162
- }
163
- // 优先级3:基于模型名称检测
164
- else {
165
- detectedProvider = detectModelProvider(model.id);
138
+ // quota_type === 1 按次计费暂不支持
139
+ }
140
+
141
+ // 2. 根据优先级处理 provider 信息并缓存路由
142
+ let detectedProvider = 'openai'; // 默认
143
+
144
+ // 优先级1:使用 supported_endpoint_types
145
+ if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
146
+ if (model.supported_endpoint_types.includes('anthropic')) {
147
+ detectedProvider = 'anthropic';
148
+ } else if (model.supported_endpoint_types.includes('gemini')) {
149
+ detectedProvider = 'google';
150
+ } else if (model.supported_endpoint_types.includes('xai')) {
151
+ detectedProvider = 'xai';
166
152
  }
153
+ }
154
+ // 优先级2:使用 owned_by 字段
155
+ else if (model.owned_by) {
156
+ detectedProvider = getProviderFromOwnedBy(model.owned_by);
157
+ }
158
+ // 优先级3:基于模型名称检测
159
+ else {
160
+ detectedProvider = detectModelProvider(model.id);
161
+ }
167
162
 
168
- // 将检测到的 provider 信息附加到模型上,供路由使用
169
- enhancedModel._detectedProvider = detectedProvider;
170
- // 同时更新全局路由映射表
171
- globalModelRouteMap.set(model.id, detectedProvider);
163
+ // 将检测到的 provider 信息附加到模型上
164
+ enhancedModel._detectedProvider = detectedProvider;
172
165
 
173
- return enhancedModel;
174
- });
166
+ return enhancedModel;
167
+ });
175
168
 
176
- // 使用 processMultiProviderModelList 处理模型能力
177
- const processedModels = await processMultiProviderModelList(enrichedModelList, 'newapi');
169
+ // 使用 processMultiProviderModelList 处理模型能力
170
+ const processedModels = await processMultiProviderModelList(enrichedModelList, 'newapi');
178
171
 
179
- // 如果我们检测到了 provider,确保它被正确应用
180
- return processedModels.map((model: any) => {
181
- if (model._detectedProvider) {
182
- // Here you can adjust certain model properties as needed.
183
- // FIXME: The current data structure does not support storing provider information, and the official NewAPI does not provide a corresponding field. Consider extending the model schema if provider tracking is required in the future.
184
- delete model._detectedProvider; // Remove temporary field
185
- }
186
- return model;
187
- });
188
- },
189
- // 使用动态 routers 配置,在构造时获取用户的 baseURL
190
- routers: (options) => {
191
- // 使用全局的模型路由映射
192
- const userBaseURL = options.baseURL?.replace(/\/v1\/?$/, '') || '';
193
-
194
- return [
172
+ // 清理临时字段
173
+ return processedModels.map((model: any) => {
174
+ if (model._detectedProvider) {
175
+ delete model._detectedProvider;
176
+ }
177
+ return model;
178
+ });
179
+ },
180
+ routers: (options) => {
181
+ const userBaseURL = options.baseURL?.replace(/\/v\d+[a-z]*\/?$/, '') || '';
182
+
183
+ return [
195
184
  {
196
185
  apiType: 'anthropic',
197
- models: () =>
198
- Promise.resolve(
199
- Array.from(globalModelRouteMap.entries())
200
- .filter(([, provider]) => provider === 'anthropic')
201
- .map(([modelId]) => modelId),
202
- ),
186
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
187
+ (id) => detectModelProvider(id) === 'anthropic',
188
+ ),
203
189
  options: {
204
- // Anthropic 在 NewAPI 中使用 /v1 路径,会自动转换为 /v1/messages
205
- baseURL: urlJoin(userBaseURL, '/v1'),
190
+ ...options,
191
+ baseURL: userBaseURL,
206
192
  },
207
193
  },
208
194
  {
209
195
  apiType: 'google',
210
- models: () =>
211
- Promise.resolve(
212
- Array.from(globalModelRouteMap.entries())
213
- .filter(([, provider]) => provider === 'google')
214
- .map(([modelId]) => modelId),
215
- ),
196
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
197
+ (id) => detectModelProvider(id) === 'google',
198
+ ),
216
199
  options: {
217
- // Gemini 在 NewAPI 中使用 /v1beta 路径
218
- baseURL: urlJoin(userBaseURL, '/v1beta'),
200
+ ...options,
201
+ baseURL: userBaseURL,
219
202
  },
220
203
  },
221
204
  {
222
205
  apiType: 'xai',
223
- models: () =>
224
- Promise.resolve(
225
- Array.from(globalModelRouteMap.entries())
226
- .filter(([, provider]) => provider === 'xai')
227
- .map(([modelId]) => modelId),
228
- ),
206
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
207
+ (id) => detectModelProvider(id) === 'xai',
208
+ ),
229
209
  options: {
230
- // xAI 使用标准 OpenAI 格式,走 /v1 路径
210
+ ...options,
231
211
  baseURL: urlJoin(userBaseURL, '/v1'),
232
212
  },
233
213
  },
234
214
  {
235
215
  apiType: 'openai',
236
216
  options: {
217
+ ...options,
237
218
  baseURL: urlJoin(userBaseURL, '/v1'),
238
219
  chatCompletion: {
239
220
  handlePayload,
240
221
  },
241
222
  },
242
223
  },
243
- ];
244
- },
224
+ ];
225
+ },
245
226
  });
@@ -1,10 +1,9 @@
1
1
  // @vitest-environment node
2
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
3
+ import { ModelProvider } from '@lobechat/model-runtime';
2
4
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
5
 
4
- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
5
- import { ModelProvider } from '@/libs/model-runtime';
6
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
7
-
6
+ import { testProvider } from '../providerTestUtils';
8
7
  import models from './fixtures/models.json';
9
8
  import { LobeNovitaAI } from './index';
10
9
 
@@ -0,0 +1,19 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from '@lobechat/model-runtime';
3
+
4
+ import { testProvider } from '../providerTestUtils';
5
+ import { LobeNvidiaAI } from './index';
6
+
7
+ const provider = ModelProvider.Nvidia;
8
+ const defaultBaseURL = 'https://integrate.api.nvidia.com/v1';
9
+
10
+ testProvider({
11
+ Runtime: LobeNvidiaAI,
12
+ provider,
13
+ defaultBaseURL,
14
+ chatDebugEnv: 'DEBUG_NVIDIA_CHAT_COMPLETION',
15
+ chatModel: 'meta/llama-3.1-8b-instruct',
16
+ test: {
17
+ skipAPICall: true,
18
+ },
19
+ });
@@ -1,9 +1,8 @@
1
1
  // @vitest-environment node
2
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
2
3
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
4
 
4
- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
5
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
6
-
5
+ import { testProvider } from '../providerTestUtils';
7
6
  import models from './fixtures/models.json';
8
7
  import { LobeOpenRouterAI } from './index';
9
8
 
@@ -1,9 +1,8 @@
1
1
  // @vitest-environment node
2
+ import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
2
3
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
4
 
4
- import { LobeOpenAICompatibleRuntime, ModelProvider } from '@/libs/model-runtime';
5
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
6
-
5
+ import { testProvider } from '../providerTestUtils';
7
6
  import { LobePerplexityAI } from './index';
8
7
 
9
8
  testProvider({
@@ -1,10 +1,9 @@
1
1
  // @vitest-environment node
2
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
3
+ import { ModelProvider } from '@lobechat/model-runtime';
2
4
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
3
5
 
4
- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
5
- import { ModelProvider } from '@/libs/model-runtime';
6
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
7
-
6
+ import { testProvider } from '../providerTestUtils';
8
7
  import models from './fixtures/models.json';
9
8
  import { LobePPIOAI } from './index';
10
9
 
@@ -1,7 +1,7 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from '@/libs/model-runtime';
3
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
4
3
 
4
+ import { testProvider } from '../providerTestUtils';
5
5
  import { LobeQwenAI } from './index';
6
6
 
7
7
  const provider = ModelProvider.Qwen;
@@ -0,0 +1,19 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from '@lobechat/model-runtime';
3
+
4
+ import { testProvider } from '../providerTestUtils';
5
+ import { LobeSambaNovaAI } from './index';
6
+
7
+ const provider = ModelProvider.SambaNova;
8
+ const defaultBaseURL = 'https://api.sambanova.ai/v1';
9
+
10
+ testProvider({
11
+ Runtime: LobeSambaNovaAI,
12
+ provider,
13
+ defaultBaseURL,
14
+ chatDebugEnv: 'DEBUG_SAMBANOVA_CHAT_COMPLETION',
15
+ chatModel: 'Meta-Llama-3.1-8B-Instruct',
16
+ test: {
17
+ skipAPICall: true,
18
+ },
19
+ });
@@ -0,0 +1,19 @@
1
+ // @vitest-environment node
2
+ import { ModelProvider } from '@lobechat/model-runtime';
3
+
4
+ import { testProvider } from '../providerTestUtils';
5
+ import { LobeSearch1API } from './index';
6
+
7
+ const provider = ModelProvider.Search1API;
8
+ const defaultBaseURL = 'https://api.search1api.com/v1';
9
+
10
+ testProvider({
11
+ Runtime: LobeSearch1API,
12
+ provider,
13
+ defaultBaseURL,
14
+ chatDebugEnv: 'DEBUG_SEARCH1API_CHAT_COMPLETION',
15
+ chatModel: 'gpt-4o-mini',
16
+ test: {
17
+ skipAPICall: true,
18
+ },
19
+ });
@@ -1,7 +1,7 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from '@/libs/model-runtime';
3
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
4
3
 
4
+ import { testProvider } from '../providerTestUtils';
5
5
  import { LobeSenseNovaAI } from './index';
6
6
 
7
7
  const provider = ModelProvider.SenseNova;
@@ -1,7 +1,7 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from '@/libs/model-runtime';
3
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
4
3
 
4
+ import { testProvider } from '../providerTestUtils';
5
5
  import { LobeSparkAI } from './index';
6
6
 
7
7
  const provider = ModelProvider.Spark;
@@ -1,7 +1,7 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from '@/libs/model-runtime';
3
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
4
3
 
4
+ import { testProvider } from '../providerTestUtils';
5
5
  import { LobeStepfunAI } from './index';
6
6
 
7
7
  const provider = ModelProvider.Stepfun;
@@ -1,15 +1,14 @@
1
1
  // @vitest-environment node
2
- import OpenAI from 'openai';
3
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
-
5
2
  import {
6
3
  ChatStreamCallbacks,
7
4
  LobeMoonshotAI,
8
5
  LobeOpenAICompatibleRuntime,
9
6
  ModelProvider,
10
- } from '@/libs/model-runtime';
11
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
7
+ } from '@lobechat/model-runtime';
8
+ import OpenAI from 'openai';
9
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
12
10
 
11
+ import { testProvider } from '../providerTestUtils';
13
12
  import * as debugStreamModule from '../utils/debugStream';
14
13
  import { LobeTaichuAI } from './index';
15
14
 
@@ -1,5 +1,5 @@
1
1
  // @vitest-environment node
2
- import { ModelProvider } from '@/libs/model-runtime';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
3
3
 
4
4
  import { testProvider } from '../providerTestUtils';
5
5
  import { LobeTencentCloudAI } from './index';
@@ -1,6 +1,5 @@
1
1
  // @vitest-environment node
2
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
3
-
2
+ import { testProvider } from '../providerTestUtils';
4
3
  import { LobeTogetherAI } from './index';
5
4
 
6
5
  testProvider({
@@ -1,6 +1,5 @@
1
1
  // @vitest-environment node
2
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
3
-
2
+ import { testProvider } from '../providerTestUtils';
4
3
  import { LobeUpstageAI } from './index';
5
4
 
6
5
  testProvider({
@@ -1,22 +1,24 @@
1
1
  // @vitest-environment node
2
- import OpenAI from 'openai';
3
- import type { Stream } from 'openai/streaming';
4
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
5
-
6
2
  import {
7
3
  AgentRuntimeErrorType,
8
4
  ChatStreamCallbacks,
9
5
  ChatStreamPayload,
10
6
  LobeOpenAICompatibleRuntime,
11
7
  ModelProvider,
12
- } from '@/libs/model-runtime';
13
- import officalOpenAIModels from '@/libs/model-runtime/openai/fixtures/openai-models.json';
14
- import { sleep } from '@/utils/sleep';
8
+ } from '@lobechat/model-runtime';
9
+ import OpenAI from 'openai';
10
+ import type { Stream } from 'openai/streaming';
11
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
15
12
 
16
13
  import * as debugStreamModule from '../debugStream';
17
14
  import * as openaiHelpers from '../openaiHelpers';
18
15
  import { createOpenAICompatibleRuntime } from './index';
19
16
 
17
+ const sleep = async (ms: number) =>
18
+ await new Promise((resolve) => {
19
+ setTimeout(resolve, ms);
20
+ });
21
+
20
22
  const provider = 'groq';
21
23
  const defaultBaseURL = 'https://api.groq.com/openai/v1';
22
24
  const bizErrorType = 'ProviderBizError';
@@ -1,7 +1,7 @@
1
1
  import Anthropic from '@anthropic-ai/sdk';
2
2
  import type { Stream } from '@anthropic-ai/sdk/streaming';
3
3
 
4
- import { CitationItem, ModelTokensUsage } from '@/types/message';
4
+ import { ChatCitationItem, ModelTokensUsage } from '@/types/message';
5
5
 
6
6
  import { ChatStreamCallbacks } from '../../types';
7
7
  import {
@@ -180,7 +180,7 @@ export const transformAnthropicStream = (
180
180
  context.returnedCitationArray.push({
181
181
  title: citations.title,
182
182
  url: citations.url,
183
- } as CitationItem);
183
+ } as ChatCitationItem);
184
184
  }
185
185
 
186
186
  return { data: null, id: context.id, type: 'text' };