@lobehub/chat 1.118.4 → 1.118.6

This diff shows the content of publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (165)
  1. package/.github/workflows/test.yml +36 -1
  2. package/CHANGELOG.md +59 -0
  3. package/changelog/v1.json +18 -0
  4. package/locales/ar/setting.json +4 -0
  5. package/locales/bg-BG/setting.json +4 -0
  6. package/locales/de-DE/setting.json +4 -0
  7. package/locales/en-US/setting.json +4 -0
  8. package/locales/es-ES/setting.json +4 -0
  9. package/locales/fa-IR/setting.json +4 -0
  10. package/locales/fr-FR/setting.json +4 -0
  11. package/locales/it-IT/setting.json +4 -0
  12. package/locales/ja-JP/setting.json +4 -0
  13. package/locales/ko-KR/setting.json +4 -0
  14. package/locales/nl-NL/setting.json +4 -0
  15. package/locales/pl-PL/setting.json +4 -0
  16. package/locales/pt-BR/setting.json +4 -0
  17. package/locales/ru-RU/setting.json +4 -0
  18. package/locales/tr-TR/setting.json +4 -0
  19. package/locales/vi-VN/setting.json +4 -0
  20. package/locales/zh-CN/setting.json +4 -0
  21. package/locales/zh-TW/setting.json +4 -0
  22. package/package.json +2 -1
  23. package/packages/const/package.json +4 -1
  24. package/packages/const/src/image.ts +1 -1
  25. package/packages/const/src/settings/agent.ts +1 -0
  26. package/packages/database/src/repositories/aiInfra/index.ts +7 -2
  27. package/packages/model-bank/package.json +75 -0
  28. package/{src/config → packages/model-bank/src}/aiModels/bfl.ts +1 -1
  29. package/{src/config → packages/model-bank/src}/aiModels/fal.ts +1 -1
  30. package/{src/config → packages/model-bank/src}/aiModels/google.ts +1 -1
  31. package/{src/config → packages/model-bank/src}/aiModels/index.ts +2 -2
  32. package/{src/config → packages/model-bank/src}/aiModels/openai.ts +1 -1
  33. package/packages/model-bank/src/exports.test.ts +37 -0
  34. package/packages/model-bank/src/index.ts +2 -0
  35. package/{src/libs → packages/model-bank/src}/standard-parameters/index.ts +1 -1
  36. package/packages/model-bank/vitest.config.mts +11 -0
  37. package/packages/model-runtime/package.json +1 -0
  38. package/packages/model-runtime/src/ai360/index.ts +1 -1
  39. package/packages/model-runtime/src/aihubmix/index.ts +1 -1
  40. package/packages/model-runtime/src/anthropic/index.ts +6 -6
  41. package/packages/model-runtime/src/baichuan/index.ts +1 -1
  42. package/packages/model-runtime/src/bfl/createImage.ts +1 -2
  43. package/packages/model-runtime/src/cloudflare/index.ts +1 -1
  44. package/packages/model-runtime/src/cohere/index.ts +1 -1
  45. package/packages/model-runtime/src/deepseek/index.ts +1 -1
  46. package/packages/model-runtime/src/fal/index.ts +1 -2
  47. package/packages/model-runtime/src/fireworksai/index.ts +1 -1
  48. package/packages/model-runtime/src/groq/index.ts +1 -1
  49. package/packages/model-runtime/src/higress/index.ts +1 -1
  50. package/packages/model-runtime/src/huggingface/index.ts +1 -1
  51. package/packages/model-runtime/src/hunyuan/index.ts +1 -1
  52. package/packages/model-runtime/src/infiniai/index.ts +1 -1
  53. package/packages/model-runtime/src/internlm/index.ts +1 -1
  54. package/packages/model-runtime/src/jina/index.ts +1 -1
  55. package/packages/model-runtime/src/lmstudio/index.ts +1 -1
  56. package/packages/model-runtime/src/minimax/index.ts +1 -1
  57. package/packages/model-runtime/src/mistral/index.ts +1 -1
  58. package/packages/model-runtime/src/novita/__snapshots__/index.test.ts.snap +309 -21
  59. package/packages/model-runtime/src/novita/index.ts +31 -1
  60. package/packages/model-runtime/src/ollama/index.ts +1 -1
  61. package/packages/model-runtime/src/openai/__snapshots__/index.test.ts.snap +28 -0
  62. package/packages/model-runtime/src/openai/index.test.ts +0 -3
  63. package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +46 -0
  64. package/packages/model-runtime/src/openrouter/index.test.ts +21 -45
  65. package/packages/model-runtime/src/openrouter/index.ts +22 -25
  66. package/packages/model-runtime/src/openrouter/type.ts +12 -24
  67. package/packages/model-runtime/src/ppio/index.ts +1 -1
  68. package/packages/model-runtime/src/search1api/index.ts +1 -1
  69. package/packages/model-runtime/src/sensenova/index.ts +1 -1
  70. package/packages/model-runtime/src/stepfun/index.ts +1 -1
  71. package/packages/model-runtime/src/tencentcloud/index.ts +1 -1
  72. package/packages/model-runtime/src/togetherai/index.ts +1 -1
  73. package/packages/model-runtime/src/types/image.ts +1 -1
  74. package/packages/model-runtime/src/utils/modelParse.test.ts +5 -5
  75. package/packages/model-runtime/src/utils/modelParse.ts +47 -22
  76. package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +1 -2
  77. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +1 -1
  78. package/packages/model-runtime/src/vllm/index.ts +1 -1
  79. package/packages/model-runtime/src/xinference/index.ts +1 -1
  80. package/packages/types/src/agent/chatConfig.ts +6 -0
  81. package/packages/types/src/aiModel.ts +1 -2
  82. package/packages/types/src/llm.ts +1 -1
  83. package/packages/utils/src/getFallbackModelProperty.test.ts +1 -1
  84. package/packages/utils/src/getFallbackModelProperty.ts +1 -1
  85. package/packages/utils/src/parseModels.test.ts +1 -2
  86. package/packages/utils/src/parseModels.ts +1 -1
  87. package/src/app/[variants]/(main)/image/features/GenerationFeed/BatchItem.tsx +1 -1
  88. package/src/app/[variants]/(main)/profile/features/ClerkProfile.tsx +1 -1
  89. package/src/app/[variants]/(main)/settings/_layout/Desktop/index.tsx +1 -5
  90. package/src/features/AgentSetting/AgentModal/index.tsx +9 -0
  91. package/src/locales/default/models.ts +1 -1
  92. package/src/locales/default/setting.ts +4 -0
  93. package/src/server/globalConfig/genServerAiProviderConfig.test.ts +3 -3
  94. package/src/server/globalConfig/genServerAiProviderConfig.ts +1 -1
  95. package/src/server/routers/async/image.ts +1 -1
  96. package/src/server/services/discover/index.test.ts +1 -1
  97. package/src/server/services/discover/index.ts +16 -8
  98. package/src/services/chat.ts +8 -1
  99. package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap +1 -0
  100. package/src/store/aiInfra/slices/aiProvider/action.ts +1 -1
  101. package/src/store/image/slices/generationConfig/action.test.ts +2 -6
  102. package/src/store/image/slices/generationConfig/action.ts +3 -3
  103. package/src/store/image/slices/generationConfig/hooks.test.ts +2 -2
  104. package/src/store/image/slices/generationConfig/hooks.ts +1 -1
  105. package/src/store/image/slices/generationConfig/initialState.ts +2 -3
  106. package/src/store/image/slices/generationConfig/selectors.test.ts +1 -2
  107. package/src/store/image/slices/generationConfig/selectors.ts +1 -1
  108. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +2 -0
  109. /package/{src/config → packages/model-bank/src}/aiModels/ai21.ts +0 -0
  110. /package/{src/config → packages/model-bank/src}/aiModels/ai302.ts +0 -0
  111. /package/{src/config → packages/model-bank/src}/aiModels/ai360.ts +0 -0
  112. /package/{src/config → packages/model-bank/src}/aiModels/aihubmix.ts +0 -0
  113. /package/{src/config → packages/model-bank/src}/aiModels/akashchat.ts +0 -0
  114. /package/{src/config → packages/model-bank/src}/aiModels/anthropic.ts +0 -0
  115. /package/{src/config → packages/model-bank/src}/aiModels/azure.ts +0 -0
  116. /package/{src/config → packages/model-bank/src}/aiModels/azureai.ts +0 -0
  117. /package/{src/config → packages/model-bank/src}/aiModels/baichuan.ts +0 -0
  118. /package/{src/config → packages/model-bank/src}/aiModels/bedrock.ts +0 -0
  119. /package/{src/config → packages/model-bank/src}/aiModels/cloudflare.ts +0 -0
  120. /package/{src/config → packages/model-bank/src}/aiModels/cohere.ts +0 -0
  121. /package/{src/config → packages/model-bank/src}/aiModels/deepseek.ts +0 -0
  122. /package/{src/config → packages/model-bank/src}/aiModels/fireworksai.ts +0 -0
  123. /package/{src/config → packages/model-bank/src}/aiModels/giteeai.ts +0 -0
  124. /package/{src/config → packages/model-bank/src}/aiModels/github.ts +0 -0
  125. /package/{src/config → packages/model-bank/src}/aiModels/groq.ts +0 -0
  126. /package/{src/config → packages/model-bank/src}/aiModels/higress.ts +0 -0
  127. /package/{src/config → packages/model-bank/src}/aiModels/huggingface.ts +0 -0
  128. /package/{src/config → packages/model-bank/src}/aiModels/hunyuan.ts +0 -0
  129. /package/{src/config → packages/model-bank/src}/aiModels/infiniai.ts +0 -0
  130. /package/{src/config → packages/model-bank/src}/aiModels/internlm.ts +0 -0
  131. /package/{src/config → packages/model-bank/src}/aiModels/jina.ts +0 -0
  132. /package/{src/config → packages/model-bank/src}/aiModels/lmstudio.ts +0 -0
  133. /package/{src/config → packages/model-bank/src}/aiModels/lobehub.ts +0 -0
  134. /package/{src/config → packages/model-bank/src}/aiModels/minimax.ts +0 -0
  135. /package/{src/config → packages/model-bank/src}/aiModels/mistral.ts +0 -0
  136. /package/{src/config → packages/model-bank/src}/aiModels/modelscope.ts +0 -0
  137. /package/{src/config → packages/model-bank/src}/aiModels/moonshot.ts +0 -0
  138. /package/{src/config → packages/model-bank/src}/aiModels/novita.ts +0 -0
  139. /package/{src/config → packages/model-bank/src}/aiModels/nvidia.ts +0 -0
  140. /package/{src/config → packages/model-bank/src}/aiModels/ollama.ts +0 -0
  141. /package/{src/config → packages/model-bank/src}/aiModels/openrouter.ts +0 -0
  142. /package/{src/config → packages/model-bank/src}/aiModels/perplexity.ts +0 -0
  143. /package/{src/config → packages/model-bank/src}/aiModels/ppio.ts +0 -0
  144. /package/{src/config → packages/model-bank/src}/aiModels/qiniu.ts +0 -0
  145. /package/{src/config → packages/model-bank/src}/aiModels/qwen.ts +0 -0
  146. /package/{src/config → packages/model-bank/src}/aiModels/sambanova.ts +0 -0
  147. /package/{src/config → packages/model-bank/src}/aiModels/search1api.ts +0 -0
  148. /package/{src/config → packages/model-bank/src}/aiModels/sensenova.ts +0 -0
  149. /package/{src/config → packages/model-bank/src}/aiModels/siliconcloud.ts +0 -0
  150. /package/{src/config → packages/model-bank/src}/aiModels/spark.ts +0 -0
  151. /package/{src/config → packages/model-bank/src}/aiModels/stepfun.ts +0 -0
  152. /package/{src/config → packages/model-bank/src}/aiModels/taichu.ts +0 -0
  153. /package/{src/config → packages/model-bank/src}/aiModels/tencentcloud.ts +0 -0
  154. /package/{src/config → packages/model-bank/src}/aiModels/togetherai.ts +0 -0
  155. /package/{src/config → packages/model-bank/src}/aiModels/upstage.ts +0 -0
  156. /package/{src/config → packages/model-bank/src}/aiModels/v0.ts +0 -0
  157. /package/{src/config → packages/model-bank/src}/aiModels/vertexai.ts +0 -0
  158. /package/{src/config → packages/model-bank/src}/aiModels/vllm.ts +0 -0
  159. /package/{src/config → packages/model-bank/src}/aiModels/volcengine.ts +0 -0
  160. /package/{src/config → packages/model-bank/src}/aiModels/wenxin.ts +0 -0
  161. /package/{src/config → packages/model-bank/src}/aiModels/xai.ts +0 -0
  162. /package/{src/config → packages/model-bank/src}/aiModels/xinference.ts +0 -0
  163. /package/{src/config → packages/model-bank/src}/aiModels/zeroone.ts +0 -0
  164. /package/{src/config → packages/model-bank/src}/aiModels/zhipu.ts +0 -0
  165. /package/{src/libs → packages/model-bank/src}/standard-parameters/index.test.ts +0 -0
@@ -25,16 +25,16 @@ export const MODEL_LIST_CONFIGS = {
     reasoningKeywords: ['thinking', '-2.5-'],
     visionKeywords: ['gemini', 'learnlm'],
   },
-  llama: {
-    functionCallKeywords: ['llama-3.2', 'llama-3.3', 'llama-4'],
-    reasoningKeywords: [],
-    visionKeywords: ['llava'],
-  },
   moonshot: {
     functionCallKeywords: ['moonshot', 'kimi'],
     reasoningKeywords: ['thinking'],
     visionKeywords: ['vision', 'kimi-latest', 'kimi-thinking-preview'],
   },
+  ollama: {
+    functionCallKeywords: ['llama-3.2', 'llama-3.3', 'llama-4'],
+    reasoningKeywords: [],
+    visionKeywords: ['llava'],
+  },
   openai: {
     excludeKeywords: ['audio'],
     functionCallKeywords: ['4o', '4.1', 'o3', 'o4', 'oss'],
@@ -86,8 +86,8 @@ export const PROVIDER_DETECTION_CONFIG = {
   anthropic: ['claude'],
   deepseek: ['deepseek'],
   google: ['gemini', 'imagen'],
-  llama: ['llama', 'llava'],
   moonshot: ['moonshot', 'kimi'],
+  ollama: ['llama', 'llava'],
   openai: ['o1', 'o3', 'o4', 'gpt-'],
   qwen: ['qwen', 'qwq', 'qvq'],
   v0: ['v0'],
@@ -119,12 +119,7 @@ export const IMAGE_MODEL_KEYWORDS = [
 ] as const;
 
 // 嵌入模型关键词配置
-export const EMBEDDING_MODEL_KEYWORDS = [
-  'embedding',
-  'embed',
-  'bge',
-  'm3e',
-] as const;
+export const EMBEDDING_MODEL_KEYWORDS = ['embedding', 'embed', 'bge', 'm3e'] as const;
 
 /**
  * 检测关键词列表是否匹配模型ID(支持多种匹配模式)
@@ -178,8 +173,9 @@ const findKnownModelByProvider = async (
 
   try {
     // 尝试动态导入对应的配置文件
-    const moduleImport = await import(`@/config/aiModels/${provider}.ts`);
-    const providerModels = moduleImport.default;
+    const modules = await import('model-bank');
+
+    const providerModels = modules[provider];
 
     // 如果导入成功且有数据,进行查找
     if (Array.isArray(providerModels)) {
@@ -287,9 +283,9 @@ const processModelCard = (
       )
         ? 'image'
         : isKeywordListMatch(
-            model.id.toLowerCase(),
-            EMBEDDING_MODEL_KEYWORDS.map((k) => k.toLowerCase()),
-          )
+              model.id.toLowerCase(),
+              EMBEDDING_MODEL_KEYWORDS.map((k) => k.toLowerCase()),
+            )
           ? 'embedding'
           : 'chat');
 
@@ -298,6 +294,34 @@ const processModelCard = (
     return undefined;
   }
 
+  const formatPricing = (pricing?: { input?: number; output?: number; units?: any[] }) => {
+    if (!pricing || typeof pricing !== 'object') return undefined;
+    if (Array.isArray(pricing.units)) {
+      return { units: pricing.units };
+    }
+    const { input, output } = pricing;
+    if (typeof input !== 'number' && typeof output !== 'number') return undefined;
+
+    const units = [];
+    if (typeof input === 'number') {
+      units.push({
+        name: 'textInput' as const,
+        rate: input,
+        strategy: 'fixed' as const,
+        unit: 'millionTokens' as const,
+      });
+    }
+    if (typeof output === 'number') {
+      units.push({
+        name: 'textOutput' as const,
+        rate: output,
+        strategy: 'fixed' as const,
+        unit: 'millionTokens' as const,
+      });
+    }
+    return { units };
+  };
+
   return {
     contextWindowTokens: model.contextWindowTokens ?? knownModel?.contextWindowTokens ?? undefined,
     description: model.description ?? knownModel?.description ?? '',
@@ -312,7 +336,7 @@ const processModelCard = (
       false),
     id: model.id,
     maxOutput: model.maxOutput ?? knownModel?.maxOutput ?? undefined,
-    // pricing: knownModel?.pricing ?? undefined,
+    pricing: formatPricing(model?.pricing) ?? undefined,
    reasoning:
       model.reasoning ??
       knownModel?.abilities?.reasoning ??
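For context, the formatPricing helper added above normalizes a provider's flat input/output pricing into the units-based pricing shape. A minimal sketch of its behavior, with illustrative values that are not part of the diff:

// Illustrative input: flat per-million-token prices as reported by a provider.
const flat = { input: 0.5, output: 1.5 };
// formatPricing(flat) yields:
// {
//   units: [
//     { name: 'textInput', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
//     { name: 'textOutput', rate: 1.5, strategy: 'fixed', unit: 'millionTokens' },
//   ],
// }
// A pricing object that already carries a `units` array is passed through unchanged,
// and one with neither numeric field yields undefined.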
@@ -342,7 +366,7 @@ export const processModelList = async (
   config: ModelProcessorConfig,
   provider?: keyof typeof MODEL_LIST_CONFIGS,
 ): Promise<ChatModelCard[]> => {
-  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+  const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
   return Promise.all(
     modelList.map(async (model) => {
@@ -375,14 +399,15 @@ export const processMultiProviderModelList = async (
   modelList: Array<{ id: string }>,
   providerid?: ModelProviderKey,
 ): Promise<ChatModelCard[]> => {
-  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+  const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
   // 如果提供了 providerid,尝试获取该提供商的本地配置
   let providerLocalConfig: any[] | null = null;
   if (providerid) {
     try {
-      const moduleImport = await import(`@/config/aiModels/${providerid}.ts`);
-      providerLocalConfig = moduleImport.default;
+      const modules = await import('model-bank');
+
+      providerLocalConfig = modules[providerid];
     } catch {
       // 如果配置文件不存在或导入失败,保持为 null
       providerLocalConfig = null;
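Both call sites above replace a per-file dynamic import of `@/config/aiModels/<provider>.ts` with a single import of the new model-bank package and an index into its exports. A minimal sketch of the lookup pattern, assuming model-bank keeps the per-provider named exports used elsewhere in this diff:

// Sketch only: each provider's model list is a named export of 'model-bank',
// so looking one up is a property access rather than a path-based import.
const modules = await import('model-bank');
const providerModels = (modules as Record<string, unknown>)['openai'];
if (Array.isArray(providerModels)) {
  // safe to search the list, mirroring the Array.isArray guard above
}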
@@ -1,9 +1,8 @@
 import { imageUrlToBase64 } from '@lobechat/utils';
 import createDebug from 'debug';
+import { RuntimeImageGenParamsValue } from 'model-bank';
 import OpenAI from 'openai';
 
-import { RuntimeImageGenParamsValue } from '@/libs/standard-parameters/index';
-
 import { CreateImagePayload, CreateImageResponse } from '../../types/image';
 import { convertImageUrlToFile } from '../openaiHelpers';
 import { parseDataUri } from '../uriParser';
@@ -1,10 +1,10 @@
 import { getModelPropertyWithFallback } from '@lobechat/utils';
 import dayjs from 'dayjs';
 import utc from 'dayjs/plugin/utc';
+import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
 import OpenAI, { ClientOptions } from 'openai';
 import { Stream } from 'openai/streaming';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import type { AiModelType } from '@/types/aiModel';
 import type { ChatModelCard } from '@/types/llm';
 
@@ -13,7 +13,7 @@ export const LobeVLLMAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_VLLM_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: VLLMModelCard[] = modelsPage.data;
@@ -18,7 +18,7 @@ export const LobeXinferenceAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_XINFERENCE_CHAT_COMPLETION === '1',
   },
   models: async ({ client }) => {
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: XinferenceModelCard[] = modelsPage.data;
@@ -16,6 +16,11 @@ export interface LobeAgentChatConfig {
 
   enableMaxTokens?: boolean;
 
+  /**
+   * 是否开启流式输出
+   */
+  enableStreaming?: boolean;
+
   /**
    * 是否开启推理
    */
@@ -68,6 +73,7 @@ export const AgentChatConfigSchema = z.object({
   enableMaxTokens: z.boolean().optional(),
   enableReasoning: z.boolean().optional(),
   enableReasoningEffort: z.boolean().optional(),
+  enableStreaming: z.boolean().optional(),
   historyCount: z.number().optional(),
   reasoningBudgetToken: z.number().optional(),
   searchFCModel: z
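Because enableStreaming is optional in both the interface and the schema, chat configs saved before this version still validate. A minimal sketch of the new field's zod behavior (other fields elided):

// Sketch only: z.boolean().optional() accepts an explicit value or an omitted one.
AgentChatConfigSchema.shape.enableStreaming.parse(false); // -> false
AgentChatConfigSchema.shape.enableStreaming.parse(undefined); // -> undefined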
@@ -1,7 +1,6 @@
+import { ModelParamsSchema } from 'model-bank';
 import { z } from 'zod';
 
-import { ModelParamsSchema } from '@/libs/standard-parameters';
-
 export type ModelPriceCurrency = 'CNY' | 'USD';
 
 export const AiModelSourceEnum = {
@@ -1,6 +1,6 @@
+import { ModelParamsSchema } from 'model-bank';
 import { ReactNode } from 'react';
 
-import { ModelParamsSchema } from '@/libs/standard-parameters';
 import { AiModelType, Pricing } from '@/types/aiModel';
 import { AiProviderSettings } from '@/types/aiProvider';
 
@@ -3,7 +3,7 @@ import { describe, expect, it, vi } from 'vitest';
 import { getModelPropertyWithFallback } from './getFallbackModelProperty';
 
 // Mock LOBE_DEFAULT_MODEL_LIST for testing
-vi.mock('@/config/aiModels', () => ({
+vi.mock('model-bank', () => ({
   LOBE_DEFAULT_MODEL_LIST: [
     {
       id: 'gpt-4',
@@ -12,7 +12,7 @@ export const getModelPropertyWithFallback = async <T>(
   propertyName: keyof AiFullModelCard,
   providerId?: string,
 ): Promise<T> => {
-  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+  const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
   // Step 1: If providerId is provided, prioritize an exact match (same provider + same id)
   if (providerId) {
@@ -1,7 +1,6 @@
+import { LOBE_DEFAULT_MODEL_LIST, openaiChatModels } from 'model-bank';
 import { describe, expect, it } from 'vitest';
 
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
-import { openaiChatModels } from '@/config/aiModels/openai';
 import { AiFullModelCard } from '@/types/aiModel';
 
 import { extractEnabledModels, parseModelString, transformToAiModelList } from './parseModels';
@@ -141,7 +141,7 @@ export const transformToAiModelList = async ({
   }
 
   // 异步获取配置
-  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+  const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
 
   return produce(chatModels, (draft) => {
     // 处理添加或替换逻辑
@@ -9,12 +9,12 @@ import dayjs from 'dayjs';
 import relativeTime from 'dayjs/plugin/relativeTime';
 import { omit } from 'lodash-es';
 import { CopyIcon, RotateCcwSquareIcon, Trash2 } from 'lucide-react';
+import { RuntimeImageGenParams } from 'model-bank';
 import { memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';
 
 import InvalidAPIKey from '@/components/InvalidAPIKey';
-import { RuntimeImageGenParams } from '@/libs/standard-parameters/index';
 import { useImageStore } from '@/store/image';
 import { AsyncTaskErrorType } from '@/types/asyncTask';
 import { GenerationBatch } from '@/types/generation';
@@ -48,7 +48,7 @@ export const useStyles = createStyles(
       height: 100%;
     `,
     scrollBox: css`
-      background: transparent;
+      background: transparent !important;
     `,
   }) as Partial<Record<keyof ElementsConfig, any>>,
 );
@@ -2,7 +2,7 @@
 
 import { useResponsive, useTheme } from 'antd-style';
 import { usePathname } from 'next/navigation';
-import { PropsWithChildren, memo, useEffect, useRef } from 'react';
+import { PropsWithChildren, memo, useRef } from 'react';
 import { Flexbox } from 'react-layout-kit';
 
 import InitClientDB from '@/features/InitClientDB';
@@ -31,10 +31,6 @@ const Layout = memo<LayoutProps>(({ children, category }) => {
   const { md = true } = useResponsive();
   const theme = useTheme();
 
-  useEffect(() => {
-    console.log('settings render');
-  });
-
   return (
     <Flexbox
       height={'100%'}
@@ -31,6 +31,15 @@ const AgentModal = memo(() => {
       name: '_modalConfig',
       tag: 'model',
     },
+    {
+      children: <Switch />,
+      desc: t('settingChat.enableStreaming.desc'),
+      label: t('settingChat.enableStreaming.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'enableStreaming',
+      valuePropName: 'checked',
+    },
     {
      children: <SliderWithInput max={2} min={0} step={0.1} />,
      desc: t('settingModel.temperature.desc'),
@@ -1,4 +1,4 @@
-import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
 
 const locales: {
   [key: string]: {
@@ -241,6 +241,10 @@ export default {
     title: '限制历史消息数',
     unlimited: '不限历史消息数',
   },
+  enableStreaming: {
+    desc: '启用流式输出以实时显示响应。禁用后仅显示完整响应。',
+    title: '启用流式输出',
+  },
   historyCount: {
     desc: '每次请求携带的消息数(包括最新编写的提问。每个提问和回答都计算1)',
     title: '附带消息数',
@@ -3,8 +3,8 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
 import { genServerAiProvidersConfig } from './genServerAiProviderConfig';
 
 // Mock dependencies using importOriginal to preserve real provider data
-vi.mock('@/config/aiModels', async (importOriginal) => {
-  const actual = await importOriginal<typeof import('@/config/aiModels')>();
+vi.mock('model-bank', async (importOriginal) => {
+  const actual = await importOriginal<typeof import('model-bank')>();
   return {
     ...actual,
     // Keep the original exports but we can override specific ones if needed
@@ -189,7 +189,7 @@ describe('genServerAiProvidersConfig Error Handling', () => {
     vi.resetModules();
 
     // Mock dependencies with a missing provider scenario
-    vi.doMock('@/config/aiModels', () => ({
+    vi.doMock('model-bank', () => ({
       // Explicitly set openai to undefined to simulate missing provider
      openai: undefined,
      anthropic: [
@@ -1,6 +1,6 @@
 import { ModelProvider } from '@lobechat/model-runtime';
+import * as AiModels from 'model-bank';
 
-import * as AiModels from '@/config/aiModels';
 import { getLLMConfig } from '@/config/llm';
 import { AiFullModelCard } from '@/types/aiModel';
 import { ProviderConfig } from '@/types/user/settings';
@@ -1,11 +1,11 @@
 import debug from 'debug';
+import { RuntimeImageGenParams } from 'model-bank';
 import { z } from 'zod';
 
 import { ASYNC_TASK_TIMEOUT, AsyncTaskModel } from '@/database/models/asyncTask';
 import { FileModel } from '@/database/models/file';
 import { GenerationModel } from '@/database/models/generation';
 import { AgentRuntimeErrorType } from '@/libs/model-runtime/error';
-import { RuntimeImageGenParams } from '@/libs/standard-parameters/index';
 import { asyncAuthedProcedure, asyncRouter as router } from '@/libs/trpc/async';
 import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
 import { GenerationService } from '@/server/services/generation';
@@ -23,7 +23,7 @@ vi.mock('@/locales/resources', () => ({
 process.env.MARKET_BASE_URL = 'http://localhost:8787/api';
 
 // Mock constants with inline data
-vi.mock('@/config/aiModels', () => ({
+vi.mock('model-bank', () => ({
   LOBE_DEFAULT_MODEL_LIST: [
     {
       id: 'gpt-4',
@@ -43,7 +43,11 @@ import {
   ProviderQueryParams,
   ProviderSorts,
 } from '@/types/discover';
-import { getAudioInputUnitRate, getTextInputUnitRate, getTextOutputUnitRate } from '@/utils/pricing';
+import {
+  getAudioInputUnitRate,
+  getTextInputUnitRate,
+  getTextOutputUnitRate,
+} from '@/utils/pricing';
 
 const log = debug('lobe-server:discover');
 
@@ -728,7 +732,7 @@ export class DiscoverService {
   private _getProviderList = async (): Promise<DiscoverProviderItem[]> => {
     log('_getProviderList: fetching provider list');
     const [{ LOBE_DEFAULT_MODEL_LIST }, { DEFAULT_MODEL_PROVIDER_LIST }] = await Promise.all([
-      import('@/config/aiModels'),
+      import('model-bank'),
       import('@/config/modelProviders'),
     ]);
     const result = DEFAULT_MODEL_PROVIDER_LIST.map((item) => {
@@ -754,7 +758,7 @@
   }): Promise<DiscoverProviderDetail | undefined> => {
     log('getProviderDetail: params=%O', params);
     const { identifier, locale, withReadme } = params;
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
     const all = await this._getProviderList();
     let provider = all.find((item) => item.identifier === identifier);
     if (!provider) {
@@ -890,7 +894,7 @@ export class DiscoverService {
 
   private _getRawModelList = async (): Promise<DiscoverModelItem[]> => {
     log('_getRawModelList: fetching raw model list');
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
     const result = LOBE_DEFAULT_MODEL_LIST.map((item) => {
       const identifier = (item.id.split('/').at(-1) || item.id).toLowerCase();
       const providers = uniq(
@@ -983,7 +987,7 @@ export class DiscoverService {
   getModelCategories = async (params: CategoryListQuery = {}): Promise<CategoryItem[]> => {
     log('getModelCategories: params=%O', params);
     const { q } = params;
-    const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
+    const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
     let list = LOBE_DEFAULT_MODEL_LIST;
     if (q) {
       const originalCount = list.length;
@@ -1018,7 +1022,7 @@
   }): Promise<DiscoverModelDetail | undefined> => {
     log('getModelDetail: params=%O', params);
     const [{ LOBE_DEFAULT_MODEL_LIST }, { DEFAULT_MODEL_PROVIDER_LIST }] = await Promise.all([
-      import('@/config/aiModels'),
+      import('model-bank'),
       import('@/config/modelProviders'),
     ]);
     const { identifier } = params;
@@ -1158,9 +1162,13 @@ export class DiscoverService {
       case ModelSorts.OutputPrice: {
         list = list.sort((a, b) => {
           if (order === 'asc') {
-            return (getTextOutputUnitRate(a.pricing) || 0) - (getTextOutputUnitRate(b.pricing) || 0);
+            return (
+              (getTextOutputUnitRate(a.pricing) || 0) - (getTextOutputUnitRate(b.pricing) || 0)
+            );
           } else {
-            return (getTextOutputUnitRate(b.pricing) || 0) - (getTextOutputUnitRate(a.pricing) || 0);
+            return (
+              (getTextOutputUnitRate(b.pricing) || 0) - (getTextOutputUnitRate(a.pricing) || 0)
+            );
           }
         });
         break;
@@ -365,8 +365,15 @@ class ChatService {
         ? 'responses'
         : undefined;
 
+    // Get the chat config to check streaming preference
+    const chatConfig = agentChatConfigSelectors.currentChatConfig(getAgentStoreState());
+
     const payload = merge(
-      { model: DEFAULT_AGENT_CONFIG.model, stream: true, ...DEFAULT_AGENT_CONFIG.params },
+      {
+        model: DEFAULT_AGENT_CONFIG.model,
+        stream: chatConfig.enableStreaming !== false, // Default to true if not set
+        ...DEFAULT_AGENT_CONFIG.params,
+      },
      { ...res, apiMode, model },
    );
 
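With the payload change above, streaming stays on unless the agent's chat config explicitly turns it off. A minimal sketch of the mapping (not part of the diff):

// Sketch only: how enableStreaming maps to the request's `stream` flag.
const toStream = (enableStreaming?: boolean) => enableStreaming !== false;
toStream(undefined); // true  - configs without the flag keep streaming
toStream(true);      // true
toStream(false);     // false - only an explicit opt-out disables streaming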
@@ -9,6 +9,7 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
   "enableCompressHistory": true,
   "enableHistoryCount": true,
   "enableReasoning": false,
+  "enableStreaming": true,
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {
@@ -232,7 +232,7 @@ export const createAiProviderSlice: StateCreator<
     !isDeprecatedEdition ? [AiProviderSwrKey.fetchAiProviderRuntimeState, isLogin] : null,
     async ([, isLogin]) => {
       const [{ LOBE_DEFAULT_MODEL_LIST: builtinAiModelList }, { DEFAULT_MODEL_PROVIDER_LIST }] =
-        await Promise.all([import('@/config/aiModels'), import('@/config/modelProviders')]);
+        await Promise.all([import('model-bank'), import('@/config/modelProviders')]);
 
       if (isLogin) {
         const data = await aiProviderService.getAiProviderRuntimeState();
@@ -1,12 +1,8 @@
 import { act, renderHook } from '@testing-library/react';
+import { ModelParamsSchema, RuntimeImageGenParams, extractDefaultValues } from 'model-bank';
+import { fluxSchnellParamsSchema } from 'model-bank';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { fluxSchnellParamsSchema } from '@/config/aiModels/fal';
-import {
-  ModelParamsSchema,
-  RuntimeImageGenParams,
-  extractDefaultValues,
-} from '@/libs/standard-parameters/index';
 import { useImageStore } from '@/store/image';
 import { AIImageModelCard } from '@/types/aiModel';
 
@@ -1,12 +1,12 @@
-import { StateCreator } from 'zustand/vanilla';
-
 import {
   ModelParamsSchema,
   RuntimeImageGenParams,
   RuntimeImageGenParamsKeys,
   RuntimeImageGenParamsValue,
   extractDefaultValues,
-} from '@/libs/standard-parameters/index';
+} from 'model-bank';
+import { StateCreator } from 'zustand/vanilla';
+
 import { aiProviderSelectors, getAiInfraStoreState } from '@/store/aiInfra';
 import { AIImageModelCard } from '@/types/aiModel';
 
@@ -1,8 +1,8 @@
 import { act, renderHook } from '@testing-library/react';
+import { ModelParamsSchema, RuntimeImageGenParams } from 'model-bank';
+import { fluxSchnellParamsSchema } from 'model-bank';
 import { describe, expect, it, vi } from 'vitest';
 
-import { fluxSchnellParamsSchema } from '@/config/aiModels/fal';
-import { ModelParamsSchema, RuntimeImageGenParams } from '@/libs/standard-parameters/index';
 import { useImageStore } from '@/store/image';
 import { AIImageModelCard } from '@/types/aiModel';
 
@@ -1,7 +1,7 @@
+import { RuntimeImageGenParams, RuntimeImageGenParamsKeys } from 'model-bank';
 import { useCallback, useMemo } from 'react';
 
 import { DEFAULT_ASPECT_RATIO, PRESET_ASPECT_RATIOS } from '@/const/image';
-import { RuntimeImageGenParams, RuntimeImageGenParamsKeys } from '@/libs/standard-parameters/index';
 
 import { useImageStore } from '../../store';
 import { imageGenerationConfigSelectors } from './selectors';
@@ -1,12 +1,11 @@
 /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
 import { ModelProvider } from '@lobechat/model-runtime';
-
-import { gptImage1ParamsSchema } from '@/config/aiModels/openai';
 import {
   ModelParamsSchema,
   RuntimeImageGenParams,
   extractDefaultValues,
-} from '@/libs/standard-parameters/index';
+  gptImage1ParamsSchema,
+} from 'model-bank';
 
 export const DEFAULT_AI_IMAGE_PROVIDER = ModelProvider.OpenAI;
 export const DEFAULT_AI_IMAGE_MODEL = 'gpt-image-1';
@@ -1,7 +1,6 @@
+import { ModelParamsSchema, RuntimeImageGenParams, gptImage1ParamsSchema } from 'model-bank';
 import { describe, expect, it, vi } from 'vitest';
 
-import { gptImage1ParamsSchema } from '@/config/aiModels/openai';
-import { ModelParamsSchema, RuntimeImageGenParams } from '@/libs/standard-parameters/index';
 import { ImageStore } from '@/store/image';
 import { initialState } from '@/store/image/initialState';
 import { AIImageModelCard } from '@/types/aiModel';
@@ -1,4 +1,4 @@
-import { RuntimeImageGenParamsKeys } from '@/libs/standard-parameters/index';
+import { RuntimeImageGenParamsKeys } from 'model-bank';
 
 import { GenerationConfigState } from './initialState';
 
@@ -112,6 +112,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
   "enableCompressHistory": true,
   "enableHistoryCount": true,
   "enableReasoning": false,
+  "enableStreaming": true,
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {
@@ -156,6 +157,7 @@ exports[`settingsSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CON
   "enableCompressHistory": true,
   "enableHistoryCount": true,
   "enableReasoning": false,
+  "enableStreaming": true,
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {