@lobehub/chat 1.119.1 → 1.119.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (227)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +5 -0
  3. package/package.json +5 -3
  4. package/packages/const/src/auth.ts +0 -36
  5. package/packages/const/src/index.ts +3 -1
  6. package/packages/database/src/models/__tests__/aiModel.test.ts +1 -2
  7. package/packages/database/src/models/aiModel.ts +2 -3
  8. package/packages/database/src/repositories/aiInfra/index.test.ts +1 -1
  9. package/packages/database/src/repositories/aiInfra/index.ts +4 -4
  10. package/packages/model-bank/src/aiModels/ai21.ts +1 -1
  11. package/packages/model-bank/src/aiModels/ai302.ts +1 -1
  12. package/packages/model-bank/src/aiModels/ai360.ts +1 -1
  13. package/packages/model-bank/src/aiModels/aihubmix.ts +2 -2
  14. package/packages/model-bank/src/aiModels/akashchat.ts +1 -1
  15. package/packages/model-bank/src/aiModels/anthropic.ts +1 -1
  16. package/packages/model-bank/src/aiModels/azure.ts +1 -1
  17. package/packages/model-bank/src/aiModels/azureai.ts +1 -1
  18. package/packages/model-bank/src/aiModels/baichuan.ts +1 -1
  19. package/packages/model-bank/src/aiModels/bedrock.ts +1 -1
  20. package/packages/model-bank/src/aiModels/bfl.ts +2 -3
  21. package/packages/model-bank/src/aiModels/cloudflare.ts +1 -1
  22. package/packages/model-bank/src/aiModels/cohere.ts +1 -1
  23. package/packages/model-bank/src/aiModels/deepseek.ts +1 -1
  24. package/packages/model-bank/src/aiModels/fal.ts +1 -1
  25. package/packages/model-bank/src/aiModels/fireworksai.ts +1 -1
  26. package/packages/model-bank/src/aiModels/giteeai.ts +1 -1
  27. package/packages/model-bank/src/aiModels/github.ts +1 -1
  28. package/packages/model-bank/src/aiModels/google.ts +2 -3
  29. package/packages/model-bank/src/aiModels/groq.ts +1 -1
  30. package/packages/model-bank/src/aiModels/higress.ts +1 -1
  31. package/packages/model-bank/src/aiModels/huggingface.ts +1 -1
  32. package/packages/model-bank/src/aiModels/hunyuan.ts +1 -1
  33. package/packages/model-bank/src/aiModels/index.ts +1 -1
  34. package/packages/model-bank/src/aiModels/infiniai.ts +1 -1
  35. package/packages/model-bank/src/aiModels/internlm.ts +1 -1
  36. package/packages/model-bank/src/aiModels/jina.ts +1 -1
  37. package/packages/model-bank/src/aiModels/lmstudio.ts +1 -1
  38. package/packages/model-bank/src/aiModels/lobehub.ts +1 -1
  39. package/packages/model-bank/src/aiModels/minimax.ts +1 -1
  40. package/packages/model-bank/src/aiModels/mistral.ts +1 -1
  41. package/packages/model-bank/src/aiModels/modelscope.ts +1 -1
  42. package/packages/model-bank/src/aiModels/moonshot.ts +1 -1
  43. package/packages/model-bank/src/aiModels/novita.ts +1 -1
  44. package/packages/model-bank/src/aiModels/nvidia.ts +1 -1
  45. package/packages/model-bank/src/aiModels/ollama.ts +1 -1
  46. package/packages/model-bank/src/aiModels/openai.ts +1 -1
  47. package/packages/model-bank/src/aiModels/openrouter.ts +1 -1
  48. package/packages/model-bank/src/aiModels/perplexity.ts +1 -1
  49. package/packages/model-bank/src/aiModels/ppio.ts +1 -1
  50. package/packages/model-bank/src/aiModels/qiniu.ts +1 -1
  51. package/packages/model-bank/src/aiModels/qwen.ts +1 -1
  52. package/packages/model-bank/src/aiModels/sambanova.ts +1 -1
  53. package/packages/model-bank/src/aiModels/search1api.ts +1 -1
  54. package/packages/model-bank/src/aiModels/sensenova.ts +1 -1
  55. package/packages/model-bank/src/aiModels/siliconcloud.ts +1 -1
  56. package/packages/model-bank/src/aiModels/spark.ts +1 -1
  57. package/packages/model-bank/src/aiModels/stepfun.ts +1 -1
  58. package/packages/model-bank/src/aiModels/taichu.ts +1 -1
  59. package/packages/model-bank/src/aiModels/tencentcloud.ts +1 -1
  60. package/packages/model-bank/src/aiModels/togetherai.ts +1 -1
  61. package/packages/model-bank/src/aiModels/upstage.ts +1 -1
  62. package/packages/model-bank/src/aiModels/v0.ts +1 -1
  63. package/packages/model-bank/src/aiModels/vertexai.ts +1 -1
  64. package/packages/model-bank/src/aiModels/vllm.ts +1 -1
  65. package/packages/model-bank/src/aiModels/volcengine.ts +1 -1
  66. package/packages/model-bank/src/aiModels/wenxin.ts +1 -1
  67. package/packages/model-bank/src/aiModels/xai.ts +1 -1
  68. package/packages/model-bank/src/aiModels/xinference.ts +1 -1
  69. package/packages/model-bank/src/aiModels/zeroone.ts +1 -1
  70. package/packages/model-bank/src/aiModels/zhipu.ts +1 -1
  71. package/packages/model-bank/src/index.ts +1 -0
  72. package/packages/model-bank/src/standard-parameters/index.ts +48 -0
  73. package/packages/{types/src → model-bank/src/types}/aiModel.ts +12 -1
  74. package/packages/model-bank/src/types/index.ts +1 -0
  75. package/packages/model-runtime/package.json +4 -1
  76. package/packages/model-runtime/src/BaseAI.ts +2 -2
  77. package/packages/model-runtime/src/ModelRuntime.test.ts +4 -4
  78. package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +3 -7
  79. package/packages/model-runtime/src/ai302/index.ts +1 -1
  80. package/packages/model-runtime/src/aihubmix/index.ts +1 -2
  81. package/packages/model-runtime/src/anthropic/index.ts +1 -1
  82. package/packages/model-runtime/src/azureOpenai/index.ts +2 -3
  83. package/packages/model-runtime/src/azureai/index.ts +2 -3
  84. package/packages/model-runtime/src/bedrock/index.ts +1 -1
  85. package/packages/model-runtime/src/bfl/createImage.test.ts +4 -4
  86. package/packages/model-runtime/src/bfl/createImage.ts +2 -2
  87. package/packages/model-runtime/src/bfl/index.ts +1 -1
  88. package/packages/model-runtime/src/cloudflare/index.ts +1 -1
  89. package/packages/model-runtime/src/const/models.ts +64 -0
  90. package/packages/model-runtime/src/fal/index.test.ts +2 -3
  91. package/packages/model-runtime/src/fal/index.ts +1 -1
  92. package/packages/model-runtime/src/github/index.ts +1 -1
  93. package/packages/model-runtime/src/google/createImage.test.ts +1 -1
  94. package/packages/model-runtime/src/google/createImage.ts +1 -1
  95. package/packages/model-runtime/src/google/index.test.ts +1 -1
  96. package/packages/model-runtime/src/google/index.ts +4 -3
  97. package/packages/model-runtime/src/groq/index.ts +1 -1
  98. package/packages/model-runtime/src/helpers/parseToolCalls.ts +1 -2
  99. package/packages/model-runtime/src/huggingface/index.ts +1 -1
  100. package/packages/model-runtime/src/index.ts +3 -1
  101. package/packages/model-runtime/src/infiniai/index.ts +1 -1
  102. package/packages/model-runtime/src/ollama/index.test.ts +1 -1
  103. package/packages/model-runtime/src/ollama/index.ts +2 -3
  104. package/packages/model-runtime/src/openai/index.ts +16 -8
  105. package/packages/model-runtime/src/providerTestUtils.ts +1 -2
  106. package/packages/model-runtime/src/qiniu/index.test.ts +2 -3
  107. package/packages/model-runtime/src/siliconcloud/index.ts +1 -1
  108. package/packages/model-runtime/src/types/chat.ts +2 -22
  109. package/packages/model-runtime/src/{error.ts → types/error.ts} +29 -0
  110. package/packages/model-runtime/src/types/index.ts +4 -0
  111. package/packages/model-runtime/src/types/toolsCalling.ts +48 -0
  112. package/packages/model-runtime/src/types/type.ts +1 -1
  113. package/packages/model-runtime/src/types/usage.ts +27 -0
  114. package/packages/model-runtime/src/utils/anthropicHelpers.test.ts +2 -2
  115. package/packages/model-runtime/src/utils/anthropicHelpers.ts +1 -1
  116. package/packages/model-runtime/src/utils/createError.ts +1 -1
  117. package/packages/model-runtime/src/utils/errorResponse.test.ts +110 -0
  118. package/packages/model-runtime/src/utils/errorResponse.ts +64 -0
  119. package/packages/{utils/src → model-runtime/src/utils}/getFallbackModelProperty.ts +1 -1
  120. package/packages/model-runtime/src/utils/googleErrorParser.test.ts +1 -1
  121. package/packages/model-runtime/src/utils/googleErrorParser.ts +1 -1
  122. package/packages/model-runtime/src/utils/handleOpenAIError.ts +1 -1
  123. package/packages/model-runtime/src/utils/imageToBase64.test.ts +91 -0
  124. package/packages/model-runtime/src/utils/imageToBase64.ts +62 -0
  125. package/packages/model-runtime/src/utils/modelParse.test.ts +2 -2
  126. package/packages/model-runtime/src/utils/modelParse.ts +16 -10
  127. package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +1 -1
  128. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +3 -3
  129. package/packages/model-runtime/src/utils/openaiHelpers.test.ts +2 -2
  130. package/packages/model-runtime/src/utils/openaiHelpers.ts +3 -4
  131. package/packages/model-runtime/src/utils/postProcessModelList.ts +2 -2
  132. package/packages/model-runtime/src/utils/safeParseJSON.test.ts +71 -0
  133. package/packages/model-runtime/src/utils/safeParseJSON.ts +12 -0
  134. package/packages/model-runtime/src/utils/streams/bedrock/claude.ts +1 -1
  135. package/packages/model-runtime/src/utils/streams/bedrock/llama.test.ts +1 -2
  136. package/packages/model-runtime/src/utils/streams/bedrock/llama.ts +1 -1
  137. package/packages/model-runtime/src/utils/streams/google-ai.test.ts +1 -1
  138. package/packages/model-runtime/src/utils/streams/google-ai.ts +1 -1
  139. package/packages/model-runtime/src/utils/streams/ollama.test.ts +1 -1
  140. package/packages/model-runtime/src/utils/streams/ollama.ts +2 -3
  141. package/packages/model-runtime/src/utils/streams/openai/openai.test.ts +1 -2
  142. package/packages/model-runtime/src/utils/streams/openai/openai.ts +1 -1
  143. package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +1 -1
  144. package/packages/model-runtime/src/utils/streams/protocol.ts +3 -3
  145. package/packages/model-runtime/src/utils/streams/vertex-ai.test.ts +1 -1
  146. package/packages/model-runtime/src/utils/streams/vertex-ai.ts +2 -2
  147. package/packages/model-runtime/src/utils/uuid.ts +7 -0
  148. package/packages/model-runtime/src/vertexai/index.ts +1 -1
  149. package/packages/types/src/agent/index.ts +2 -1
  150. package/packages/types/src/aiProvider.ts +10 -2
  151. package/packages/types/src/auth.ts +35 -0
  152. package/packages/types/src/discover/models.ts +1 -1
  153. package/packages/types/src/discover/providers.ts +1 -1
  154. package/packages/types/src/index.ts +4 -0
  155. package/packages/types/src/llm.ts +2 -47
  156. package/packages/types/src/session/agentSession.ts +3 -3
  157. package/packages/types/src/session/index.ts +2 -2
  158. package/packages/types/src/session/sessionGroup.ts +0 -2
  159. package/packages/types/src/user/settings/general.ts +1 -1
  160. package/packages/types/src/user/settings/modelProvider.ts +1 -1
  161. package/packages/utils/src/fetch/fetchSSE.ts +1 -1
  162. package/packages/utils/src/format.ts +2 -3
  163. package/packages/utils/src/index.ts +3 -1
  164. package/packages/utils/src/number.test.ts +1 -2
  165. package/packages/utils/src/number.ts +1 -2
  166. package/packages/utils/src/parseModels.test.ts +1 -2
  167. package/packages/utils/src/parseModels.ts +2 -3
  168. package/packages/utils/src/pricing.test.ts +1 -2
  169. package/packages/utils/src/pricing.ts +1 -1
  170. package/packages/utils/src/server/xor.ts +3 -1
  171. package/src/app/(backend)/middleware/auth/index.ts +1 -2
  172. package/src/app/(backend)/webapi/chat/vertexai/route.ts +1 -1
  173. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +1 -2
  174. package/src/app/[variants]/(main)/discover/(list)/model/features/List/ModelTypeIcon.tsx +1 -2
  175. package/src/app/[variants]/(main)/image/@menu/components/SeedNumberInput/index.tsx +1 -1
  176. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/hooks/useAutoDimensions.ts +4 -3
  177. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +1 -1
  178. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelItem.tsx +1 -1
  179. package/src/app/[variants]/(main)/settings/provider/features/ModelList/SortModelModal/ListItem.tsx +1 -1
  180. package/src/app/[variants]/(main)/settings/provider/features/ModelList/SortModelModal/index.tsx +1 -1
  181. package/src/components/ModelSelect/index.tsx +1 -1
  182. package/src/database/_deprecated/core/migrations/migrateSettingsToUser/type.ts +2 -5
  183. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +1 -1
  184. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +3 -4
  185. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +1 -1
  186. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +3 -2
  187. package/src/libs/trpc/async/context.ts +2 -1
  188. package/src/libs/trpc/edge/context.ts +2 -6
  189. package/src/libs/trpc/lambda/context.ts +1 -1
  190. package/src/migrations/FromV5ToV6/types/v5.ts +2 -2
  191. package/src/migrations/FromV5ToV6/types/v6.ts +2 -1
  192. package/src/server/globalConfig/genServerAiProviderConfig.ts +3 -3
  193. package/src/server/modules/ModelRuntime/index.test.ts +1 -1
  194. package/src/server/modules/ModelRuntime/index.ts +1 -1
  195. package/src/server/routers/async/caller.ts +2 -1
  196. package/src/server/routers/async/image.ts +2 -2
  197. package/src/server/routers/lambda/aiModel.ts +1 -1
  198. package/src/server/services/chunk/index.ts +2 -1
  199. package/src/server/services/generation/index.ts +2 -2
  200. package/src/services/_auth.ts +2 -1
  201. package/src/services/aiModel/server.test.ts +1 -1
  202. package/src/services/aiModel/type.ts +1 -1
  203. package/src/services/chat.ts +1 -1
  204. package/src/services/upload.ts +3 -3
  205. package/src/store/agent/slices/chat/action.ts +1 -1
  206. package/src/store/aiInfra/slices/aiModel/action.ts +6 -6
  207. package/src/store/aiInfra/slices/aiModel/initialState.ts +1 -1
  208. package/src/store/aiInfra/slices/aiModel/selectors.test.ts +1 -1
  209. package/src/store/aiInfra/slices/aiModel/selectors.ts +2 -1
  210. package/src/store/aiInfra/slices/aiProvider/__tests__/action.test.ts +7 -7
  211. package/src/store/aiInfra/slices/aiProvider/action.ts +8 -8
  212. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -1
  213. package/src/store/electron/actions/app.ts +1 -1
  214. package/src/store/image/slices/generationConfig/action.test.ts +1 -1
  215. package/src/store/image/slices/generationConfig/action.ts +1 -1
  216. package/src/store/image/slices/generationConfig/hooks.test.ts +1 -1
  217. package/src/store/image/slices/generationConfig/hooks.ts +6 -3
  218. package/src/store/image/slices/generationConfig/selectors.test.ts +1 -1
  219. package/src/store/user/slices/auth/action.ts +1 -1
  220. package/src/store/user/slices/auth/selectors.ts +3 -4
  221. package/src/store/user/slices/modelList/action.ts +8 -7
  222. package/src/store/user/slices/modelList/selectors/modelProvider.ts +8 -5
  223. package/src/store/user/slices/preference/selectors.ts +3 -2
  224. package/src/store/user/slices/settings/selectors/settings.ts +1 -2
  225. package/src/store/user/slices/sync/selectors.ts +1 -1
  226. package/packages/const/src/image.ts +0 -51
  227. package/packages/{utils/src → model-runtime/src/utils}/getFallbackModelProperty.test.ts +0 -0
package/packages/model-bank/src/aiModels/xinference.ts
@@ -1,4 +1,4 @@
- import { AIChatModelCard } from '@/types/aiModel';
+ import { AIChatModelCard } from '../types/aiModel';

  const xinferenceChatModels: AIChatModelCard[] = [
  {

package/packages/model-bank/src/aiModels/zeroone.ts
@@ -1,4 +1,4 @@
- import { AIChatModelCard } from '@/types/aiModel';
+ import { AIChatModelCard } from '../types/aiModel';

  const zerooneChatModels: AIChatModelCard[] = [
  {

package/packages/model-bank/src/aiModels/zhipu.ts
@@ -1,4 +1,4 @@
- import { AIChatModelCard, AIImageModelCard } from '@/types/aiModel';
+ import { AIChatModelCard, AIImageModelCard } from '../types/aiModel';

  const zhipuChatModels: AIChatModelCard[] = [
  {

package/packages/model-bank/src/index.ts
@@ -1,2 +1,3 @@
  export * from './aiModels';
  export * from './standard-parameters';
+ export * from './types';
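
With the new `./types` barrel re-exported from the package root, consumers can take both runtime values and model-card types from a single `model-bank` entry point instead of reaching into `src/types`. A minimal, shape-agnostic sketch; the consumer file itself is hypothetical:

```ts
// Both runtime values and types now resolve from the package root.
import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
import type { AIChatModelCard } from 'model-bank';

// Kept shape-agnostic on purpose: an empty, typed list plus a count check.
const customChatModels: AIChatModelCard[] = [];

console.log('built-in model entries:', LOBE_DEFAULT_MODEL_LIST.length);
console.log('custom chat models:', customChatModels.length);
```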

package/packages/model-bank/src/standard-parameters/index.ts
@@ -3,6 +3,54 @@ import { z } from 'zod';

  export const MAX_SEED = 2 ** 31 - 1;

+ /**
+  * Default aspect ratio, used when the model does not support a native aspect ratio
+  */
+ export const DEFAULT_ASPECT_RATIO = '1:1';
+
+ export const PRESET_ASPECT_RATIOS = [
+   DEFAULT_ASPECT_RATIO, // '1:1' - square, most common
+   '16:9', // modern display / TV / video standard
+   '9:16', // phone portrait / short video
+   '4:3', // traditional display / photo
+   '3:4', // traditional portrait photo
+   '3:2', // classic photo ratio, landscape
+   '2:3', // classic photo ratio, portrait
+ ];
+
+ /**
+  * Image generation and processing configuration constants
+  */
+ export const IMAGE_GENERATION_CONFIG = {
+   /**
+    * Maximum cover image size in pixels (longest edge)
+    * Used for generating cover images from source images
+    */
+   COVER_MAX_SIZE: 256,
+
+   /**
+    * Maximum thumbnail size in pixels (longest edge)
+    * Used for generating thumbnail images from original images
+    */
+   THUMBNAIL_MAX_SIZE: 512,
+ } as const;
+
+ /**
+  * Default dimension constraints for image upload auto-setting
+  * Used when model schema doesn't provide min/max values
+  */
+ export const DEFAULT_DIMENSION_CONSTRAINTS = {
+   MAX_SIZE: 1024,
+   MIN_SIZE: 512,
+ } as const;
+
+ export const CHAT_MODEL_IMAGE_GENERATION_PARAMS: ModelParamsSchema = {
+   imageUrl: {
+     default: null,
+   },
+   prompt: { default: '' },
+ };
+
  // Define the top-level meta schema - flat structure
  export const ModelParamsMetaSchema = z.object({
  aspectRatio: z
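
These constants are plain data, so callers can, for example, clamp uploaded-image dimensions with `DEFAULT_DIMENSION_CONSTRAINTS` and fall back to `DEFAULT_ASPECT_RATIO` when a model exposes no native aspect-ratio parameter. A sketch under that assumption; the `clampDimension` helper is illustrative and not part of the package:

```ts
import {
  DEFAULT_ASPECT_RATIO,
  DEFAULT_DIMENSION_CONSTRAINTS,
  PRESET_ASPECT_RATIOS,
} from 'model-bank';

// Illustrative helper: clamp a requested edge length into the default range
// used when the model schema does not provide its own min/max.
const clampDimension = (size: number) =>
  Math.min(
    DEFAULT_DIMENSION_CONSTRAINTS.MAX_SIZE,
    Math.max(DEFAULT_DIMENSION_CONSTRAINTS.MIN_SIZE, size),
  );

// Fall back to the default ratio when a requested ratio is not in the preset list.
const ratio = PRESET_ASPECT_RATIOS.includes('21:9') ? '21:9' : DEFAULT_ASPECT_RATIO;

console.log(clampDimension(2048)); // 1024
console.log(clampDimension(100)); // 512
console.log(ratio); // '1:1'
```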

package/packages/{types/src → model-bank/src/types}/aiModel.ts
@@ -1,6 +1,7 @@
- import { ModelParamsSchema } from 'model-bank';
  import { z } from 'zod';

+ import { ModelParamsSchema } from '../standard-parameters';
+
  export type ModelPriceCurrency = 'CNY' | 'USD';

  export const AiModelSourceEnum = {
@@ -8,6 +9,7 @@ export const AiModelSourceEnum = {
  Custom: 'custom',
  Remote: 'remote',
  } as const;
+
  export type AiModelSourceType = (typeof AiModelSourceEnum)[keyof typeof AiModelSourceEnum];

  export type AiModelType =
@@ -41,6 +43,10 @@ export interface ModelAbilities {
  * whether model supports search web
  */
  search?: boolean;
+ /**
+  * whether model supports video
+  */
+ video?: boolean;
  /**
  * whether model supports vision
  */
@@ -71,6 +77,11 @@ export interface LLMParams {
  * @default 0
  */
  presence_penalty?: number;
+ /**
+  * Randomness of the generated text, used to control its creativity and diversity
+  * @default 1
+  */
+ reasoning_effort?: string;
  /**
  * Randomness of the generated text, used to control its creativity and diversity
  * @default 1
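
The two substantive additions here are the `video` flag on `ModelAbilities` and the `reasoning_effort` field on `LLMParams` (its JSDoc is copied verbatim from `temperature` in the source). A hedged sketch of how a caller might consume them; the values are made up for illustration:

```ts
import type { LLMParams, ModelAbilities } from 'model-bank';

// Hypothetical ability set; only fields visible in this diff are used.
const abilities: ModelAbilities = { search: true, video: true, vision: true };

// Gate a video-understanding code path on the new flag.
if (abilities.video) {
  console.log('this model accepts video input');
}

// LLMParams now carries reasoning_effort alongside the sampling parameters.
const params: LLMParams = { reasoning_effort: 'high', temperature: 1 };
console.log(params);
```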

package/packages/model-bank/src/types/index.ts
@@ -0,0 +1 @@
+ export * from './aiModel';

package/packages/model-runtime/package.json
@@ -2,7 +2,10 @@
  "name": "@lobechat/model-runtime",
  "version": "1.0.0",
  "private": true,
- "main": "./src/index.ts",
+ "exports": {
+   ".": "./src/index.ts",
+   "./vertexai": "./src/vertexai/index.ts"
+ },
  "scripts": {
  "test": "vitest",
  "test:coverage": "vitest --coverage"

package/packages/model-runtime/src/BaseAI.ts
@@ -1,6 +1,6 @@
  import OpenAI from 'openai';

- import { ChatModelCard } from '@/types/llm';
+ import { AIBaseModelCard } from 'model-bank';

  import {
  ChatMethodOptions,
@@ -45,7 +45,7 @@ export abstract class LobeOpenAICompatibleRuntime {
  abstract chat(payload: ChatStreamPayload, options?: ChatMethodOptions): Promise<Response>;
  abstract createImage(payload: CreateImagePayload): Promise<CreateImageResponse>;

- abstract models(): Promise<ChatModelCard[]>;
+ abstract models(): Promise<AIBaseModelCard[]>;

  abstract embeddings(
  payload: EmbeddingsPayload,

package/packages/model-runtime/src/ModelRuntime.test.ts
@@ -1,17 +1,17 @@
  // @vitest-environment node
  import { TraceNameMap } from '@lobechat/types';
+ import { ClientSecretPayload } from '@lobechat/types';
  import { Langfuse } from 'langfuse';
  import { LangfuseGenerationClient, LangfuseTraceClient } from 'langfuse-core';
  import { beforeEach, describe, expect, it, vi } from 'vitest';

  import * as langfuseCfg from '@/config/langfuse';
- import { ClientSecretPayload } from '@/const/auth';
- import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '@/libs/model-runtime';
- import { providerRuntimeMap } from '@/libs/model-runtime/runtimeMap';
- import { CreateImagePayload } from '@/libs/model-runtime/types/image';
  import { createTraceOptions } from '@/server/modules/ModelRuntime';

+ import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '.';
  import { AgentChatOptions } from './ModelRuntime';
+ import { providerRuntimeMap } from './runtimeMap';
+ import { CreateImagePayload } from './types/image';

  const specialProviders = [
  { id: 'openai', payload: { apiKey: 'user-openai-key', baseURL: 'user-endpoint' } },

package/packages/model-runtime/src/RouterRuntime/createRuntime.ts
@@ -4,17 +4,11 @@
  import OpenAI, { ClientOptions } from 'openai';
  import { Stream } from 'openai/streaming';

- import { ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
- import { CreateImagePayload, CreateImageResponse } from '@/libs/model-runtime/types/image';
- import {
-   CreateImageOptions,
-   CustomClientOptions,
- } from '@/libs/model-runtime/utils/openaiCompatibleFactory';
- import { postProcessModelList } from '@/libs/model-runtime/utils/postProcessModelList';
  import type { ChatModelCard } from '@/types/llm';

  import { LobeRuntimeAI } from '../BaseAI';
  import { LobeOpenAI } from '../openai';
+ import { CreateImagePayload, CreateImageResponse, ILobeAgentRuntimeErrorType } from '../types';
  import {
  type ChatCompletionErrorPayload,
  ChatMethodOptions,
@@ -25,6 +19,8 @@ import {
  TextToImagePayload,
  TextToSpeechPayload,
  } from '../types';
+ import { CreateImageOptions, CustomClientOptions } from '../utils/openaiCompatibleFactory';
+ import { postProcessModelList } from '../utils/postProcessModelList';
  import { baseRuntimeMap } from './baseRuntimeMap';

  export interface RuntimeItem {

package/packages/model-runtime/src/ai302/index.ts
@@ -1,5 +1,5 @@
- import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';


package/packages/model-runtime/src/aihubmix/index.ts
@@ -1,9 +1,8 @@
  import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
  import urlJoin from 'url-join';

- import { responsesAPIModels } from '@/const/models';
-
  import { createRouterRuntime } from '../RouterRuntime';
+ import { responsesAPIModels } from '../const/models';
  import { ModelProvider } from '../types';
  import { ChatStreamPayload } from '../types/chat';
  import { detectModelProvider, processMultiProviderModelList } from '../utils/modelParse';

package/packages/model-runtime/src/anthropic/index.ts
@@ -1,13 +1,13 @@
  import Anthropic, { ClientOptions } from '@anthropic-ai/sdk';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
  type ChatCompletionErrorPayload,
  ChatMethodOptions,
  ChatStreamPayload,
  ModelProvider,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicHelpers';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';

package/packages/model-runtime/src/azureOpenai/index.ts
@@ -2,10 +2,8 @@ import debug from 'debug';
  import OpenAI, { AzureOpenAI } from 'openai';
  import type { Stream } from 'openai/streaming';

- import { systemToUserModels } from '@/const/models';
-
  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
+ import { systemToUserModels } from '../const/models';
  import {
  ChatMethodOptions,
  ChatStreamPayload,
@@ -14,6 +12,7 @@ import {
  EmbeddingsPayload,
  ModelProvider,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';

package/packages/model-runtime/src/azureai/index.ts
@@ -2,11 +2,10 @@ import createClient, { ModelClient } from '@azure-rest/ai-inference';
  import { AzureKeyCredential } from '@azure/core-auth';
  import OpenAI from 'openai';

- import { systemToUserModels } from '@/const/models';
-
  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
+ import { systemToUserModels } from '../const/models';
  import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
  import { transformResponseToStream } from '../utils/openaiCompatibleFactory';

package/packages/model-runtime/src/bedrock/index.ts
@@ -5,7 +5,6 @@ import {
  } from '@aws-sdk/client-bedrock-runtime';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
  ChatMethodOptions,
  ChatStreamPayload,
@@ -14,6 +13,7 @@ import {
  EmbeddingsPayload,
  ModelProvider,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicHelpers';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';

package/packages/model-runtime/src/bfl/createImage.test.ts
@@ -7,7 +7,7 @@ import { createBflImage } from './createImage';
  import { BflStatusResponse } from './types';

  // Mock external dependencies
- vi.mock('@lobechat/utils', () => ({
+ vi.mock('../utils/imageToBase64', () => ({
  imageUrlToBase64: vi.fn(),
  }));

@@ -188,7 +188,7 @@ describe('createBflImage', () => {
  it('should convert single imageUrl to image_prompt base64', async () => {
  // Arrange
  const { parseDataUri } = await import('../utils/uriParser');
- const { imageUrlToBase64 } = await import('@lobechat/utils');
+ const { imageUrlToBase64 } = await import('../utils/imageToBase64');
  const { asyncifyPolling } = await import('../utils/asyncifyPolling');

  const mockParseDataUri = vi.mocked(parseDataUri);
@@ -291,7 +291,7 @@ describe('createBflImage', () => {
  it('should convert multiple imageUrls for Kontext models', async () => {
  // Arrange
  const { parseDataUri } = await import('../utils/uriParser');
- const { imageUrlToBase64 } = await import('@lobechat/utils');
+ const { imageUrlToBase64 } = await import('../utils/imageToBase64');
  const { asyncifyPolling } = await import('../utils/asyncifyPolling');

  const mockParseDataUri = vi.mocked(parseDataUri);
@@ -351,7 +351,7 @@ describe('createBflImage', () => {
  it('should limit imageUrls to maximum 4 images', async () => {
  // Arrange
  const { parseDataUri } = await import('../utils/uriParser');
- const { imageUrlToBase64 } = await import('@lobechat/utils');
+ const { imageUrlToBase64 } = await import('../utils/imageToBase64');
  const { asyncifyPolling } = await import('../utils/asyncifyPolling');

  const mockParseDataUri = vi.mocked(parseDataUri);

package/packages/model-runtime/src/bfl/createImage.ts
@@ -1,11 +1,11 @@
- import { imageUrlToBase64 } from '@lobechat/utils';
  import createDebug from 'debug';
  import { RuntimeImageGenParamsValue } from 'model-bank';

- import { AgentRuntimeErrorType } from '../error';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { type TaskResult, asyncifyPolling } from '../utils/asyncifyPolling';
  import { AgentRuntimeError } from '../utils/createError';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { parseDataUri } from '../utils/uriParser';
  import {
  BFL_ENDPOINTS,

package/packages/model-runtime/src/bfl/index.ts
@@ -2,7 +2,7 @@ import createDebug from 'debug';
  import { ClientOptions } from 'openai';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { createBflImage } from './createImage';

package/packages/model-runtime/src/cloudflare/index.ts
@@ -1,8 +1,8 @@
  import { ChatModelCard } from '@/types/llm';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import {
  CloudflareStreamTransformer,
  DEFAULT_BASE_URL_PREFIX,

package/packages/model-runtime/src/const/models.ts
@@ -0,0 +1,64 @@
+ export const systemToUserModels = new Set([
+   'o1-preview',
+   'o1-preview-2024-09-12',
+   'o1-mini',
+   'o1-mini-2024-09-12',
+ ]);
+
+ // TODO: temporary approach; refactor into a model card display config later
+ export const disableStreamModels = new Set([
+   'o1',
+   'o1-2024-12-17',
+   'o1-pro',
+   'o1-pro-2025-03-19',
+   /*
+     The official site lists these as unsupported, but streaming works in practice,
+     so they are commented out for now
+     'o3-pro',
+     'o3-pro-2025-06-10',
+   */
+   'computer-use-preview',
+   'computer-use-preview-2025-03-11',
+ ]);
+
+ /**
+  * models use Responses API only
+  */
+ export const responsesAPIModels = new Set([
+   'o1-pro',
+   'o1-pro-2025-03-19',
+   'o3-deep-research',
+   'o3-deep-research-2025-06-26',
+   'o3-pro',
+   'o3-pro-2025-06-10',
+   'o4-mini-deep-research',
+   'o4-mini-deep-research-2025-06-26',
+   'codex-mini-latest',
+   'computer-use-preview',
+   'computer-use-preview-2025-03-11',
+ ]);
+
+ /**
+  * models support context caching
+  */
+ export const contextCachingModels = new Set([
+   'claude-opus-4-latest',
+   'claude-opus-4-20250514',
+   'claude-sonnet-4-latest',
+   'claude-sonnet-4-20250514',
+   'claude-3-7-sonnet-latest',
+   'claude-3-7-sonnet-20250219',
+   'claude-3-5-sonnet-latest',
+   'claude-3-5-sonnet-20241022',
+   'claude-3-5-sonnet-20240620',
+   'claude-3-5-haiku-latest',
+   'claude-3-5-haiku-20241022',
+ ]);
+
+ export const thinkingWithToolClaudeModels = new Set([
+   'claude-opus-4-latest',
+   'claude-opus-4-20250514',
+   'claude-sonnet-4-latest',
+   'claude-sonnet-4-20250514',
+   'claude-3-7-sonnet-latest',
+   'claude-3-7-sonnet-20250219',
+ ]);
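
These sets are lookup tables keyed by model id that provider runtimes consult before building a request. A minimal sketch of the kind of checks they enable, assuming an OpenAI-style payload and an import path written relative to `model-runtime/src`; the `prepareRequest` helper and its shape are illustrative only:

```ts
import { disableStreamModels, responsesAPIModels, systemToUserModels } from './const/models';

interface SimpleMessage {
  content: string;
  role: string;
}

// Illustrative pre-processing, not the runtime's actual implementation.
const prepareRequest = (model: string, messages: SimpleMessage[]) => ({
  // o1-preview / o1-mini reject system prompts, so downgrade them to user messages.
  messages: systemToUserModels.has(model)
    ? messages.map((m) => (m.role === 'system' ? { ...m, role: 'user' } : m))
    : messages,
  model,
  // Some models cannot stream at all; others must go through the Responses API.
  stream: !disableStreamModels.has(model),
  useResponsesAPI: responsesAPIModels.has(model),
});

console.log(prepareRequest('o1-mini', [{ content: 'You are terse.', role: 'system' }]));
```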

package/packages/model-runtime/src/fal/index.test.ts
@@ -1,9 +1,8 @@
  // @vitest-environment node
  import { fal } from '@fal-ai/client';
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
- import { CreateImagePayload } from '@/libs/model-runtime/types/image';
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

+ import { CreateImagePayload } from '../types';
  import { LobeFalAI } from './index';

  // Mock the fal client

package/packages/model-runtime/src/fal/index.ts
@@ -5,7 +5,7 @@ import { RuntimeImageGenParamsValue } from 'model-bank';
  import { ClientOptions } from 'openai';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';


package/packages/model-runtime/src/github/index.ts
@@ -1,5 +1,5 @@
- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';

package/packages/model-runtime/src/google/createImage.test.ts
@@ -1,10 +1,10 @@
  // @vitest-environment edge-runtime
  import { GoogleGenAI } from '@google/genai';
- import * as imageToBase64Module from '@lobechat/utils';
  import { beforeEach, describe, expect, it, vi } from 'vitest';

  import { CreateImagePayload } from '@/libs/model-runtime/types/image';

+ import * as imageToBase64Module from '../utils/imageToBase64';
  import { createGoogleImage } from './createImage';

  const provider = 'google';

package/packages/model-runtime/src/google/createImage.ts
@@ -1,9 +1,9 @@
  import { Content, GoogleGenAI, Part } from '@google/genai';
- import { imageUrlToBase64 } from '@lobechat/utils';

  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { parseDataUri } from '../utils/uriParser';

  /**

package/packages/model-runtime/src/google/index.test.ts
@@ -1,6 +1,5 @@
  // @vitest-environment edge-runtime
  import { GenerateContentResponse, Tool } from '@google/genai';
- import * as imageToBase64Module from '@lobechat/utils';
  import OpenAI from 'openai';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

@@ -8,6 +7,7 @@ import { OpenAIChatMessage } from '@/libs/model-runtime';
  import { ChatStreamPayload } from '@/types/openai/chat';

  import * as debugStreamModule from '../utils/debugStream';
+ import * as imageToBase64Module from '../utils/imageToBase64';
  import { LobeGoogleAI } from './index';

  const provider = 'google';

package/packages/model-runtime/src/google/index.ts
@@ -9,10 +9,8 @@ import {
  Type as SchemaType,
  ThinkingConfig,
  } from '@google/genai';
- import { imageUrlToBase64, safeParseJSON } from '@lobechat/utils';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
  ChatCompletionTool,
  ChatMethodOptions,
@@ -20,11 +18,14 @@ import {
  OpenAIChatMessage,
  UserMessageContentPart,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
  import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { StreamingResponse } from '../utils/response';
+ import { safeParseJSON } from '../utils/safeParseJSON';
  import { GoogleGenerativeAIStream, VertexAIStream } from '../utils/streams';
  import { parseDataUri } from '../utils/uriParser';
  import { createGoogleImage } from './createImage';
@@ -134,7 +135,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  const thinkingConfig: ThinkingConfig = {
  includeThoughts:
  !!thinkingBudget ||
- (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
+ (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
  ? true
  : undefined,
  // https://ai.google.dev/gemini-api/docs/thinking#set-budget
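
The removed and added lines in the last hunk render identically here, so the change appears to be formatting only; the `includeThoughts` logic itself is unchanged. For readers skimming the diff, a standalone restatement of that condition as a pure function (a sketch mirroring the expression shown above):

```ts
// Mirrors the expression in LobeGoogleAI: thoughts are included when a thinking
// budget is set, or when none is set but the model id looks thinking-capable
// (the '-2.5-' family or an explicit 'thinking' tag); otherwise left undefined.
const shouldIncludeThoughts = (model: string, thinkingBudget?: number): true | undefined =>
  !!thinkingBudget ||
  (!thinkingBudget && !!model && (model.includes('-2.5-') || model.includes('thinking')))
    ? true
    : undefined;

console.log(shouldIncludeThoughts('gemini-2.5-flash')); // true
console.log(shouldIncludeThoughts('gemini-2.0-flash')); // undefined
console.log(shouldIncludeThoughts('gemini-2.0-flash', 1024)); // true
```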

package/packages/model-runtime/src/groq/index.ts
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface GroqModelCard {

package/packages/model-runtime/src/helpers/parseToolCalls.ts
@@ -1,7 +1,6 @@
+ import { MessageToolCall, MessageToolCallChunk, MessageToolCallSchema } from '../types';
  import { produce } from 'immer';

- import { MessageToolCall, MessageToolCallChunk, MessageToolCallSchema } from '@/types/message';
-
  export const parseToolCalls = (origin: MessageToolCall[], value: MessageToolCallChunk[]) =>
  produce(origin, (draft) => {
  // if there is no origin, we should parse all the value and set it to draft

package/packages/model-runtime/src/huggingface/index.ts
@@ -3,8 +3,8 @@ import urlJoin from 'url-join';

  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { convertIterableToStream } from '../utils/streams';


package/packages/model-runtime/src/index.ts
@@ -6,7 +6,6 @@ export * from './BaseAI';
  export { LobeBedrockAI } from './bedrock';
  export { LobeBflAI } from './bfl';
  export { LobeDeepSeekAI } from './deepseek';
- export * from './error';
  export { LobeGoogleAI } from './google';
  export { LobeGroq } from './groq';
  export * from './helpers';
@@ -21,9 +20,12 @@ export { LobePerplexityAI } from './perplexity';
  export { LobeQwenAI } from './qwen';
  export { LobeTogetherAI } from './togetherai';
  export * from './types';
+ export * from './types/error';
  export { AgentRuntimeError } from './utils/createError';
+ export { getModelPropertyWithFallback } from './utils/getFallbackModelProperty';
  export { createOpenAICompatibleRuntime } from './utils/openaiCompatibleFactory';
  export { pruneReasoningPayload } from './utils/openaiHelpers';
+ export { parseDataUri } from './utils/uriParser';
  export { LobeVolcengineAI } from './volcengine';
  export { LobeZeroOneAI } from './zeroone';
  export { LobeZhipuAI } from './zhipu';
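
Besides re-pointing the error re-export at `./types/error`, the barrel now exposes `getModelPropertyWithFallback` and `parseDataUri` (the former just moved in from `packages/utils`; see the `getFallbackModelProperty` renames in the file list). A sketch of the consumer import surface after this change; only imports are shown because the helpers' signatures are not part of this diff:

```ts
// One root import now covers the error helper, the error types, and the two
// newly re-exported utilities; no deep paths into src/utils are needed.
import {
  AgentRuntimeError,
  AgentRuntimeErrorType,
  getModelPropertyWithFallback,
  parseDataUri,
} from '@lobechat/model-runtime';

// Signatures intentionally omitted here; confirm them in the package itself.
console.log(
  typeof AgentRuntimeError,
  typeof AgentRuntimeErrorType,
  typeof getModelPropertyWithFallback,
  typeof parseDataUri,
);
```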

package/packages/model-runtime/src/infiniai/index.ts
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface InfiniAIModelCard {

package/packages/model-runtime/src/ollama/index.test.ts
@@ -1,8 +1,8 @@
  import { Ollama } from 'ollama/browser';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { AgentRuntimeError } from '../utils/createError';
  import { LobeOllamaAI } from './index';


package/packages/model-runtime/src/ollama/index.ts
@@ -2,10 +2,7 @@ import { ChatModelCard } from '@lobechat/types';
  import { Ollama, Tool } from 'ollama/browser';
  import { ClientOptions } from 'openai';

- import { createErrorResponse } from '@/utils/errorResponse';
-
  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
  ChatMethodOptions,
  ChatStreamPayload,
@@ -16,8 +13,10 @@ import {
  OpenAIChatMessage,
  PullModelParams,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
+ import { createErrorResponse } from '../utils/errorResponse';
  import { StreamingResponse } from '../utils/response';
  import { OllamaStream, convertIterableToStream, createModelPullStream } from '../utils/streams';
  import { parseDataUri } from '../utils/uriParser';