@lobehub/chat 1.119.1 → 1.120.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (271)
  1. package/.vscode/settings.json +2 -3
  2. package/CHANGELOG.md +58 -0
  3. package/changelog/v1.json +17 -0
  4. package/package.json +5 -6
  5. package/packages/const/src/auth.ts +0 -36
  6. package/packages/const/src/index.ts +3 -1
  7. package/packages/database/src/models/__tests__/aiModel.test.ts +1 -2
  8. package/packages/database/src/models/__tests__/generationBatch.test.ts +47 -1
  9. package/packages/database/src/models/aiModel.ts +2 -3
  10. package/packages/database/src/models/generationBatch.ts +8 -1
  11. package/packages/database/src/repositories/aiInfra/index.test.ts +1 -1
  12. package/packages/database/src/repositories/aiInfra/index.ts +4 -4
  13. package/packages/model-bank/src/aiModels/ai21.ts +1 -1
  14. package/packages/model-bank/src/aiModels/ai302.ts +1 -1
  15. package/packages/model-bank/src/aiModels/ai360.ts +1 -1
  16. package/packages/model-bank/src/aiModels/aihubmix.ts +3 -3
  17. package/packages/model-bank/src/aiModels/akashchat.ts +1 -1
  18. package/packages/model-bank/src/aiModels/anthropic.ts +1 -1
  19. package/packages/model-bank/src/aiModels/azure.ts +1 -1
  20. package/packages/model-bank/src/aiModels/azureai.ts +1 -1
  21. package/packages/model-bank/src/aiModels/baichuan.ts +1 -1
  22. package/packages/model-bank/src/aiModels/bedrock.ts +1 -1
  23. package/packages/model-bank/src/aiModels/bfl.ts +2 -3
  24. package/packages/model-bank/src/aiModels/cloudflare.ts +1 -1
  25. package/packages/model-bank/src/aiModels/cohere.ts +1 -1
  26. package/packages/model-bank/src/aiModels/deepseek.ts +1 -1
  27. package/packages/model-bank/src/aiModels/fal.ts +1 -1
  28. package/packages/model-bank/src/aiModels/fireworksai.ts +1 -1
  29. package/packages/model-bank/src/aiModels/giteeai.ts +1 -1
  30. package/packages/model-bank/src/aiModels/github.ts +1 -1
  31. package/packages/model-bank/src/aiModels/google.ts +6 -7
  32. package/packages/model-bank/src/aiModels/groq.ts +1 -1
  33. package/packages/model-bank/src/aiModels/higress.ts +1 -1
  34. package/packages/model-bank/src/aiModels/huggingface.ts +1 -1
  35. package/packages/model-bank/src/aiModels/hunyuan.ts +1 -1
  36. package/packages/model-bank/src/aiModels/index.ts +1 -1
  37. package/packages/model-bank/src/aiModels/infiniai.ts +1 -1
  38. package/packages/model-bank/src/aiModels/internlm.ts +1 -1
  39. package/packages/model-bank/src/aiModels/jina.ts +1 -1
  40. package/packages/model-bank/src/aiModels/lmstudio.ts +1 -1
  41. package/packages/model-bank/src/aiModels/lobehub.ts +1 -1
  42. package/packages/model-bank/src/aiModels/minimax.ts +1 -1
  43. package/packages/model-bank/src/aiModels/mistral.ts +1 -1
  44. package/packages/model-bank/src/aiModels/modelscope.ts +1 -1
  45. package/packages/model-bank/src/aiModels/moonshot.ts +1 -1
  46. package/packages/model-bank/src/aiModels/novita.ts +1 -1
  47. package/packages/model-bank/src/aiModels/nvidia.ts +1 -1
  48. package/packages/model-bank/src/aiModels/ollama.ts +1 -1
  49. package/packages/model-bank/src/aiModels/openai.ts +1 -1
  50. package/packages/model-bank/src/aiModels/openrouter.ts +3 -3
  51. package/packages/model-bank/src/aiModels/perplexity.ts +1 -1
  52. package/packages/model-bank/src/aiModels/ppio.ts +1 -1
  53. package/packages/model-bank/src/aiModels/qiniu.ts +1 -1
  54. package/packages/model-bank/src/aiModels/qwen.ts +4 -2
  55. package/packages/model-bank/src/aiModels/sambanova.ts +1 -1
  56. package/packages/model-bank/src/aiModels/search1api.ts +1 -1
  57. package/packages/model-bank/src/aiModels/sensenova.ts +1 -1
  58. package/packages/model-bank/src/aiModels/siliconcloud.ts +7 -1
  59. package/packages/model-bank/src/aiModels/spark.ts +1 -1
  60. package/packages/model-bank/src/aiModels/stepfun.ts +1 -1
  61. package/packages/model-bank/src/aiModels/taichu.ts +1 -1
  62. package/packages/model-bank/src/aiModels/tencentcloud.ts +1 -1
  63. package/packages/model-bank/src/aiModels/togetherai.ts +1 -1
  64. package/packages/model-bank/src/aiModels/upstage.ts +1 -1
  65. package/packages/model-bank/src/aiModels/v0.ts +1 -1
  66. package/packages/model-bank/src/aiModels/vertexai.ts +3 -3
  67. package/packages/model-bank/src/aiModels/vllm.ts +1 -1
  68. package/packages/model-bank/src/aiModels/volcengine.ts +1 -1
  69. package/packages/model-bank/src/aiModels/wenxin.ts +1 -1
  70. package/packages/model-bank/src/aiModels/xai.ts +1 -1
  71. package/packages/model-bank/src/aiModels/xinference.ts +1 -1
  72. package/packages/model-bank/src/aiModels/zeroone.ts +1 -1
  73. package/packages/model-bank/src/aiModels/zhipu.ts +1 -1
  74. package/packages/model-bank/src/index.ts +1 -0
  75. package/packages/model-bank/src/standard-parameters/index.ts +48 -0
  76. package/packages/{types/src → model-bank/src/types}/aiModel.ts +12 -1
  77. package/packages/model-bank/src/types/index.ts +1 -0
  78. package/packages/model-runtime/package.json +4 -1
  79. package/packages/model-runtime/src/BaseAI.ts +2 -2
  80. package/packages/model-runtime/src/ModelRuntime.test.ts +4 -4
  81. package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +3 -7
  82. package/packages/model-runtime/src/ai302/index.ts +1 -1
  83. package/packages/model-runtime/src/aihubmix/index.ts +1 -2
  84. package/packages/model-runtime/src/anthropic/index.ts +1 -1
  85. package/packages/model-runtime/src/azureOpenai/index.ts +2 -3
  86. package/packages/model-runtime/src/azureai/index.ts +2 -3
  87. package/packages/model-runtime/src/bedrock/index.ts +1 -1
  88. package/packages/model-runtime/src/bfl/createImage.test.ts +4 -4
  89. package/packages/model-runtime/src/bfl/createImage.ts +2 -2
  90. package/packages/model-runtime/src/bfl/index.ts +1 -1
  91. package/packages/model-runtime/src/cloudflare/index.ts +1 -1
  92. package/packages/model-runtime/src/const/models.ts +64 -0
  93. package/packages/model-runtime/src/fal/index.test.ts +2 -3
  94. package/packages/model-runtime/src/fal/index.ts +1 -1
  95. package/packages/model-runtime/src/github/index.ts +1 -1
  96. package/packages/model-runtime/src/google/createImage.test.ts +1 -1
  97. package/packages/model-runtime/src/google/createImage.ts +53 -25
  98. package/packages/model-runtime/src/google/index.test.ts +1 -1
  99. package/packages/model-runtime/src/google/index.ts +4 -3
  100. package/packages/model-runtime/src/groq/index.ts +1 -1
  101. package/packages/model-runtime/src/helpers/parseToolCalls.ts +1 -2
  102. package/packages/model-runtime/src/huggingface/index.ts +1 -1
  103. package/packages/model-runtime/src/index.ts +3 -1
  104. package/packages/model-runtime/src/infiniai/index.ts +1 -1
  105. package/packages/model-runtime/src/ollama/index.test.ts +1 -1
  106. package/packages/model-runtime/src/ollama/index.ts +2 -3
  107. package/packages/model-runtime/src/openai/index.ts +16 -8
  108. package/packages/model-runtime/src/providerTestUtils.ts +1 -2
  109. package/packages/model-runtime/src/qiniu/index.test.ts +2 -3
  110. package/packages/model-runtime/src/qwen/index.ts +1 -1
  111. package/packages/model-runtime/src/siliconcloud/index.ts +2 -2
  112. package/packages/model-runtime/src/types/chat.ts +2 -22
  113. package/packages/model-runtime/src/{error.ts → types/error.ts} +29 -0
  114. package/packages/model-runtime/src/types/index.ts +4 -0
  115. package/packages/model-runtime/src/types/toolsCalling.ts +48 -0
  116. package/packages/model-runtime/src/types/type.ts +1 -1
  117. package/packages/model-runtime/src/types/usage.ts +27 -0
  118. package/packages/model-runtime/src/utils/anthropicHelpers.test.ts +2 -2
  119. package/packages/model-runtime/src/utils/anthropicHelpers.ts +1 -1
  120. package/packages/model-runtime/src/utils/createError.ts +1 -1
  121. package/packages/model-runtime/src/utils/errorResponse.test.ts +110 -0
  122. package/packages/model-runtime/src/utils/errorResponse.ts +64 -0
  123. package/packages/{utils/src → model-runtime/src/utils}/getFallbackModelProperty.ts +1 -1
  124. package/packages/model-runtime/src/utils/googleErrorParser.test.ts +1 -1
  125. package/packages/model-runtime/src/utils/googleErrorParser.ts +1 -1
  126. package/packages/model-runtime/src/utils/handleOpenAIError.ts +1 -1
  127. package/packages/model-runtime/src/utils/imageToBase64.test.ts +91 -0
  128. package/packages/model-runtime/src/utils/imageToBase64.ts +62 -0
  129. package/packages/model-runtime/src/utils/modelParse.test.ts +2 -2
  130. package/packages/model-runtime/src/utils/modelParse.ts +16 -10
  131. package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +1 -1
  132. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +3 -3
  133. package/packages/model-runtime/src/utils/openaiHelpers.test.ts +2 -2
  134. package/packages/model-runtime/src/utils/openaiHelpers.ts +3 -4
  135. package/packages/model-runtime/src/utils/postProcessModelList.ts +2 -2
  136. package/packages/model-runtime/src/utils/safeParseJSON.test.ts +71 -0
  137. package/packages/model-runtime/src/utils/safeParseJSON.ts +12 -0
  138. package/packages/model-runtime/src/utils/streams/bedrock/claude.ts +1 -1
  139. package/packages/model-runtime/src/utils/streams/bedrock/llama.test.ts +1 -2
  140. package/packages/model-runtime/src/utils/streams/bedrock/llama.ts +1 -1
  141. package/packages/model-runtime/src/utils/streams/google-ai.test.ts +1 -1
  142. package/packages/model-runtime/src/utils/streams/google-ai.ts +1 -1
  143. package/packages/model-runtime/src/utils/streams/ollama.test.ts +1 -1
  144. package/packages/model-runtime/src/utils/streams/ollama.ts +2 -3
  145. package/packages/model-runtime/src/utils/streams/openai/openai.test.ts +1 -2
  146. package/packages/model-runtime/src/utils/streams/openai/openai.ts +1 -1
  147. package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +1 -1
  148. package/packages/model-runtime/src/utils/streams/protocol.ts +3 -3
  149. package/packages/model-runtime/src/utils/streams/vertex-ai.test.ts +1 -1
  150. package/packages/model-runtime/src/utils/streams/vertex-ai.ts +2 -2
  151. package/packages/model-runtime/src/utils/uuid.ts +7 -0
  152. package/packages/model-runtime/src/vertexai/index.ts +1 -1
  153. package/packages/types/src/agent/index.ts +2 -1
  154. package/packages/types/src/aiProvider.ts +10 -2
  155. package/packages/types/src/auth.ts +35 -0
  156. package/packages/types/src/discover/models.ts +1 -1
  157. package/packages/types/src/discover/providers.ts +1 -1
  158. package/packages/types/src/index.ts +4 -0
  159. package/packages/types/src/llm.ts +2 -47
  160. package/packages/types/src/session/agentSession.ts +3 -3
  161. package/packages/types/src/session/index.ts +2 -2
  162. package/packages/types/src/session/sessionGroup.ts +0 -2
  163. package/packages/types/src/user/settings/general.ts +1 -1
  164. package/packages/types/src/user/settings/modelProvider.ts +1 -1
  165. package/packages/utils/src/fetch/fetchSSE.ts +1 -1
  166. package/packages/utils/src/format.ts +2 -3
  167. package/packages/utils/src/index.ts +3 -1
  168. package/packages/utils/src/number.test.ts +1 -2
  169. package/packages/utils/src/number.ts +1 -2
  170. package/packages/utils/src/parseModels.test.ts +1 -2
  171. package/packages/utils/src/parseModels.ts +2 -3
  172. package/packages/utils/src/pricing.test.ts +1 -2
  173. package/packages/utils/src/pricing.ts +1 -1
  174. package/packages/utils/src/server/xor.ts +3 -1
  175. package/src/app/(backend)/middleware/auth/index.ts +1 -2
  176. package/src/app/(backend)/webapi/chat/vertexai/route.ts +1 -1
  177. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +1 -2
  178. package/src/app/[variants]/(main)/(mobile)/me/settings/features/useCategory.tsx +2 -16
  179. package/src/app/[variants]/(main)/chat/@session/_layout/Desktop/SessionHeader.tsx +1 -3
  180. package/src/app/[variants]/(main)/chat/@session/_layout/Mobile/SessionHeader.tsx +1 -3
  181. package/src/app/[variants]/(main)/discover/(list)/model/features/List/ModelTypeIcon.tsx +1 -2
  182. package/src/app/[variants]/(main)/image/@menu/components/SeedNumberInput/index.tsx +1 -1
  183. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/hooks/useAutoDimensions.ts +4 -3
  184. package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +3 -21
  185. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +1 -1
  186. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelItem.tsx +1 -1
  187. package/src/app/[variants]/(main)/settings/provider/features/ModelList/SortModelModal/ListItem.tsx +1 -1
  188. package/src/app/[variants]/(main)/settings/provider/features/ModelList/SortModelModal/index.tsx +1 -1
  189. package/src/components/ModelSelect/index.tsx +1 -1
  190. package/src/config/featureFlags/schema.test.ts +1 -2
  191. package/src/config/featureFlags/schema.ts +0 -6
  192. package/src/config/featureFlags/utils/parser.test.ts +7 -7
  193. package/src/database/_deprecated/core/index.ts +0 -1
  194. package/src/database/_deprecated/core/migrations/migrateSettingsToUser/type.ts +2 -5
  195. package/src/database/_deprecated/core/model.ts +4 -38
  196. package/src/database/_deprecated/models/message.ts +1 -1
  197. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +1 -1
  198. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +3 -4
  199. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +1 -1
  200. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +3 -2
  201. package/src/layout/GlobalProvider/StoreInitialization.tsx +0 -3
  202. package/src/libs/trpc/async/context.ts +2 -1
  203. package/src/libs/trpc/edge/context.ts +2 -6
  204. package/src/libs/trpc/lambda/context.ts +1 -1
  205. package/src/migrations/FromV5ToV6/types/v5.ts +2 -2
  206. package/src/migrations/FromV5ToV6/types/v6.ts +2 -1
  207. package/src/server/globalConfig/genServerAiProviderConfig.ts +3 -3
  208. package/src/server/modules/ModelRuntime/index.test.ts +1 -1
  209. package/src/server/modules/ModelRuntime/index.ts +1 -1
  210. package/src/server/routers/async/caller.ts +2 -1
  211. package/src/server/routers/async/image.ts +2 -2
  212. package/src/server/routers/lambda/aiModel.ts +1 -1
  213. package/src/server/services/chunk/index.ts +2 -1
  214. package/src/server/services/generation/index.ts +2 -2
  215. package/src/services/_auth.ts +2 -1
  216. package/src/services/aiModel/server.test.ts +1 -1
  217. package/src/services/aiModel/type.ts +1 -1
  218. package/src/services/chat.ts +1 -1
  219. package/src/services/upload.ts +3 -3
  220. package/src/store/agent/slices/chat/action.ts +1 -1
  221. package/src/store/aiInfra/slices/aiModel/action.ts +6 -6
  222. package/src/store/aiInfra/slices/aiModel/initialState.ts +1 -1
  223. package/src/store/aiInfra/slices/aiModel/selectors.test.ts +1 -1
  224. package/src/store/aiInfra/slices/aiModel/selectors.ts +2 -1
  225. package/src/store/aiInfra/slices/aiProvider/__tests__/action.test.ts +7 -7
  226. package/src/store/aiInfra/slices/aiProvider/action.ts +8 -8
  227. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -1
  228. package/src/store/electron/actions/app.ts +1 -1
  229. package/src/store/image/slices/generationConfig/action.test.ts +1 -1
  230. package/src/store/image/slices/generationConfig/action.ts +1 -1
  231. package/src/store/image/slices/generationConfig/hooks.test.ts +1 -1
  232. package/src/store/image/slices/generationConfig/hooks.ts +6 -3
  233. package/src/store/image/slices/generationConfig/selectors.test.ts +1 -1
  234. package/src/store/serverConfig/selectors.test.ts +0 -1
  235. package/src/store/user/initialState.ts +1 -4
  236. package/src/store/user/selectors.ts +0 -1
  237. package/src/store/user/slices/auth/action.ts +1 -1
  238. package/src/store/user/slices/auth/selectors.ts +3 -4
  239. package/src/store/user/slices/modelList/action.ts +8 -7
  240. package/src/store/user/slices/modelList/selectors/modelProvider.ts +8 -5
  241. package/src/store/user/slices/preference/selectors.ts +3 -2
  242. package/src/store/user/slices/settings/selectors/settings.ts +1 -2
  243. package/src/store/user/store.ts +1 -4
  244. package/docs/self-hosting/advanced/webrtc.mdx +0 -86
  245. package/docs/self-hosting/advanced/webrtc.zh-CN.mdx +0 -80
  246. package/packages/const/src/image.ts +0 -51
  247. package/src/app/[variants]/(main)/settings/sync/features/Alert.tsx +0 -53
  248. package/src/app/[variants]/(main)/settings/sync/features/DeviceInfo/Card.tsx +0 -42
  249. package/src/app/[variants]/(main)/settings/sync/features/DeviceInfo/DeviceName.tsx +0 -62
  250. package/src/app/[variants]/(main)/settings/sync/features/DeviceInfo/SystemIcon.tsx +0 -31
  251. package/src/app/[variants]/(main)/settings/sync/features/DeviceInfo/index.tsx +0 -103
  252. package/src/app/[variants]/(main)/settings/sync/features/WebRTC/ChannelNameInput.tsx +0 -45
  253. package/src/app/[variants]/(main)/settings/sync/features/WebRTC/SyncSwitch/index.css +0 -238
  254. package/src/app/[variants]/(main)/settings/sync/features/WebRTC/SyncSwitch/index.tsx +0 -79
  255. package/src/app/[variants]/(main)/settings/sync/features/WebRTC/generateRandomRoomName.ts +0 -4
  256. package/src/app/[variants]/(main)/settings/sync/features/WebRTC/index.tsx +0 -103
  257. package/src/app/[variants]/(main)/settings/sync/index.tsx +0 -17
  258. package/src/app/[variants]/(main)/settings/sync/page.tsx +0 -29
  259. package/src/database/_deprecated/core/sync.ts +0 -321
  260. package/src/features/SyncStatusInspector/DisableSync.tsx +0 -79
  261. package/src/features/SyncStatusInspector/EnableSync.tsx +0 -132
  262. package/src/features/SyncStatusInspector/EnableTag.tsx +0 -66
  263. package/src/features/SyncStatusInspector/index.tsx +0 -27
  264. package/src/hooks/useSyncData.ts +0 -50
  265. package/src/services/__tests__/sync.test.ts +0 -56
  266. package/src/services/sync.ts +0 -19
  267. package/src/store/user/slices/sync/action.test.ts +0 -164
  268. package/src/store/user/slices/sync/action.ts +0 -101
  269. package/src/store/user/slices/sync/initialState.ts +0 -13
  270. package/src/store/user/slices/sync/selectors.ts +0 -20
  271. /package/packages/{utils/src → model-runtime/src/utils}/getFallbackModelProperty.test.ts +0 -0
@@ -0,0 +1,64 @@
+ export const systemToUserModels = new Set([
+   'o1-preview',
+   'o1-preview-2024-09-12',
+   'o1-mini',
+   'o1-mini-2024-09-12',
+ ]);
+
+ // TODO: temporary implementation; refactor into model card display config later
+ export const disableStreamModels = new Set([
+   'o1',
+   'o1-2024-12-17',
+   'o1-pro',
+   'o1-pro-2025-03-19',
+   /*
+     The official docs list these as not supporting streaming, but in practice they do, so they are commented out for now
+     'o3-pro',
+     'o3-pro-2025-06-10',
+   */
+   'computer-use-preview',
+   'computer-use-preview-2025-03-11',
+ ]);
+
+ /**
+  * models use Responses API only
+  */
+ export const responsesAPIModels = new Set([
+   'o1-pro',
+   'o1-pro-2025-03-19',
+   'o3-deep-research',
+   'o3-deep-research-2025-06-26',
+   'o3-pro',
+   'o3-pro-2025-06-10',
+   'o4-mini-deep-research',
+   'o4-mini-deep-research-2025-06-26',
+   'codex-mini-latest',
+   'computer-use-preview',
+   'computer-use-preview-2025-03-11',
+ ]);
+
+ /**
+  * models support context caching
+  */
+ export const contextCachingModels = new Set([
+   'claude-opus-4-latest',
+   'claude-opus-4-20250514',
+   'claude-sonnet-4-latest',
+   'claude-sonnet-4-20250514',
+   'claude-3-7-sonnet-latest',
+   'claude-3-7-sonnet-20250219',
+   'claude-3-5-sonnet-latest',
+   'claude-3-5-sonnet-20241022',
+   'claude-3-5-sonnet-20240620',
+   'claude-3-5-haiku-latest',
+   'claude-3-5-haiku-20241022',
+ ]);
+
+ export const thinkingWithToolClaudeModels = new Set([
+   'claude-opus-4-latest',
+   'claude-opus-4-20250514',
+   'claude-sonnet-4-latest',
+   'claude-sonnet-4-20250514',
+   'claude-3-7-sonnet-latest',
+   'claude-3-7-sonnet-20250219',
+ ]);
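These sets are plain capability lookups keyed by model id. A minimal sketch of how a runtime might consume them; this is not part of the diff, and the relative import path only mirrors the `../const/models` usage that appears later in the openai hunk:

```ts
// Illustrative only: resolve per-model behaviour from the capability sets above.
import { disableStreamModels, responsesAPIModels, systemToUserModels } from '../const/models';

const resolveModelBehaviour = (model: string) => ({
  // o1-preview / o1-mini era models reject `system` messages, so they get remapped to `user`
  convertSystemToUser: systemToUserModels.has(model),
  // some models must be called without streaming
  stream: !disableStreamModels.has(model),
  // these models are only reachable through the OpenAI Responses API
  useResponsesAPI: responsesAPIModels.has(model),
});
```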
@@ -1,9 +1,8 @@
  // @vitest-environment node
  import { fal } from '@fal-ai/client';
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
- import { CreateImagePayload } from '@/libs/model-runtime/types/image';
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

+ import { CreateImagePayload } from '../types';
  import { LobeFalAI } from './index';

  // Mock the fal client
@@ -5,7 +5,7 @@ import { RuntimeImageGenParamsValue } from 'model-bank';
  import { ClientOptions } from 'openai';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';

@@ -1,5 +1,5 @@
- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';
@@ -1,10 +1,10 @@
  // @vitest-environment edge-runtime
  import { GoogleGenAI } from '@google/genai';
- import * as imageToBase64Module from '@lobechat/utils';
  import { beforeEach, describe, expect, it, vi } from 'vitest';

  import { CreateImagePayload } from '@/libs/model-runtime/types/image';

+ import * as imageToBase64Module from '../utils/imageToBase64';
  import { createGoogleImage } from './createImage';

  const provider = 'google';
@@ -1,11 +1,45 @@
  import { Content, GoogleGenAI, Part } from '@google/genai';
- import { imageUrlToBase64 } from '@lobechat/utils';

  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { parseDataUri } from '../utils/uriParser';

+ // Maximum number of images allowed for processing
+ const MAX_IMAGE_COUNT = 10;
+
+ /**
+  * Process a single image URL and convert it to Google AI Part format
+  */
+ async function processImageForParts(imageUrl: string): Promise<Part> {
+   const { mimeType, base64, type } = parseDataUri(imageUrl);
+
+   if (type === 'base64') {
+     if (!base64) {
+       throw new TypeError("Image URL doesn't contain base64 data");
+     }
+
+     return {
+       inlineData: {
+         data: base64,
+         mimeType: mimeType || 'image/png',
+       },
+     };
+   } else if (type === 'url') {
+     const { base64: urlBase64, mimeType: urlMimeType } = await imageUrlToBase64(imageUrl);
+
+     return {
+       inlineData: {
+         data: urlBase64,
+         mimeType: urlMimeType,
+       },
+     };
+   } else {
+     throw new TypeError(`currently we don't support image url: ${imageUrl}`);
+   }
+ }
+
  /**
   * Extract image data from generateContent response
   */
@@ -71,36 +105,30 @@ async function generateImageByChatModel(
    const { model, params } = payload;
    const actualModel = model.replace(':image', '');

+   // Check for conflicting image parameters
+   if (params.imageUrl && params.imageUrls && params.imageUrls.length > 0) {
+     throw new TypeError('Cannot provide both imageUrl and imageUrls parameters simultaneously');
+   }
+
    // Build content parts
    const parts: Part[] = [{ text: params.prompt }];

    // Add image for editing if provided
    if (params.imageUrl && params.imageUrl !== null) {
-     const { mimeType, base64, type } = parseDataUri(params.imageUrl);
-
-     if (type === 'base64') {
-       if (!base64) {
-         throw new TypeError("Image URL doesn't contain base64 data");
-       }
-
-       parts.push({
-         inlineData: {
-           data: base64,
-           mimeType: mimeType || 'image/png',
-         },
-       });
-     } else if (type === 'url') {
-       const { base64: urlBase64, mimeType: urlMimeType } = await imageUrlToBase64(params.imageUrl);
-
-       parts.push({
-         inlineData: {
-           data: urlBase64,
-           mimeType: urlMimeType,
-         },
-       });
-     } else {
-       throw new TypeError(`currently we don't support image url: ${params.imageUrl}`);
+     const imagePart = await processImageForParts(params.imageUrl);
+     parts.push(imagePart);
+   }
+
+   // Add multiple images for editing if provided
+   if (params.imageUrls && Array.isArray(params.imageUrls) && params.imageUrls.length > 0) {
+     if (params.imageUrls.length > MAX_IMAGE_COUNT) {
+       throw new TypeError(`Too many images provided. Maximum ${MAX_IMAGE_COUNT} images allowed`);
      }
+
+     const imageParts = await Promise.all(
+       params.imageUrls.map((imageUrl) => processImageForParts(imageUrl)),
+     );
+     parts.push(...imageParts);
    }

    const contents: Content[] = [
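The refactor above pulls the single-image conversion into `processImageForParts` so the new multi-image path can reuse it through `Promise.all`. An illustrative call shape follows; the model id is hypothetical and the `params` field names simply mirror the code above:

```ts
// Hypothetical payload: multi-image editing now goes through `imageUrls`.
// Supplying both `imageUrl` and `imageUrls`, or more than 10 URLs, throws a TypeError.
const payload = {
  model: 'gemini-2.5-flash-image:image', // illustrative id; the ':image' suffix is stripped internally
  params: {
    prompt: 'Blend these photos into a single collage',
    imageUrls: [
      'data:image/png;base64,iVBORw0KGgo...', // data URIs are inlined directly
      'https://example.com/photo.jpg', // http(s) URLs are fetched and converted via imageUrlToBase64
    ],
  },
};
```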
@@ -1,6 +1,5 @@
  // @vitest-environment edge-runtime
  import { GenerateContentResponse, Tool } from '@google/genai';
- import * as imageToBase64Module from '@lobechat/utils';
  import OpenAI from 'openai';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

@@ -8,6 +7,7 @@ import { OpenAIChatMessage } from '@/libs/model-runtime';
  import { ChatStreamPayload } from '@/types/openai/chat';

  import * as debugStreamModule from '../utils/debugStream';
+ import * as imageToBase64Module from '../utils/imageToBase64';
  import { LobeGoogleAI } from './index';

  const provider = 'google';
@@ -9,10 +9,8 @@ import {
    Type as SchemaType,
    ThinkingConfig,
  } from '@google/genai';
- import { imageUrlToBase64, safeParseJSON } from '@lobechat/utils';

  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
    ChatCompletionTool,
    ChatMethodOptions,
@@ -20,11 +18,14 @@ import {
    OpenAIChatMessage,
    UserMessageContentPart,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { CreateImagePayload, CreateImageResponse } from '../types/image';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
  import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { StreamingResponse } from '../utils/response';
+ import { safeParseJSON } from '../utils/safeParseJSON';
  import { GoogleGenerativeAIStream, VertexAIStream } from '../utils/streams';
  import { parseDataUri } from '../utils/uriParser';
  import { createGoogleImage } from './createImage';
@@ -134,7 +135,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
      const thinkingConfig: ThinkingConfig = {
        includeThoughts:
          !!thinkingBudget ||
-         (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
+         (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
            ? true
            : undefined,
        // https://ai.google.dev/gemini-api/docs/thinking#set-budget
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface GroqModelCard {
@@ -1,7 +1,6 @@
+ import { MessageToolCall, MessageToolCallChunk, MessageToolCallSchema } from '../types';
  import { produce } from 'immer';

- import { MessageToolCall, MessageToolCallChunk, MessageToolCallSchema } from '@/types/message';
-
  export const parseToolCalls = (origin: MessageToolCall[], value: MessageToolCallChunk[]) =>
    produce(origin, (draft) => {
      // if there is no origin, we should parse all the value and set it to draft
@@ -3,8 +3,8 @@ import urlJoin from 'url-join';

  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { convertIterableToStream } from '../utils/streams';

@@ -6,7 +6,6 @@ export * from './BaseAI';
  export { LobeBedrockAI } from './bedrock';
  export { LobeBflAI } from './bfl';
  export { LobeDeepSeekAI } from './deepseek';
- export * from './error';
  export { LobeGoogleAI } from './google';
  export { LobeGroq } from './groq';
  export * from './helpers';
@@ -21,9 +20,12 @@ export { LobePerplexityAI } from './perplexity';
  export { LobeQwenAI } from './qwen';
  export { LobeTogetherAI } from './togetherai';
  export * from './types';
+ export * from './types/error';
  export { AgentRuntimeError } from './utils/createError';
+ export { getModelPropertyWithFallback } from './utils/getFallbackModelProperty';
  export { createOpenAICompatibleRuntime } from './utils/openaiCompatibleFactory';
  export { pruneReasoningPayload } from './utils/openaiHelpers';
+ export { parseDataUri } from './utils/uriParser';
  export { LobeVolcengineAI } from './volcengine';
  export { LobeZeroOneAI } from './zeroone';
  export { LobeZhipuAI } from './zhipu';
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

- import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface InfiniAIModelCard {
@@ -1,8 +1,8 @@
  import { Ollama } from 'ollama/browser';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { AgentRuntimeErrorType } from '../error';
  import { ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { AgentRuntimeError } from '../utils/createError';
  import { LobeOllamaAI } from './index';

@@ -2,10 +2,7 @@ import { ChatModelCard } from '@lobechat/types';
  import { Ollama, Tool } from 'ollama/browser';
  import { ClientOptions } from 'openai';

- import { createErrorResponse } from '@/utils/errorResponse';
-
  import { LobeRuntimeAI } from '../BaseAI';
- import { AgentRuntimeErrorType } from '../error';
  import {
    ChatMethodOptions,
    ChatStreamPayload,
@@ -16,8 +13,10 @@ import {
    OpenAIChatMessage,
    PullModelParams,
  } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
+ import { createErrorResponse } from '../utils/errorResponse';
  import { StreamingResponse } from '../utils/response';
  import { OllamaStream, convertIterableToStream, createModelPullStream } from '../utils/streams';
  import { parseDataUri } from '../utils/uriParser';
@@ -1,5 +1,4 @@
- import { responsesAPIModels } from '@/const/models';
-
+ import { responsesAPIModels } from '../const/models';
  import { ChatStreamPayload, ModelProvider } from '../types';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
@@ -19,7 +18,7 @@ const supportsFlexTier = (model: string) => {
    if (model.startsWith('o3-mini')) {
      return false;
    }
-   return flexSupportedModels.some(supportedModel => model.startsWith(supportedModel));
+   return flexSupportedModels.some((supportedModel) => model.startsWith(supportedModel));
  };

  export const LobeOpenAI = createOpenAICompatibleRuntime({
@@ -54,7 +53,12 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
        } as any;
      }

-     return { ...rest, model, ...(enableServiceTierFlex && supportsFlexTier(model) && { service_tier: 'flex' }), stream: payload.stream ?? true };
+     return {
+       ...rest,
+       model,
+       ...(enableServiceTierFlex && supportsFlexTier(model) && { service_tier: 'flex' }),
+       stream: payload.stream ?? true,
+     };
    },
  },
  debug: {
@@ -97,13 +101,17 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
        tools: openaiTools as any,
        // computer-use series must set truncation as auto
        ...(model.startsWith('computer-use') && { truncation: 'auto' }),
-       text: verbosity
-         ? { verbosity }
-         : undefined,
+       text: verbosity ? { verbosity } : undefined,
      }) as any;
    }

-   return { ...rest, model, ...(enableServiceTierFlex && supportsFlexTier(model) && { service_tier: 'flex' }), stream: payload.stream ?? true, tools: openaiTools } as any;
+   return {
+     ...rest,
+     model,
+     ...(enableServiceTierFlex && supportsFlexTier(model) && { service_tier: 'flex' }),
+     stream: payload.stream ?? true,
+     tools: openaiTools,
+   } as any;
    },
  },
});
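For reference, the conditional spread in both `handlePayload` branches only attaches `service_tier: 'flex'` when the flag is on and the model qualifies. A self-contained sketch of that pattern; the model list below is illustrative, only the `o3-mini` exclusion is shown in the diff:

```ts
// Sketch of the conditional-spread pattern above; `flexSupportedModels` contents are assumed.
const flexSupportedModels = ['o3', 'o4-mini'];

const supportsFlexTier = (model: string) =>
  !model.startsWith('o3-mini') && flexSupportedModels.some((m) => model.startsWith(m));

const buildChatPayload = (
  payload: { model: string; stream?: boolean },
  enableServiceTierFlex: boolean,
) => ({
  ...payload,
  // the spread evaluates to `false` (adding nothing) unless both conditions hold
  ...(enableServiceTierFlex && supportsFlexTier(payload.model) && { service_tier: 'flex' }),
  stream: payload.stream ?? true,
});
```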
@@ -1,8 +1,7 @@
  import OpenAI from 'openai';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
-
+ import { LobeOpenAICompatibleRuntime } from './BaseAI';
  import * as debugStreamModule from './utils/debugStream';

  interface TesstProviderParams {
@@ -1,7 +1,6 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
+ import { ModelProvider } from '../types';
  import { LobeQiniuAI } from './index';

  const provider = ModelProvider.Qiniu;
@@ -35,7 +35,7 @@ export const LobeQwenAI = createOpenAICompatibleRuntime({
              thinking_budget:
                thinking?.budget_tokens === 0 ? 0 : thinking?.budget_tokens || undefined,
            }
-         : ['qwen3', 'qwen-turbo', 'qwen-plus'].some((keyword) =>
+         : ['qwen3', 'qwen-turbo', 'qwen-plus', 'deepseek-v3.1'].some((keyword) =>
              model.toLowerCase().includes(keyword),
            )
            ? {
@@ -1,5 +1,5 @@
- import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
+ import { AgentRuntimeErrorType } from '../types/error';
  import { processMultiProviderModelList } from '../utils/modelParse';
  import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

@@ -45,7 +45,7 @@ export const LobeSiliconCloudAI = createOpenAICompatibleRuntime({

      return {
        ...rest,
-       ...(['qwen3'].some((keyword) => model.toLowerCase().includes(keyword))
+       ...(['qwen3', 'deepseek-v3.1'].some((keyword) => model.toLowerCase().includes(keyword))
          ? {
              enable_thinking: thinking !== undefined ? thinking.type === 'enabled' : false,
              thinking_budget:
@@ -1,25 +1,5 @@
- import type { PartialDeep } from 'type-fest';
-
- import { ModelTokensUsage, ToolFunction } from '@/types/message';
-
- export interface MessageToolCall {
-   /**
-    * The function that the model called.
-    */
-   function: ToolFunction;
-
-   /**
-    * The ID of the tool call.
-    */
-   id: string;
-
-   /**
-    * The type of the tool. Currently, only `function` is supported.
-    */
-   type: 'function' | string;
- }
-
- export type MessageToolCallChunk = PartialDeep<MessageToolCall> & { index: number };
+ import { MessageToolCall, MessageToolCallChunk } from './toolsCalling';
+ import { ModelTokensUsage } from './usage';

  export type LLMRoleType = 'user' | 'system' | 'assistant' | 'function' | 'tool';

@@ -37,3 +37,32 @@ export const AGENT_RUNTIME_ERROR_SET = new Set<string>(Object.values(AgentRuntim

  export type ILobeAgentRuntimeErrorType =
    (typeof AgentRuntimeErrorType)[keyof typeof AgentRuntimeErrorType];
+
+ /* eslint-disable sort-keys-fix/sort-keys-fix */
+ export const StandardErrorType = {
+   // ******* Client Error ******* //
+   BadRequest: 400,
+   Unauthorized: 401,
+   Forbidden: 403,
+   ContentNotFound: 404,
+   MethodNotAllowed: 405,
+   TooManyRequests: 429,
+
+   // ******* Server Error ******* //
+   InternalServerError: 500,
+   BadGateway: 502,
+   ServiceUnavailable: 503,
+   GatewayTimeout: 504,
+ } as const;
+ /* eslint-enable */
+
+ export type ErrorType = (typeof StandardErrorType)[keyof typeof StandardErrorType];
+
+ /**
+  * Chat message error object
+  */
+ export interface ChatMessageError {
+   body?: any;
+   message: string;
+   type: ErrorType | ILobeAgentRuntimeErrorType;
+ }
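A minimal sketch of how the relocated error types compose; the values below are illustrative, not taken from the package:

```ts
// Illustrative only: a 429 surfaced through the new ChatMessageError shape.
const rateLimitError: ChatMessageError = {
  body: { provider: 'openai' }, // optional, free-form context
  message: 'Rate limit exceeded, please retry later',
  type: StandardErrorType.TooManyRequests, // 429, or an ILobeAgentRuntimeErrorType value
};
```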
@@ -1,6 +1,10 @@
  export * from './chat';
  export * from './embeddings';
+ export * from './error';
+ export * from './image';
  export * from './model';
  export * from './textToImage';
+ export * from './toolsCalling';
  export * from './tts';
  export * from './type';
+ export * from './usage';
@@ -0,0 +1,48 @@
+ import { z } from 'zod';
+ import type { PartialDeep } from 'type-fest';
+
+ /**
+  * The function that the model called.
+  */
+ export interface ToolFunction {
+   /**
+    * The arguments to call the function with, as generated by the model in JSON
+    * format. Note that the model does not always generate valid JSON, and may
+    * hallucinate parameters not defined by your function schema. Validate the
+    * arguments in your code before calling your function.
+    */
+   arguments: string;
+
+   /**
+    * The name of the function to call.
+    */
+   name: string;
+ }
+
+ export interface MessageToolCall {
+   /**
+    * The function that the model called.
+    */
+   function: ToolFunction;
+
+   /**
+    * The ID of the tool call.
+    */
+   id: string;
+
+   /**
+    * The type of the tool. Currently, only `function` is supported.
+    */
+   type: 'function' | string;
+ }
+
+ export const MessageToolCallSchema = z.object({
+   function: z.object({
+     arguments: z.string(),
+     name: z.string(),
+   }),
+   id: z.string(),
+   type: z.string(),
+ });
+
+ export type MessageToolCallChunk = PartialDeep<MessageToolCall> & { index: number };
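A hedged usage sketch for the new zod schema; the tool call values are made up. The idea is to validate the shape of a streamed tool call before trusting `arguments`, which arrives as a JSON string that may itself be malformed:

```ts
// Illustrative only: validate the shape with zod, then parse the arguments defensively.
const candidate = {
  function: { arguments: '{"city":"Berlin"}', name: 'searchWeather' },
  id: 'call_abc123',
  type: 'function',
};

const result = MessageToolCallSchema.safeParse(candidate);
if (result.success) {
  // models can emit invalid JSON, so wrap JSON.parse in a try/catch in real code
  const args = JSON.parse(result.data.function.arguments);
  console.log(result.data.function.name, args);
}
```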
@@ -1,7 +1,7 @@
  import OpenAI from 'openai';

- import { ILobeAgentRuntimeErrorType } from '../error';
  import { ChatStreamPayload } from './chat';
+ import { ILobeAgentRuntimeErrorType } from './error';

  export interface AgentInitErrorPayload {
    error: object;
@@ -0,0 +1,27 @@
+ export interface ModelTokensUsage {
+   acceptedPredictionTokens?: number;
+   inputAudioTokens?: number;
+   inputCacheMissTokens?: number;
+   inputCachedTokens?: number;
+   /**
+    * currently only pplx has citation_tokens
+    */
+   inputCitationTokens?: number;
+   /**
+    * user prompt image
+    */
+   inputImageTokens?: number;
+   /**
+    * user prompt input
+    */
+   inputTextTokens?: number;
+   inputWriteCacheTokens?: number;
+   outputAudioTokens?: number;
+   outputImageTokens?: number;
+   outputReasoningTokens?: number;
+   outputTextTokens?: number;
+   rejectedPredictionTokens?: number;
+   totalInputTokens?: number;
+   totalOutputTokens?: number;
+   totalTokens?: number;
+ }
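Every field here is optional, so consumers generally fall back across them. An illustrative aggregation helper, not taken from the package:

```ts
// Illustrative only: collapse a usage record into input/output/total counts,
// preferring the total* fields and falling back to per-category sums.
const summarizeUsage = (usage: ModelTokensUsage) => {
  const input =
    usage.totalInputTokens ?? (usage.inputTextTokens ?? 0) + (usage.inputImageTokens ?? 0);
  const output =
    usage.totalOutputTokens ?? (usage.outputTextTokens ?? 0) + (usage.outputReasoningTokens ?? 0);
  return { input, output, total: usage.totalTokens ?? input + output };
};
```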
@@ -1,8 +1,8 @@
- import { imageUrlToBase64 } from '@lobechat/utils';
  import { OpenAI } from 'openai';
  import { describe, expect, it, vi } from 'vitest';

  import { OpenAIChatMessage, UserMessageContentPart } from '../types/chat';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import {
    buildAnthropicBlock,
    buildAnthropicMessage,
@@ -19,7 +19,7 @@ vi.mock('./uriParser', () => ({
      type: 'base64',
    }),
  }));
- vi.mock('@lobechat/utils');
+ vi.mock('../utils/imageToBase64');

  describe('anthropicHelpers', () => {
    describe('buildAnthropicBlock', () => {
@@ -1,8 +1,8 @@
  import Anthropic from '@anthropic-ai/sdk';
- import { imageUrlToBase64 } from '@lobechat/utils';
  import OpenAI from 'openai';

  import { OpenAIChatMessage, UserMessageContentPart } from '../types';
+ import { imageUrlToBase64 } from '../utils/imageToBase64';
  import { parseDataUri } from './uriParser';

  export const buildAnthropicBlock = async (
@@ -1,9 +1,9 @@
- import { ILobeAgentRuntimeErrorType } from '../error';
  import {
    AgentInitErrorPayload,
    ChatCompletionErrorPayload,
    CreateImageErrorPayload,
  } from '../types';
+ import { ILobeAgentRuntimeErrorType } from '../types/error';

  export const AgentRuntimeError = {
    chat: (error: ChatCompletionErrorPayload): ChatCompletionErrorPayload => error,