@lobehub/chat 1.111.5 → 1.111.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (253)
  1. package/.github/workflows/claude-code-review.yml +78 -0
  2. package/.github/workflows/claude.yml +64 -0
  3. package/.github/workflows/desktop-pr-build.yml +2 -2
  4. package/.github/workflows/release-desktop-beta.yml +2 -2
  5. package/CHANGELOG.md +50 -0
  6. package/README.md +2 -2
  7. package/README.zh-CN.md +2 -2
  8. package/changelog/v1.json +18 -0
  9. package/docker-compose/local/.env.example +1 -0
  10. package/docker-compose/local/.env.zh-CN.example +1 -0
  11. package/docker-compose/local/docker-compose.yml +1 -1
  12. package/docker-compose/local/init_data.json +994 -488
  13. package/docker-compose/setup.sh +3 -3
  14. package/package.json +4 -4
  15. package/packages/model-runtime/package.json +12 -0
  16. package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/index.ts +8 -6
  17. package/{src/libs/model-runtime → packages/model-runtime/src}/google/index.ts +1 -2
  18. package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/createImage.test.ts +12 -15
  19. package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/createImage.ts +1 -2
  20. package/{src/libs/model-runtime → packages/model-runtime/src}/moonshot/index.ts +1 -1
  21. package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/createImage.ts +5 -5
  22. package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/index.ts +2 -2
  23. package/{src/libs/model-runtime → packages/model-runtime/src}/sensenova/index.ts +14 -4
  24. package/{src/libs/model-runtime → packages/model-runtime/src}/utils/googleErrorParser.test.ts +46 -34
  25. package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/responsesStream.ts +14 -12
  26. package/{src/libs/model-runtime → packages/model-runtime/src}/utils/usageConverter.test.ts +1 -1
  27. package/{src/libs/model-runtime → packages/model-runtime/src}/utils/usageConverter.ts +9 -5
  28. package/{src/libs/model-runtime → packages/model-runtime/src}/volcengine/index.ts +4 -4
  29. package/src/app/(backend)/middleware/auth/index.test.ts +1 -1
  30. package/src/app/(backend)/middleware/auth/index.ts +5 -1
  31. package/src/app/(backend)/middleware/auth/utils.ts +1 -1
  32. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +1 -1
  33. package/src/app/(backend)/webapi/chat/[provider]/route.ts +4 -4
  34. package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -1
  35. package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +1 -1
  36. package/src/app/(backend)/webapi/models/[provider]/route.ts +1 -1
  37. package/src/app/(backend)/webapi/plugin/gateway/route.ts +2 -2
  38. package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +1 -1
  39. package/src/app/[variants]/(main)/settings/llm/ProviderList/Azure/index.tsx +1 -1
  40. package/src/app/[variants]/(main)/settings/provider/(detail)/azure/page.tsx +1 -1
  41. package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +1 -1
  42. package/src/components/InvalidAPIKey/APIKeyForm/Bedrock.tsx +1 -1
  43. package/src/components/InvalidAPIKey/APIKeyForm/index.tsx +1 -1
  44. package/src/const/settings/llm.ts +2 -1
  45. package/src/database/models/__tests__/aiProvider.test.ts +1 -1
  46. package/src/database/models/aiProvider.ts +1 -1
  47. package/src/features/Conversation/Error/index.tsx +1 -1
  48. package/src/features/Conversation/components/ChatItem/ShareMessageModal/ShareImage/Preview.tsx +55 -48
  49. package/src/features/Conversation/components/ChatItem/ShareMessageModal/ShareImage/index.tsx +82 -75
  50. package/src/features/Conversation/components/ChatItem/ShareMessageModal/index.tsx +5 -2
  51. package/src/libs/trpc/client/lambda.ts +1 -1
  52. package/src/server/globalConfig/_deprecated.test.ts +1 -1
  53. package/src/server/globalConfig/_deprecated.ts +2 -1
  54. package/src/server/globalConfig/genServerAiProviderConfig.test.ts +1 -1
  55. package/src/server/globalConfig/genServerAiProviderConfig.ts +2 -1
  56. package/src/server/modules/ModelRuntime/index.test.ts +10 -7
  57. package/src/server/modules/ModelRuntime/index.ts +2 -1
  58. package/src/server/modules/ModelRuntime/trace.ts +1 -1
  59. package/src/server/routers/async/ragEval.ts +1 -1
  60. package/src/services/__tests__/_auth.test.ts +1 -1
  61. package/src/services/__tests__/chat.test.ts +11 -11
  62. package/src/services/_auth.ts +2 -1
  63. package/src/services/chat.ts +6 -6
  64. package/src/services/textToImage.ts +2 -1
  65. package/src/store/image/slices/generationConfig/initialState.ts +2 -1
  66. package/src/store/user/slices/modelList/action.ts +1 -1
  67. package/src/utils/errorResponse.test.ts +1 -2
  68. package/src/utils/errorResponse.ts +1 -2
  69. package/src/utils/fetch/fetchSSE.ts +1 -1
  70. package/src/utils/genUserLLMConfig.test.ts +1 -1
  71. package/src/utils/genUserLLMConfig.ts +2 -1
  72. package/tsconfig.json +2 -0
  73. package/vitest.config.ts +1 -0
  74. /package/{src/libs/model-runtime → packages/model-runtime/src}/BaseAI.ts +0 -0
  75. /package/{src/libs/model-runtime → packages/model-runtime/src}/ModelRuntime.test.ts +0 -0
  76. /package/{src/libs/model-runtime → packages/model-runtime/src}/ModelRuntime.ts +0 -0
  77. /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/baseRuntimeMap.ts +0 -0
  78. /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/createRuntime.test.ts +0 -0
  79. /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/createRuntime.ts +0 -0
  80. /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/index.ts +0 -0
  81. /package/{src/libs/model-runtime → packages/model-runtime/src}/UniformRuntime/index.ts +0 -0
  82. /package/{src/libs/model-runtime → packages/model-runtime/src}/ai21/index.test.ts +0 -0
  83. /package/{src/libs/model-runtime → packages/model-runtime/src}/ai21/index.ts +0 -0
  84. /package/{src/libs/model-runtime → packages/model-runtime/src}/ai302/index.ts +0 -0
  85. /package/{src/libs/model-runtime → packages/model-runtime/src}/ai360/index.test.ts +0 -0
  86. /package/{src/libs/model-runtime → packages/model-runtime/src}/ai360/index.ts +0 -0
  87. /package/{src/libs/model-runtime → packages/model-runtime/src}/aihubmix/index.ts +0 -0
  88. /package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/handleAnthropicError.ts +0 -0
  89. /package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/index.test.ts +0 -0
  90. /package/{src/libs/model-runtime → packages/model-runtime/src}/azureOpenai/index.test.ts +0 -0
  91. /package/{src/libs/model-runtime → packages/model-runtime/src}/azureOpenai/index.ts +0 -0
  92. /package/{src/libs/model-runtime → packages/model-runtime/src}/azureai/index.ts +0 -0
  93. /package/{src/libs/model-runtime → packages/model-runtime/src}/baichuan/index.test.ts +0 -0
  94. /package/{src/libs/model-runtime → packages/model-runtime/src}/baichuan/index.ts +0 -0
  95. /package/{src/libs/model-runtime → packages/model-runtime/src}/bedrock/index.test.ts +0 -0
  96. /package/{src/libs/model-runtime → packages/model-runtime/src}/bedrock/index.ts +0 -0
  97. /package/{src/libs/model-runtime → packages/model-runtime/src}/cloudflare/index.test.ts +0 -0
  98. /package/{src/libs/model-runtime → packages/model-runtime/src}/cloudflare/index.ts +0 -0
  99. /package/{src/libs/model-runtime → packages/model-runtime/src}/cohere/index.ts +0 -0
  100. /package/{src/libs/model-runtime → packages/model-runtime/src}/deepseek/index.test.ts +0 -0
  101. /package/{src/libs/model-runtime → packages/model-runtime/src}/deepseek/index.ts +0 -0
  102. /package/{src/libs/model-runtime → packages/model-runtime/src}/error.ts +0 -0
  103. /package/{src/libs/model-runtime → packages/model-runtime/src}/fal/index.test.ts +0 -0
  104. /package/{src/libs/model-runtime → packages/model-runtime/src}/fal/index.ts +0 -0
  105. /package/{src/libs/model-runtime → packages/model-runtime/src}/fireworksai/index.test.ts +0 -0
  106. /package/{src/libs/model-runtime → packages/model-runtime/src}/fireworksai/index.ts +0 -0
  107. /package/{src/libs/model-runtime → packages/model-runtime/src}/giteeai/index.test.ts +0 -0
  108. /package/{src/libs/model-runtime → packages/model-runtime/src}/giteeai/index.ts +0 -0
  109. /package/{src/libs/model-runtime → packages/model-runtime/src}/github/index.test.ts +0 -0
  110. /package/{src/libs/model-runtime → packages/model-runtime/src}/github/index.ts +0 -0
  111. /package/{src/libs/model-runtime → packages/model-runtime/src}/google/index.test.ts +0 -0
  112. /package/{src/libs/model-runtime → packages/model-runtime/src}/groq/index.test.ts +0 -0
  113. /package/{src/libs/model-runtime → packages/model-runtime/src}/groq/index.ts +0 -0
  114. /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/index.ts +0 -0
  115. /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/parseToolCalls.test.ts +0 -0
  116. /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/parseToolCalls.ts +0 -0
  117. /package/{src/libs/model-runtime → packages/model-runtime/src}/higress/index.ts +0 -0
  118. /package/{src/libs/model-runtime → packages/model-runtime/src}/huggingface/index.ts +0 -0
  119. /package/{src/libs/model-runtime → packages/model-runtime/src}/hunyuan/index.test.ts +0 -0
  120. /package/{src/libs/model-runtime → packages/model-runtime/src}/hunyuan/index.ts +0 -0
  121. /package/{src/libs/model-runtime → packages/model-runtime/src}/index.ts +0 -0
  122. /package/{src/libs/model-runtime → packages/model-runtime/src}/infiniai/index.ts +0 -0
  123. /package/{src/libs/model-runtime → packages/model-runtime/src}/internlm/index.test.ts +0 -0
  124. /package/{src/libs/model-runtime → packages/model-runtime/src}/internlm/index.ts +0 -0
  125. /package/{src/libs/model-runtime → packages/model-runtime/src}/jina/index.ts +0 -0
  126. /package/{src/libs/model-runtime → packages/model-runtime/src}/lmstudio/index.test.ts +0 -0
  127. /package/{src/libs/model-runtime → packages/model-runtime/src}/lmstudio/index.ts +0 -0
  128. /package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/index.ts +0 -0
  129. /package/{src/libs/model-runtime → packages/model-runtime/src}/mistral/index.test.ts +0 -0
  130. /package/{src/libs/model-runtime → packages/model-runtime/src}/mistral/index.ts +0 -0
  131. /package/{src/libs/model-runtime → packages/model-runtime/src}/modelscope/index.ts +0 -0
  132. /package/{src/libs/model-runtime → packages/model-runtime/src}/moonshot/index.test.ts +0 -0
  133. /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/__snapshots__/index.test.ts.snap +0 -0
  134. /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/fixtures/models.json +0 -0
  135. /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/index.test.ts +0 -0
  136. /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/index.ts +0 -0
  137. /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/type.ts +0 -0
  138. /package/{src/libs/model-runtime → packages/model-runtime/src}/nvidia/index.ts +0 -0
  139. /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/index.test.ts +0 -0
  140. /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/index.ts +0 -0
  141. /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/type.ts +0 -0
  142. /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/__snapshots__/index.test.ts.snap +0 -0
  143. /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/fixtures/openai-models.json +0 -0
  144. /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/index.test.ts +0 -0
  145. /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/index.ts +0 -0
  146. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/__snapshots__/index.test.ts.snap +0 -0
  147. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/fixtures/frontendModels.json +0 -0
  148. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/fixtures/models.json +0 -0
  149. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/index.test.ts +0 -0
  150. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/index.ts +0 -0
  151. /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/type.ts +0 -0
  152. /package/{src/libs/model-runtime → packages/model-runtime/src}/perplexity/index.test.ts +0 -0
  153. /package/{src/libs/model-runtime → packages/model-runtime/src}/perplexity/index.ts +0 -0
  154. /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/__snapshots__/index.test.ts.snap +0 -0
  155. /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/fixtures/models.json +0 -0
  156. /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/index.test.ts +0 -0
  157. /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/index.ts +0 -0
  158. /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/type.ts +0 -0
  159. /package/{src/libs/model-runtime → packages/model-runtime/src}/providerTestUtils.test.ts +0 -0
  160. /package/{src/libs/model-runtime → packages/model-runtime/src}/providerTestUtils.ts +0 -0
  161. /package/{src/libs/model-runtime → packages/model-runtime/src}/qiniu/index.test.ts +0 -0
  162. /package/{src/libs/model-runtime → packages/model-runtime/src}/qiniu/index.ts +0 -0
  163. /package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/createImage.test.ts +0 -0
  164. /package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/index.test.ts +0 -0
  165. /package/{src/libs/model-runtime → packages/model-runtime/src}/runtimeMap.ts +0 -0
  166. /package/{src/libs/model-runtime → packages/model-runtime/src}/sambanova/index.ts +0 -0
  167. /package/{src/libs/model-runtime → packages/model-runtime/src}/search1api/index.ts +0 -0
  168. /package/{src/libs/model-runtime → packages/model-runtime/src}/sensenova/index.test.ts +0 -0
  169. /package/{src/libs/model-runtime → packages/model-runtime/src}/siliconcloud/index.ts +0 -0
  170. /package/{src/libs/model-runtime → packages/model-runtime/src}/spark/index.test.ts +0 -0
  171. /package/{src/libs/model-runtime → packages/model-runtime/src}/spark/index.ts +0 -0
  172. /package/{src/libs/model-runtime → packages/model-runtime/src}/stepfun/index.test.ts +0 -0
  173. /package/{src/libs/model-runtime → packages/model-runtime/src}/stepfun/index.ts +0 -0
  174. /package/{src/libs/model-runtime → packages/model-runtime/src}/taichu/index.test.ts +0 -0
  175. /package/{src/libs/model-runtime → packages/model-runtime/src}/taichu/index.ts +0 -0
  176. /package/{src/libs/model-runtime → packages/model-runtime/src}/tencentcloud/index.test.ts +0 -0
  177. /package/{src/libs/model-runtime → packages/model-runtime/src}/tencentcloud/index.ts +0 -0
  178. /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/fixtures/models.json +0 -0
  179. /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/index.test.ts +0 -0
  180. /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/index.ts +0 -0
  181. /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/type.ts +0 -0
  182. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/chat.ts +0 -0
  183. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/embeddings.ts +0 -0
  184. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/image.ts +0 -0
  185. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/index.ts +0 -0
  186. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/model.ts +0 -0
  187. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/textToImage.ts +0 -0
  188. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/tts.ts +0 -0
  189. /package/{src/libs/model-runtime → packages/model-runtime/src}/types/type.ts +0 -0
  190. /package/{src/libs/model-runtime → packages/model-runtime/src}/upstage/index.test.ts +0 -0
  191. /package/{src/libs/model-runtime → packages/model-runtime/src}/upstage/index.ts +0 -0
  192. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/anthropicHelpers.test.ts +0 -0
  193. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/anthropicHelpers.ts +0 -0
  194. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/cloudflareHelpers.test.ts +0 -0
  195. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/cloudflareHelpers.ts +0 -0
  196. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/createError.ts +0 -0
  197. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/debugStream.test.ts +0 -0
  198. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/debugStream.ts +0 -0
  199. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/desensitizeUrl.test.ts +0 -0
  200. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/desensitizeUrl.ts +0 -0
  201. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/googleErrorParser.ts +0 -0
  202. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/handleOpenAIError.ts +0 -0
  203. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/modelParse.test.ts +0 -0
  204. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/modelParse.ts +0 -0
  205. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiCompatibleFactory/index.test.ts +0 -0
  206. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiCompatibleFactory/index.ts +0 -0
  207. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiHelpers.test.ts +0 -0
  208. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiHelpers.ts +0 -0
  209. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/response.ts +0 -0
  210. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/sensenovaHelpers.test.ts +0 -0
  211. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/sensenovaHelpers.ts +0 -0
  212. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/__snapshots__/protocol.test.ts.snap +0 -0
  213. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/anthropic.test.ts +0 -0
  214. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/anthropic.ts +0 -0
  215. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/claude.ts +0 -0
  216. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/common.ts +0 -0
  217. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/index.ts +0 -0
  218. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/llama.test.ts +0 -0
  219. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/llama.ts +0 -0
  220. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/google-ai.test.ts +0 -0
  221. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/google-ai.ts +0 -0
  222. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/index.ts +0 -0
  223. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/model.ts +0 -0
  224. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/ollama.test.ts +0 -0
  225. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/ollama.ts +0 -0
  226. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap +0 -0
  227. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/index.ts +0 -0
  228. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/openai.test.ts +0 -0
  229. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/openai.ts +0 -0
  230. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/responsesStream.test.ts +0 -0
  231. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/protocol.test.ts +0 -0
  232. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/protocol.ts +0 -0
  233. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/qwen.test.ts +0 -0
  234. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/qwen.ts +0 -0
  235. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/spark.test.ts +0 -0
  236. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/spark.ts +0 -0
  237. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/utils.ts +0 -0
  238. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/vertex-ai.test.ts +0 -0
  239. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/vertex-ai.ts +0 -0
  240. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/uriParser.test.ts +0 -0
  241. /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/uriParser.ts +0 -0
  242. /package/{src/libs/model-runtime → packages/model-runtime/src}/v0/index.ts +0 -0
  243. /package/{src/libs/model-runtime → packages/model-runtime/src}/vertexai/index.ts +0 -0
  244. /package/{src/libs/model-runtime → packages/model-runtime/src}/vllm/index.ts +0 -0
  245. /package/{src/libs/model-runtime → packages/model-runtime/src}/wenxin/index.test.ts +0 -0
  246. /package/{src/libs/model-runtime → packages/model-runtime/src}/wenxin/index.ts +0 -0
  247. /package/{src/libs/model-runtime → packages/model-runtime/src}/xai/index.test.ts +0 -0
  248. /package/{src/libs/model-runtime → packages/model-runtime/src}/xai/index.ts +0 -0
  249. /package/{src/libs/model-runtime → packages/model-runtime/src}/xinference/index.ts +0 -0
  250. /package/{src/libs/model-runtime → packages/model-runtime/src}/zeroone/index.test.ts +0 -0
  251. /package/{src/libs/model-runtime → packages/model-runtime/src}/zeroone/index.ts +0 -0
  252. /package/{src/libs/model-runtime → packages/model-runtime/src}/zhipu/index.test.ts +0 -0
  253. /package/{src/libs/model-runtime → packages/model-runtime/src}/zhipu/index.ts +0 -0
@@ -450,7 +450,7 @@ ENV_EXAMPLES=(
    "$SUB_DIR/.env.example"
  )
  # Default values
- CASDOOR_PASSWORD="123"
+ CASDOOR_PASSWORD="pswd123"
  CASDOOR_SECRET="CASDOOR_SECRET"
  MINIO_ROOT_PASSWORD="YOUR_MINIO_PASSWORD"
  CASDOOR_HOST="localhost:8000"
@@ -657,10 +657,10 @@ section_regenerate_secrets() {
  CASDOOR_PASSWORD=$(generate_key 10)
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_PASSWORD"
- CASDOOR_PASSWORD="123"
+ CASDOOR_PASSWORD="pswd123"
  else
  # replace `password` in init_data.json
- sed "${SED_INPLACE_ARGS[@]}" "s/"123"/${CASDOOR_PASSWORD}/" init_data.json
+ sed "${SED_INPLACE_ARGS[@]}" "s/"pswd123"/${CASDOOR_PASSWORD}/" init_data.json
  if [ $? -ne 0 ]; then
  echo $(show_message "security_secrect_regenerate_failed") "CASDOOR_PASSWORD in \`init_data.json\`"
  fi
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.111.5",
+ "version": "1.111.7",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -121,9 +121,8 @@
  "dependencies": {
  "@ant-design/icons": "^5.6.1",
  "@ant-design/pro-components": "^2.8.10",
- "@anthropic-ai/sdk": "^0.57.0",
+ "@anthropic-ai/sdk": "^0.59.0",
  "@auth/core": "^0.40.0",
- "@aws-sdk/client-bedrock-runtime": "^3.862.0",
  "@aws-sdk/client-s3": "^3.862.0",
  "@aws-sdk/s3-request-presigner": "^3.862.0",
  "@azure-rest/ai-inference": "1.0.0-beta.5",
@@ -145,6 +144,7 @@
  "@lobechat/electron-client-ipc": "workspace:*",
  "@lobechat/electron-server-ipc": "workspace:*",
  "@lobechat/file-loaders": "workspace:*",
+ "@lobechat/model-runtime": "workspace:*",
  "@lobechat/web-crawler": "workspace:*",
  "@lobehub/analytics": "^1.6.0",
  "@lobehub/charts": "^2.0.0",
@@ -363,7 +363,7 @@
  "vitest": "^3.2.4",
  "vitest-canvas-mock": "^0.3.3"
  },
- "packageManager": "pnpm@10.10.0",
+ "packageManager": "pnpm@10.14.0",
  "publishConfig": {
  "access": "public",
  "registry": "https://registry.npmjs.org"
@@ -0,0 +1,12 @@
+ {
+ "name": "@lobechat/model-runtime",
+ "version": "1.0.0",
+ "private": true,
+ "main": "./src/index.ts",
+ "dependencies": {
+ "@aws-sdk/client-bedrock-runtime": "^3.862.0",
+ "@lobechat/types": "workspace:*",
+ "debug": "^4.4.1",
+ "openai": "^4.104.0"
+ }
+ }
@@ -204,14 +204,16 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
  model,
  system: systemPrompts,
  // For Opus 4.1 models: prefer temperature over top_p if both are provided
- temperature: isOpus41Model
- ? (shouldSetTemperature ? temperature / 2 : undefined)
- : (payload.temperature !== undefined ? temperature / 2 : undefined),
+ temperature: isOpus41Model
+ ? shouldSetTemperature
+ ? temperature / 2
+ : undefined
+ : payload.temperature !== undefined
+ ? temperature / 2
+ : undefined,
  tools: postTools,
  // For Opus 4.1 models: only set top_p if temperature is not set
- top_p: isOpus41Model
- ? (shouldSetTemperature ? undefined : top_p)
- : top_p,
+ top_p: isOpus41Model ? (shouldSetTemperature ? undefined : top_p) : top_p,
  } satisfies Anthropic.MessageCreateParams;
  }

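For readers skimming the hunk above: the change only reflows the nested ternaries; the behaviour stays "prefer temperature over top_p on Opus 4.1". A minimal TypeScript sketch of that rule, assuming shouldSetTemperature simply means a temperature was provided (its definition sits outside this hunk) and using an illustrative helper name:

  // Illustrative only; restates the ternaries from the hunk above.
  const resolveSamplingParams = (
    isOpus41Model: boolean,
    temperature: number | undefined,
    top_p: number | undefined,
  ) => ({
    // The runtime halves the incoming value before sending it to Anthropic,
    // presumably mapping the app's 0-2 range onto Anthropic's 0-1 range.
    temperature: temperature !== undefined ? temperature / 2 : undefined,
    // Opus 4.1: drop top_p whenever a temperature is set; other models pass top_p through.
    top_p: isOpus41Model && temperature !== undefined ? undefined : top_p,
  });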
@@ -511,7 +511,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg, toolCallNameMap));

  const contents = await Promise.all(pools);
-
+
  // 筛除空消息: contents.parts must not be empty.
  return contents.filter((content: Content) => content.parts && content.parts.length > 0);
  };
@@ -563,7 +563,6 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  },
  };
  };
-
  }

  export default LobeGoogleAI;
@@ -55,22 +55,19 @@ describe('createMiniMaxImage', () => {

  const result = await createMiniMaxImage(payload, mockOptions);

- expect(fetch).toHaveBeenCalledWith(
- 'https://api.minimaxi.com/v1/image_generation',
- {
- method: 'POST',
- headers: {
- 'Authorization': 'Bearer test-api-key',
- 'Content-Type': 'application/json',
- },
- body: JSON.stringify({
- aspect_ratio: undefined,
- model: 'image-01',
- n: 1,
- prompt: 'A beautiful sunset over the mountains',
- }),
+ expect(fetch).toHaveBeenCalledWith('https://api.minimaxi.com/v1/image_generation', {
+ method: 'POST',
+ headers: {
+ 'Authorization': 'Bearer test-api-key',
+ 'Content-Type': 'application/json',
  },
- );
+ body: JSON.stringify({
+ aspect_ratio: undefined,
+ model: 'image-01',
+ n: 1,
+ prompt: 'A beautiful sunset over the mountains',
+ }),
+ });

  expect(result).toEqual({
  imageUrl: mockImageUrl,
@@ -59,7 +59,7 @@ export async function createMiniMaxImage(
  }

  throw new Error(
- `MiniMax API error (${response.status}): ${errorData?.base_resp || response.statusText}`
+ `MiniMax API error (${response.status}): ${errorData?.base_resp || response.statusText}`,
  );
  }

@@ -92,7 +92,6 @@ export async function createMiniMaxImage(
  log('Image generated successfully: %s', imageUrl);

  return { imageUrl };
-
  } catch (error) {
  log('Error in createMiniMaxImage: %O', error);

@@ -14,7 +14,7 @@ export const LobeMoonshotAI = createOpenAICompatibleRuntime({
  const { enabledSearch, messages, temperature, tools, ...rest } = payload;

  // 为 assistant 空消息添加一个空格 (#8418)
- const filteredMessages = messages.map(message => {
+ const filteredMessages = messages.map((message) => {
  if (message.role === 'assistant' && (!message.content || message.content === '')) {
  return { ...message, content: ' ' };
  }
@@ -24,7 +24,7 @@ const QwenText2ImageModels = [
  'wanx2.0-t2i',
  'wanx-v1',
  'flux',
- 'stable-diffusion'
+ 'stable-diffusion',
  ];

  const getModelType = (model: string): string => {
@@ -33,12 +33,12 @@ const getModelType = (model: string): string => {
  // return 'image2image';
  // }

- if (QwenText2ImageModels.some(prefix => model.startsWith(prefix))) {
+ if (QwenText2ImageModels.some((prefix) => model.startsWith(prefix))) {
  return 'text2image';
  }

  throw new Error(`Unsupported model: ${model}`);
- }
+ };

  /**
  * Create an image generation task with Qwen API
@@ -47,7 +47,7 @@ async function createImageTask(payload: CreateImagePayload, apiKey: string): Pro
  const { model, params } = payload;
  // I can only say that the design of Alibaba Cloud's API is really bad; each model has a different endpoint path.
  const modelType = getModelType(model);
- const endpoint = `https://dashscope.aliyuncs.com/api/v1/services/aigc/${modelType}/image-synthesis`
+ const endpoint = `https://dashscope.aliyuncs.com/api/v1/services/aigc/${modelType}/image-synthesis`;
  if (!endpoint) {
  throw new Error(`No endpoint configured for model type: ${modelType}`);
  }
@@ -66,7 +66,7 @@ async function createImageTask(payload: CreateImagePayload, apiKey: string): Pro
  ...(typeof params.seed === 'number' ? { seed: params.seed } : {}),
  ...(params.width && params.height
  ? { size: `${params.width}*${params.height}` }
- : params.size
+ : params.size
  ? { size: params.size.replaceAll('x', '*') }
  : { size: '1024*1024' }),
  },
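The last hunk only re-indents the size fallback chain; restated as a small TypeScript sketch (the helper name is illustrative, types reduced to what the hunk shows): explicit width/height win, then a provided "WxH" string is converted to Qwen's "W*H" form, otherwise 1024*1024 is used.

  const resolveSize = (params: { height?: number; size?: string; width?: number }): string =>
    params.width && params.height
      ? `${params.width}*${params.height}`
      : params.size
        ? params.size.replaceAll('x', '*')
        : '1024*1024';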
@@ -36,8 +36,8 @@ export const LobeQwenAI = createOpenAICompatibleRuntime({
  thinking?.budget_tokens === 0 ? 0 : thinking?.budget_tokens || undefined,
  }
  : ['qwen3', 'qwen-turbo', 'qwen-plus'].some((keyword) =>
- model.toLowerCase().includes(keyword),
- )
+ model.toLowerCase().includes(keyword),
+ )
  ? {
  enable_thinking: thinking !== undefined ? thinking.type === 'enabled' : false,
  thinking_budget:
@@ -12,8 +12,16 @@ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
  baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
  chatCompletion: {
  handlePayload: (payload) => {
- const { frequency_penalty, max_tokens, messages, model, temperature, thinking, top_p, ...rest } =
- payload;
+ const {
+ frequency_penalty,
+ max_tokens,
+ messages,
+ model,
+ temperature,
+ thinking,
+ top_p,
+ ...rest
+ } = payload;

  return {
  ...rest,
@@ -33,8 +41,10 @@ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
  temperature !== undefined && temperature > 0 && temperature <= 2
  ? temperature
  : undefined,
- thinking: thinking
- ? (model && model.includes('-V6-5-') && thinking.type === 'enabled' ? { enabled: true } : { enabled: false })
+ thinking: thinking
+ ? model && model.includes('-V6-5-') && thinking.type === 'enabled'
+ ? { enabled: true }
+ : { enabled: false }
  : undefined,
  top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined,
  } as any;
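The reformatted thinking ternary is easier to follow as a small helper; a sketch of the same mapping (the function name and the reduced thinking type are illustrative, not part of the package):

  const mapThinkingFlag = (
    model: string,
    thinking?: { type: 'enabled' | 'disabled' },
  ): { enabled: boolean } | undefined => {
    // When the caller sent no thinking config, the field is omitted entirely.
    if (!thinking) return undefined;
    // Only the -V6-5- SenseNova models get thinking enabled; everything else is forced off.
    return { enabled: model.includes('-V6-5-') && thinking.type === 'enabled' };
  };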
@@ -1,7 +1,6 @@
  import { describe, expect, it } from 'vitest';

  import { AgentRuntimeErrorType } from '../error';
-
  import {
  cleanErrorMessage,
  extractStatusCodeFromError,
@@ -35,8 +34,10 @@ describe('googleErrorParser', () => {
  });

  it('should handle combined formatting issues', () => {
- const input = '* API key not valid.\\nPlease check your credentials.\\n\\nContact support if needed. ';
- const expected = 'API key not valid. Please check your credentials. Contact support if needed.';
+ const input =
+ '* API key not valid.\\nPlease check your credentials.\\n\\nContact support if needed. ';
+ const expected =
+ 'API key not valid. Please check your credentials. Contact support if needed.';
  expect(cleanErrorMessage(input)).toBe(expected);
  });
  });
@@ -45,7 +46,7 @@ describe('googleErrorParser', () => {
  it('should extract status code and message correctly', () => {
  const input = 'Connection failed [503 Service Unavailable] Please try again later';
  const result = extractStatusCodeFromError(input);
-
+
  expect(result.errorDetails).toEqual({
  message: 'Please try again later',
  statusCode: 503,
@@ -57,7 +58,7 @@ describe('googleErrorParser', () => {
  it('should handle different status codes', () => {
  const input = 'Request failed [401 Unauthorized] Invalid credentials';
  const result = extractStatusCodeFromError(input);
-
+
  expect(result.errorDetails).toEqual({
  message: 'Invalid credentials',
  statusCode: 401,
@@ -69,7 +70,7 @@ describe('googleErrorParser', () => {
  it('should return null for messages without status codes', () => {
  const input = 'Simple error message without status code';
  const result = extractStatusCodeFromError(input);
-
+
  expect(result.errorDetails).toBeNull();
  expect(result.prefix).toBe('Simple error message without status code');
  });
@@ -77,7 +78,7 @@ describe('googleErrorParser', () => {
  it('should handle empty message after status code', () => {
  const input = 'Error [404 Not Found]';
  const result = extractStatusCodeFromError(input);
-
+
  expect(result.errorDetails).toEqual({
  message: '',
  statusCode: 404,
@@ -91,15 +92,16 @@ describe('googleErrorParser', () => {
  it('should handle location not supported error', () => {
  const input = 'This location is not supported for Google AI services';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.LocationNotSupportError);
  expect(result.error.message).toBe(input);
  });

  it('should handle status JSON format', () => {
- const input = 'got status: UNAVAILABLE. {"error":{"code":503,"message":"Service temporarily unavailable","status":"UNAVAILABLE"}}';
+ const input =
+ 'got status: UNAVAILABLE. {"error":{"code":503,"message":"Service temporarily unavailable","status":"UNAVAILABLE"}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error).toEqual({
  code: 503,
@@ -109,9 +111,10 @@ describe('googleErrorParser', () => {
  });

  it('should handle direct JSON parsing', () => {
- const input = '{"error":{"code":400,"message":"* API key not valid. Please pass a valid API key.","status":"INVALID_ARGUMENT"}}';
+ const input =
+ '{"error":{"code":400,"message":"* API key not valid. Please pass a valid API key.","status":"INVALID_ARGUMENT"}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.InvalidProviderAPIKey);
  expect(result.error).toEqual({
  code: 400,
@@ -121,9 +124,10 @@ describe('googleErrorParser', () => {
  });

  it('should handle quota limit error', () => {
- const input = '{"error":{"code":429,"message":"Quota limit reached","status":"RESOURCE_EXHAUSTED"}}';
+ const input =
+ '{"error":{"code":429,"message":"Quota limit reached","status":"RESOURCE_EXHAUSTED"}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.QuotaLimitReached);
  expect(result.error).toEqual({
  code: 429,
@@ -133,9 +137,10 @@ describe('googleErrorParser', () => {
  });

  it('should handle nested JSON format', () => {
- const input = '{"error":{"message":"{\\"error\\":{\\"code\\":400,\\"message\\":\\"Invalid request\\"}}"}}';
+ const input =
+ '{"error":{"message":"{\\"error\\":{\\"code\\":400,\\"message\\":\\"Invalid request\\"}}"}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error).toEqual({
  code: 400,
@@ -145,28 +150,32 @@ describe('googleErrorParser', () => {
  });

  it('should handle array format with API_KEY_INVALID', () => {
- const input = 'Request failed [{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "reason": "API_KEY_INVALID", "domain": "googleapis.com"}]';
+ const input =
+ 'Request failed [{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "reason": "API_KEY_INVALID", "domain": "googleapis.com"}]';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.InvalidProviderAPIKey);
  });

  it('should handle array format with other errors', () => {
- const input = 'Request failed [{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "reason": "QUOTA_EXCEEDED", "domain": "googleapis.com"}]';
+ const input =
+ 'Request failed [{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "reason": "QUOTA_EXCEEDED", "domain": "googleapis.com"}]';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
- expect(result.error).toEqual([{
- "@type": "type.googleapis.com/google.rpc.ErrorInfo",
- "reason": "QUOTA_EXCEEDED",
- "domain": "googleapis.com"
- }]);
+ expect(result.error).toEqual([
+ {
+ '@type': 'type.googleapis.com/google.rpc.ErrorInfo',
+ 'reason': 'QUOTA_EXCEEDED',
+ 'domain': 'googleapis.com',
+ },
+ ]);
  });

  it('should handle status code extraction fallback', () => {
  const input = 'Connection failed [503 Service Unavailable] Please try again later';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error).toEqual({
  message: 'Please try again later',
@@ -176,17 +185,20 @@ describe('googleErrorParser', () => {
  });

  it('should handle complex nested JSON with message cleaning', () => {
- const input = '{"error":{"code":400,"message":"* Request contains invalid parameters\\nPlease check the documentation\\n\\nContact support for help"}}';
+ const input =
+ '{"error":{"code":400,"message":"* Request contains invalid parameters\\nPlease check the documentation\\n\\nContact support for help"}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
- expect(result.error.message).toBe('Request contains invalid parameters Please check the documentation Contact support for help');
+ expect(result.error.message).toBe(
+ 'Request contains invalid parameters Please check the documentation Contact support for help',
+ );
  });

  it('should return default error for unparseable messages', () => {
  const input = 'Some random error message that cannot be parsed';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error.message).toBe(input);
  });
@@ -194,7 +206,7 @@ describe('googleErrorParser', () => {
  it('should handle malformed JSON gracefully', () => {
  const input = '{"error":{"code":400,"message":"Invalid JSON{incomplete';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error.message).toBe(input);
  });
@@ -202,7 +214,7 @@ describe('googleErrorParser', () => {
  it('should handle empty error object in JSON', () => {
  const input = '{"error":{}}';
  const result = parseGoogleErrorMessage(input);
-
+
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error).toEqual({
  code: null,
@@ -217,9 +229,9 @@ describe('googleErrorParser', () => {
  for (let i = 0; i < 6; i++) {
  deeplyNested = `{"error":{"message":"${deeplyNested.replaceAll('"', '\\"')}"}}`;
  }
-
+
  const result = parseGoogleErrorMessage(deeplyNested);
-
+
  // Should still return a valid result, but might not reach the deepest level
  expect(result.errorType).toBe(AgentRuntimeErrorType.ProviderBizError);
  expect(result.error).toBeDefined();
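These hunks are whitespace and wrapping only; the parser's behaviour is pinned down by the expectations themselves: a bracketed "[503 Service Unavailable]" fragment yields a status code plus the trailing message, while input without such a fragment returns null details and the whole string as the prefix. A minimal sketch of a parser consistent with those expectations (illustrative only, not the package's actual utils/googleErrorParser.ts implementation):

  interface StatusCodeResult {
    errorDetails: { message: string; statusCode: number } | null;
    prefix: string;
  }

  const extractStatusCodeFromError = (message: string): StatusCodeResult => {
    // e.g. "Connection failed [503 Service Unavailable] Please try again later"
    const match = message.match(/\[(\d{3})[^\]]*]\s*(.*)$/);
    if (!match) return { errorDetails: null, prefix: message };

    return {
      errorDetails: { message: match[2].trim(), statusCode: Number(match[1]) },
      prefix: message.slice(0, match.index).trim(),
    };
  };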
@@ -20,17 +20,19 @@ import {
  import { OpenAIStreamOptions } from './openai';

  const transformOpenAIStream = (
- chunk: OpenAI.Responses.ResponseStreamEvent | {
- annotation: {
- end_index: number;
- start_index: number;
- title: string;
- type: 'url_citation';
- url: string;
- };
- item_id: string;
- type: 'response.output_text.annotation.added';
- },
+ chunk:
+ | OpenAI.Responses.ResponseStreamEvent
+ | {
+ annotation: {
+ end_index: number;
+ start_index: number;
+ title: string;
+ type: 'url_citation';
+ url: string;
+ };
+ item_id: string;
+ type: 'response.output_text.annotation.added';
+ },
  streamContext: StreamContext,
  ): StreamProtocolChunk | StreamProtocolChunk[] => {
  // handle the first chunk error
@@ -136,7 +138,7 @@ const transformOpenAIStream = (
  data: { citations: streamContext.returnedCitationArray },
  id: chunk.item.id,
  type: 'grounding',
- }
+ };
  }

  return { data: null, id: chunk.item.id, type: 'text' };
@@ -1,7 +1,7 @@
  import OpenAI from 'openai';
  import { describe, expect, it } from 'vitest';

- import { convertUsage, convertResponseUsage } from './usageConverter';
+ import { convertResponseUsage, convertUsage } from './usageConverter';

  describe('convertUsage', () => {
  it('should convert basic OpenAI usage data correctly', () => {
@@ -2,7 +2,10 @@ import OpenAI from 'openai';

  import { ModelTokensUsage } from '@/types/message';

- export const convertUsage = (usage: OpenAI.Completions.CompletionUsage, provider?: string): ModelTokensUsage => {
+ export const convertUsage = (
+ usage: OpenAI.Completions.CompletionUsage,
+ provider?: string,
+ ): ModelTokensUsage => {
  // 目前只有 pplx 才有 citation_tokens
  const inputTextTokens = usage.prompt_tokens || 0;
  const inputCitationTokens = (usage as any).citation_tokens || 0;
@@ -17,11 +20,12 @@ export const convertUsage = (usage: OpenAI.Completions.CompletionUsage, provider
  const totalOutputTokens = usage.completion_tokens;
  const outputReasoning = usage.completion_tokens_details?.reasoning_tokens || 0;
  const outputAudioTokens = usage.completion_tokens_details?.audio_tokens || 0;
-
+
  // XAI 的 completion_tokens 不包含 reasoning_tokens,需要特殊处理
- const outputTextTokens = provider === 'xai'
- ? totalOutputTokens - outputAudioTokens
- : totalOutputTokens - outputReasoning - outputAudioTokens;
+ const outputTextTokens =
+ provider === 'xai'
+ ? totalOutputTokens - outputAudioTokens
+ : totalOutputTokens - outputReasoning - outputAudioTokens;

  const totalTokens = inputCitationTokens + usage.total_tokens;

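The Chinese comments in this hunk note that only Perplexity reports citation_tokens and that xAI's completion_tokens already excludes reasoning_tokens, so reasoning must not be subtracted a second time. A TypeScript sketch of just that output-token arithmetic (the helper name is illustrative; convertUsage itself works on the full OpenAI CompletionUsage object):

  // Illustrative helper isolating the xai special case from convertUsage above.
  const splitOutputTextTokens = (
    totalOutputTokens: number,
    reasoningTokens: number,
    audioTokens: number,
    provider?: string,
  ): number => {
    // xAI reports completion_tokens without reasoning_tokens, so only audio is subtracted.
    if (provider === 'xai') return totalOutputTokens - audioTokens;
    // Other providers include reasoning in completion_tokens, so both are subtracted.
    return totalOutputTokens - reasoningTokens - audioTokens;
  };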
@@ -1,12 +1,12 @@
  import { ModelProvider } from '../types';
- import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  const THINKING_MODELS = [
  'thinking-vision-pro',
- 'thinking-pro-m',
+ 'thinking-pro-m',
  'doubao-seed-1-6',
- 'doubao-1-5-ui-tars'
+ 'doubao-1-5-ui-tars',
  ];

  export interface VolcengineModelCard {
@@ -24,7 +24,7 @@ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
  model,
  ...(THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword))
  ? {
- thinking: { type: thinking?.type }
+ thinking: { type: thinking?.type },
  }
  : {}),
  } as any;
@@ -1,7 +1,7 @@
+ import { AgentRuntimeError } from '@lobechat/model-runtime';
  import { ChatErrorType } from '@lobechat/types';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { AgentRuntimeError } from '@/libs/model-runtime';
  import { createErrorResponse } from '@/utils/errorResponse';
  import { getXorPayload } from '@/utils/server/xor';

@@ -1,4 +1,9 @@
  import { AuthObject } from '@clerk/backend';
+ import {
+ AgentRuntimeError,
+ ChatCompletionErrorPayload,
+ ModelRuntime,
+ } from '@lobechat/model-runtime';
  import { ChatErrorType } from '@lobechat/types';
  import { NextRequest } from 'next/server';

@@ -10,7 +15,6 @@ import {
  enableClerk,
  } from '@/const/auth';
  import { ClerkAuth } from '@/libs/clerk-auth';
- import { AgentRuntimeError, ChatCompletionErrorPayload, ModelRuntime } from '@/libs/model-runtime';
  import { validateOIDCJWT } from '@/libs/oidc-provider/jwt';
  import { createErrorResponse } from '@/utils/errorResponse';
  import { getXorPayload } from '@/utils/server/xor';
@@ -1,9 +1,9 @@
  import { type AuthObject } from '@clerk/backend';
+ import { AgentRuntimeError } from '@lobechat/model-runtime';
  import { ChatErrorType } from '@lobechat/types';

  import { enableClerk, enableNextAuth } from '@/const/auth';
  import { getAppConfig } from '@/envs/app';
- import { AgentRuntimeError } from '@/libs/model-runtime';

  interface CheckAuthParams {
  accessCode?: string;
@@ -1,11 +1,11 @@
  // @vitest-environment node
  import { getAuth } from '@clerk/nextjs/server';
+ import { LobeRuntimeAI, ModelRuntime } from '@lobechat/model-runtime';
  import { ChatErrorType } from '@lobechat/types';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

  import { checkAuthMethod } from '@/app/(backend)/middleware/auth/utils';
  import { LOBE_CHAT_AUTH_HEADER, OAUTH_AUTHORIZED } from '@/const/auth';
- import { LobeRuntimeAI, ModelRuntime } from '@/libs/model-runtime';
  import { getXorPayload } from '@/utils/server/xor';

  import { POST } from './route';
@@ -1,11 +1,11 @@
- import { ChatErrorType } from '@lobechat/types';
-
- import { checkAuth } from '@/app/(backend)/middleware/auth';
  import {
  AGENT_RUNTIME_ERROR_SET,
  ChatCompletionErrorPayload,
  ModelRuntime,
- } from '@/libs/model-runtime';
+ } from '@lobechat/model-runtime';
+ import { ChatErrorType } from '@lobechat/types';
+
+ import { checkAuth } from '@/app/(backend)/middleware/auth';
  import { createTraceOptions, initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
  import { ChatStreamPayload } from '@/types/openai/chat';
  import { createErrorResponse } from '@/utils/errorResponse';