@lobehub/chat 1.111.6 → 1.111.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.i18nrc.js +1 -1
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/docker-compose/local/.env.example +1 -0
- package/docker-compose/local/.env.zh-CN.example +1 -0
- package/docker-compose/local/docker-compose.yml +1 -1
- package/docker-compose/local/init_data.json +994 -488
- package/docker-compose/setup.sh +3 -3
- package/locales/ar/chat.json +3 -0
- package/locales/bg-BG/chat.json +3 -0
- package/locales/de-DE/chat.json +3 -0
- package/locales/en-US/chat.json +3 -0
- package/locales/es-ES/chat.json +3 -0
- package/locales/fa-IR/chat.json +3 -0
- package/locales/fr-FR/chat.json +3 -0
- package/locales/it-IT/chat.json +3 -0
- package/locales/ja-JP/chat.json +3 -0
- package/locales/ko-KR/chat.json +3 -0
- package/locales/nl-NL/chat.json +3 -0
- package/locales/pl-PL/chat.json +3 -0
- package/locales/pt-BR/chat.json +3 -0
- package/locales/ru-RU/chat.json +3 -0
- package/locales/tr-TR/chat.json +3 -0
- package/locales/vi-VN/chat.json +3 -0
- package/locales/zh-CN/chat.json +3 -0
- package/locales/zh-TW/chat.json +3 -0
- package/package.json +3 -3
- package/packages/model-runtime/package.json +12 -0
- package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/index.ts +8 -6
- package/{src/libs/model-runtime → packages/model-runtime/src}/azureOpenai/index.ts +22 -5
- package/{src/libs/model-runtime → packages/model-runtime/src}/google/index.ts +1 -2
- package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/createImage.test.ts +12 -15
- package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/createImage.ts +1 -2
- package/{src/libs/model-runtime → packages/model-runtime/src}/moonshot/index.ts +1 -1
- package/{src/libs/model-runtime → packages/model-runtime/src}/openai/index.ts +4 -1
- package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/createImage.ts +5 -5
- package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/index.ts +2 -2
- package/{src/libs/model-runtime → packages/model-runtime/src}/sensenova/index.ts +14 -4
- package/{src/libs/model-runtime → packages/model-runtime/src}/types/chat.ts +5 -1
- package/{src/libs/model-runtime → packages/model-runtime/src}/utils/googleErrorParser.test.ts +46 -34
- package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/responsesStream.ts +14 -12
- package/{src/libs/model-runtime → packages/model-runtime/src}/utils/usageConverter.test.ts +1 -1
- package/{src/libs/model-runtime → packages/model-runtime/src}/utils/usageConverter.ts +9 -5
- package/{src/libs/model-runtime → packages/model-runtime/src}/volcengine/index.ts +4 -4
- package/packages/types/src/agent/chatConfig.ts +6 -0
- package/packages/types/src/aiModel.ts +2 -0
- package/src/app/(backend)/middleware/auth/index.test.ts +1 -1
- package/src/app/(backend)/middleware/auth/index.ts +5 -1
- package/src/app/(backend)/middleware/auth/utils.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +1 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.ts +4 -4
- package/src/app/(backend)/webapi/chat/vertexai/route.ts +2 -1
- package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +1 -1
- package/src/app/(backend)/webapi/models/[provider]/route.ts +1 -1
- package/src/app/(backend)/webapi/plugin/gateway/route.ts +2 -2
- package/src/app/(backend)/webapi/text-to-image/[provider]/route.ts +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Azure/index.tsx +1 -1
- package/src/app/[variants]/(main)/settings/provider/(detail)/azure/page.tsx +1 -1
- package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +1 -1
- package/src/components/InvalidAPIKey/APIKeyForm/Bedrock.tsx +1 -1
- package/src/components/InvalidAPIKey/APIKeyForm/index.tsx +1 -1
- package/src/config/aiModels/aihubmix.ts +10 -12
- package/src/config/aiModels/openai.ts +7 -4
- package/src/const/settings/llm.ts +2 -1
- package/src/database/models/__tests__/aiProvider.test.ts +1 -1
- package/src/database/models/aiProvider.ts +1 -1
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +24 -0
- package/src/features/ChatInput/ActionBar/Model/GPT5ReasoningEffortSlider.tsx +58 -0
- package/src/features/ChatInput/ActionBar/Model/TextVerbositySlider.tsx +57 -0
- package/src/features/Conversation/Error/index.tsx +1 -1
- package/src/libs/trpc/client/lambda.ts +1 -1
- package/src/locales/default/chat.ts +3 -0
- package/src/server/globalConfig/_deprecated.test.ts +1 -1
- package/src/server/globalConfig/_deprecated.ts +2 -1
- package/src/server/globalConfig/genServerAiProviderConfig.test.ts +1 -1
- package/src/server/globalConfig/genServerAiProviderConfig.ts +2 -1
- package/src/server/modules/ModelRuntime/index.test.ts +10 -7
- package/src/server/modules/ModelRuntime/index.ts +2 -1
- package/src/server/modules/ModelRuntime/trace.ts +1 -1
- package/src/server/routers/async/ragEval.ts +1 -1
- package/src/services/__tests__/_auth.test.ts +1 -1
- package/src/services/__tests__/chat.test.ts +11 -11
- package/src/services/_auth.ts +2 -1
- package/src/services/chat.ts +14 -6
- package/src/services/textToImage.ts +2 -1
- package/src/store/image/slices/generationConfig/initialState.ts +2 -1
- package/src/store/user/slices/modelList/action.ts +1 -1
- package/src/utils/errorResponse.test.ts +1 -2
- package/src/utils/errorResponse.ts +1 -2
- package/src/utils/fetch/fetchSSE.ts +1 -1
- package/src/utils/genUserLLMConfig.test.ts +1 -1
- package/src/utils/genUserLLMConfig.ts +2 -1
- package/tsconfig.json +2 -0
- package/vitest.config.ts +1 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/BaseAI.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ModelRuntime.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ModelRuntime.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/baseRuntimeMap.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/createRuntime.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/createRuntime.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/RouterRuntime/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/UniformRuntime/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ai21/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ai21/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ai302/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ai360/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ai360/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/aihubmix/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/handleAnthropicError.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/anthropic/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/azureOpenai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/azureai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/baichuan/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/baichuan/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/bedrock/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/bedrock/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/cloudflare/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/cloudflare/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/cohere/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/deepseek/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/deepseek/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/error.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/fal/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/fal/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/fireworksai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/fireworksai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/giteeai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/giteeai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/github/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/github/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/google/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/groq/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/groq/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/parseToolCalls.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/helpers/parseToolCalls.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/higress/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/huggingface/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/hunyuan/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/hunyuan/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/infiniai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/internlm/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/internlm/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/jina/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/lmstudio/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/lmstudio/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/minimax/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/mistral/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/mistral/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/modelscope/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/moonshot/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/__snapshots__/index.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/fixtures/models.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/novita/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/nvidia/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ollama/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/__snapshots__/index.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/fixtures/openai-models.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/__snapshots__/index.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/fixtures/frontendModels.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/fixtures/models.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/openrouter/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/perplexity/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/perplexity/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/__snapshots__/index.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/fixtures/models.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/ppio/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/providerTestUtils.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/providerTestUtils.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/qiniu/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/qiniu/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/createImage.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/qwen/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/runtimeMap.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/sambanova/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/search1api/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/sensenova/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/siliconcloud/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/spark/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/spark/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/stepfun/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/stepfun/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/taichu/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/taichu/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/tencentcloud/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/tencentcloud/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/fixtures/models.json +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/togetherai/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/embeddings.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/image.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/model.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/textToImage.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/tts.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/types/type.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/upstage/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/upstage/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/anthropicHelpers.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/anthropicHelpers.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/cloudflareHelpers.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/cloudflareHelpers.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/createError.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/debugStream.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/debugStream.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/desensitizeUrl.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/desensitizeUrl.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/googleErrorParser.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/handleOpenAIError.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/modelParse.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/modelParse.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiCompatibleFactory/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiCompatibleFactory/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiHelpers.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/openaiHelpers.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/response.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/sensenovaHelpers.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/sensenovaHelpers.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/__snapshots__/protocol.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/anthropic.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/anthropic.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/claude.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/common.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/llama.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/bedrock/llama.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/google-ai.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/google-ai.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/model.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/ollama.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/ollama.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/__snapshots__/responsesStream.test.ts.snap +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/openai.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/openai.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/openai/responsesStream.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/protocol.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/protocol.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/qwen.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/qwen.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/spark.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/spark.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/utils.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/vertex-ai.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/streams/vertex-ai.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/uriParser.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/utils/uriParser.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/v0/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/vertexai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/vllm/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/wenxin/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/wenxin/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/xai/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/xai/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/xinference/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/zeroone/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/zeroone/index.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/zhipu/index.test.ts +0 -0
- /package/{src/libs/model-runtime → packages/model-runtime/src}/zhipu/index.ts +0 -0
@@ -20,12 +20,12 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
20
20
|
units: [
|
21
21
|
{ name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
|
22
22
|
{ name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
|
23
|
-
{ name: 'textInput_cacheRead', rate: 0.
|
23
|
+
{ name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
|
24
24
|
],
|
25
25
|
},
|
26
26
|
releasedAt: '2025-08-07',
|
27
27
|
settings: {
|
28
|
-
extendParams: ['
|
28
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
29
29
|
searchImpl: 'params',
|
30
30
|
},
|
31
31
|
type: 'chat',
|
@@ -48,12 +48,12 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
48
48
|
units: [
|
49
49
|
{ name: 'textInput', rate: 0.25, strategy: 'fixed', unit: 'millionTokens' },
|
50
50
|
{ name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
|
51
|
-
{ name: 'textInput_cacheRead', rate: 0.
|
51
|
+
{ name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
|
52
52
|
],
|
53
53
|
},
|
54
54
|
releasedAt: '2025-08-07',
|
55
55
|
settings: {
|
56
|
-
extendParams: ['
|
56
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
57
57
|
searchImpl: 'params',
|
58
58
|
},
|
59
59
|
type: 'chat',
|
@@ -61,13 +61,13 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
61
61
|
{
|
62
62
|
abilities: {
|
63
63
|
functionCall: true,
|
64
|
+
imageOutput: true,
|
64
65
|
reasoning: true,
|
65
66
|
vision: true,
|
66
67
|
},
|
67
68
|
contextWindowTokens: 400_000,
|
68
69
|
description: '最快、最经济高效的 GPT-5 版本。非常适合需要快速响应且成本敏感的应用场景。',
|
69
70
|
displayName: 'GPT-5 nano',
|
70
|
-
enabled: true,
|
71
71
|
id: 'gpt-5-nano',
|
72
72
|
maxOutput: 128_000,
|
73
73
|
pricing: {
|
@@ -78,10 +78,14 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
78
78
|
],
|
79
79
|
},
|
80
80
|
releasedAt: '2025-08-07',
|
81
|
+
settings: {
|
82
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
83
|
+
},
|
81
84
|
type: 'chat',
|
82
85
|
},
|
83
86
|
{
|
84
87
|
abilities: {
|
88
|
+
reasoning: true,
|
85
89
|
vision: true,
|
86
90
|
},
|
87
91
|
contextWindowTokens: 400_000,
|
@@ -95,7 +99,7 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
95
99
|
units: [
|
96
100
|
{ name: 'textInput', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
|
97
101
|
{ name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
|
98
|
-
{ name: 'textInput_cacheRead', rate: 0.
|
102
|
+
{ name: 'textInput_cacheRead', rate: 0.125, strategy: 'fixed', unit: 'millionTokens' },
|
99
103
|
],
|
100
104
|
},
|
101
105
|
releasedAt: '2025-08-07',
|
@@ -112,7 +116,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
112
116
|
description:
|
113
117
|
'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
|
114
118
|
displayName: 'o4-mini',
|
115
|
-
enabled: true,
|
116
119
|
id: 'o4-mini',
|
117
120
|
maxOutput: 100_000,
|
118
121
|
pricing: {
|
@@ -193,7 +196,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
193
196
|
description:
|
194
197
|
'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
|
195
198
|
displayName: 'o3',
|
196
|
-
enabled: true,
|
197
199
|
id: 'o3',
|
198
200
|
maxOutput: 100_000,
|
199
201
|
pricing: {
|
@@ -699,7 +701,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
699
701
|
description:
|
700
702
|
'基于Qwen3的思考模式开源模型,相较上一版本(通义千问3-235B-A22B)逻辑能力、通用能力、知识增强及创作能力均有大幅提升,适用于高难度强推理场景。',
|
701
703
|
displayName: 'Qwen3 235B A22B Thinking 2507',
|
702
|
-
enabled: true,
|
703
704
|
id: 'qwen3-235b-a22b-thinking-2507',
|
704
705
|
maxOutput: 32_768,
|
705
706
|
organization: 'Qwen',
|
@@ -721,7 +722,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
721
722
|
description:
|
722
723
|
'基于Qwen3的非思考模式开源模型,相较上一版本(通义千问3-235B-A22B)主观创作能力与模型安全性均有小幅度提升。',
|
723
724
|
displayName: 'Qwen3 235B A22B Instruct 2507',
|
724
|
-
enabled: true,
|
725
725
|
id: 'qwen3-235b-a22b-instruct-2507',
|
726
726
|
maxOutput: 32_768,
|
727
727
|
organization: 'Qwen',
|
@@ -744,7 +744,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
744
744
|
description:
|
745
745
|
'基于Qwen3的思考模式开源模型,相较上一版本(通义千问3-30B-A3B)逻辑能力、通用能力、知识增强及创作能力均有大幅提升,适用于高难度强推理场景。',
|
746
746
|
displayName: 'Qwen3 30B A3B Thinking 2507',
|
747
|
-
enabled: true,
|
748
747
|
id: 'qwen3-30b-a3b-thinking-2507',
|
749
748
|
maxOutput: 32_768,
|
750
749
|
organization: 'Qwen',
|
@@ -766,7 +765,6 @@ const aihubmixModels: AIChatModelCard[] = [
|
|
766
765
|
description:
|
767
766
|
'相较上一版本(Qwen3-30B-A3B)中英文和多语言整体通用能力有大幅提升。主观开放类任务专项优化,显著更加符合用户偏好,能够提供更有帮助性的回复。',
|
768
767
|
displayName: 'Qwen3 30B A3B Instruct 2507',
|
769
|
-
enabled: true,
|
770
768
|
id: 'qwen3-30b-a3b-instruct-2507',
|
771
769
|
maxOutput: 32_768,
|
772
770
|
organization: 'Qwen',
|
@@ -42,7 +42,7 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
42
42
|
},
|
43
43
|
releasedAt: '2025-08-07',
|
44
44
|
settings: {
|
45
|
-
extendParams: ['
|
45
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
46
46
|
searchImpl: 'params',
|
47
47
|
},
|
48
48
|
type: 'chat',
|
@@ -70,7 +70,7 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
70
70
|
},
|
71
71
|
releasedAt: '2025-08-07',
|
72
72
|
settings: {
|
73
|
-
extendParams: ['
|
73
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
74
74
|
searchImpl: 'params',
|
75
75
|
},
|
76
76
|
type: 'chat',
|
@@ -78,6 +78,7 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
78
78
|
{
|
79
79
|
abilities: {
|
80
80
|
functionCall: true,
|
81
|
+
imageOutput: true,
|
81
82
|
reasoning: true,
|
82
83
|
vision: true,
|
83
84
|
},
|
@@ -94,10 +95,14 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
94
95
|
],
|
95
96
|
},
|
96
97
|
releasedAt: '2025-08-07',
|
98
|
+
settings: {
|
99
|
+
extendParams: ['gpt5ReasoningEffort', 'textVerbosity'],
|
100
|
+
},
|
97
101
|
type: 'chat',
|
98
102
|
},
|
99
103
|
{
|
100
104
|
abilities: {
|
105
|
+
reasoning: true,
|
101
106
|
vision: true,
|
102
107
|
},
|
103
108
|
contextWindowTokens: 400_000,
|
@@ -128,7 +133,6 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
128
133
|
description:
|
129
134
|
'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
|
130
135
|
displayName: 'o4-mini',
|
131
|
-
enabled: true,
|
132
136
|
id: 'o4-mini',
|
133
137
|
maxOutput: 100_000,
|
134
138
|
pricing: {
|
@@ -209,7 +213,6 @@ export const openaiChatModels: AIChatModelCard[] = [
|
|
209
213
|
description:
|
210
214
|
'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
|
211
215
|
displayName: 'o3',
|
212
|
-
enabled: true,
|
213
216
|
id: 'o3',
|
214
217
|
maxOutput: 100_000,
|
215
218
|
pricing: {
|
@@ -1,9 +1,9 @@
|
|
1
1
|
// @vitest-environment node
|
2
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
2
3
|
import { eq } from 'drizzle-orm';
|
3
4
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
4
5
|
|
5
6
|
import { LobeChatDatabase } from '@/database/type';
|
6
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
7
7
|
import { sleep } from '@/utils/sleep';
|
8
8
|
|
9
9
|
import { aiProviders, users } from '../../schemas';
|
@@ -1,9 +1,9 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
1
2
|
import { and, asc, desc, eq } from 'drizzle-orm';
|
2
3
|
import { isEmpty } from 'lodash-es';
|
3
4
|
|
4
5
|
import { DEFAULT_MODEL_PROVIDER_LIST } from '@/config/modelProviders';
|
5
6
|
import { LobeChatDatabase } from '@/database/type';
|
6
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
7
7
|
import {
|
8
8
|
AiProviderDetailItem,
|
9
9
|
AiProviderListItem,
|
@@ -11,8 +11,10 @@ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selector
|
|
11
11
|
import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
|
12
12
|
|
13
13
|
import ContextCachingSwitch from './ContextCachingSwitch';
|
14
|
+
import GPT5ReasoningEffortSlider from './GPT5ReasoningEffortSlider';
|
14
15
|
import ReasoningEffortSlider from './ReasoningEffortSlider';
|
15
16
|
import ReasoningTokenSlider from './ReasoningTokenSlider';
|
17
|
+
import TextVerbositySlider from './TextVerbositySlider';
|
16
18
|
import ThinkingBudgetSlider from './ThinkingBudgetSlider';
|
17
19
|
import ThinkingSlider from './ThinkingSlider';
|
18
20
|
|
@@ -95,6 +97,28 @@ const ControlsForm = memo(() => {
|
|
95
97
|
paddingBottom: 0,
|
96
98
|
},
|
97
99
|
},
|
100
|
+
{
|
101
|
+
children: <GPT5ReasoningEffortSlider />,
|
102
|
+
desc: 'reasoning_effort',
|
103
|
+
label: t('extendParams.reasoningEffort.title'),
|
104
|
+
layout: 'horizontal',
|
105
|
+
minWidth: undefined,
|
106
|
+
name: 'gpt5ReasoningEffort',
|
107
|
+
style: {
|
108
|
+
paddingBottom: 0,
|
109
|
+
},
|
110
|
+
},
|
111
|
+
{
|
112
|
+
children: <TextVerbositySlider />,
|
113
|
+
desc: 'text_verbosity',
|
114
|
+
label: t('extendParams.textVerbosity.title'),
|
115
|
+
layout: 'horizontal',
|
116
|
+
minWidth: undefined,
|
117
|
+
name: 'textVerbosity',
|
118
|
+
style: {
|
119
|
+
paddingBottom: 0,
|
120
|
+
},
|
121
|
+
},
|
98
122
|
{
|
99
123
|
children: <ThinkingBudgetSlider />,
|
100
124
|
label: t('extendParams.reasoningBudgetToken.title'),
|
@@ -0,0 +1,58 @@
|
|
1
|
+
import { Slider } from 'antd';
import { memo, useCallback } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useAgentStore } from '@/store/agent';
import { agentChatConfigSelectors } from '@/store/agent/selectors';

// GPT-5 reasoning effort levels, ordered least → most reasoning.
// Hoisted to module scope so they are referentially stable across renders
// and are valid inside the useCallback below without being listed as
// (ever-changing) dependencies.
const EFFORT_VALUES = ['minimal', 'low', 'medium', 'high'] as const;

// Slider tick labels: slider index → effort label.
const MARKS = { 0: 'minimal', 1: 'low', 2: 'medium', 3: 'high' };

// Index used when the stored value is missing or unrecognized ('medium').
const DEFAULT_EFFORT_INDEX = 2;

/**
 * Slider control for the GPT-5 `reasoning_effort` extend-param.
 *
 * Maps the four discrete effort levels onto a 0–3 slider and persists the
 * selection into the current agent chat config (`gpt5ReasoningEffort`).
 */
const GPT5ReasoningEffortSlider = memo(() => {
  const [config, updateAgentChatConfig] = useAgentStore((s) => [
    agentChatConfigSelectors.currentChatConfig(s),
    s.updateAgentChatConfig,
  ]);

  // Default to 'medium' if not set.
  const gpt5ReasoningEffort = config.gpt5ReasoningEffort || 'medium';

  // Fall back to the default index when the stored value is unrecognized.
  const indexValue = (EFFORT_VALUES as readonly string[]).indexOf(gpt5ReasoningEffort);
  const currentValue = indexValue === -1 ? DEFAULT_EFFORT_INDEX : indexValue;

  const updateGPT5ReasoningEffort = useCallback(
    (value: number) => {
      // Guard the lookup: never persist `undefined` for an out-of-range index.
      const effort = EFFORT_VALUES[value];
      if (effort) updateAgentChatConfig({ gpt5ReasoningEffort: effort });
    },
    [updateAgentChatConfig],
  );

  return (
    <Flexbox
      align={'center'}
      gap={12}
      horizontal
      paddingInline={'0 20px'}
      style={{ minWidth: 200, width: '100%' }}
    >
      <Flexbox flex={1}>
        <Slider
          marks={MARKS}
          max={3}
          min={0}
          onChange={updateGPT5ReasoningEffort}
          step={1}
          tooltip={{ open: false }}
          value={currentValue}
        />
      </Flexbox>
    </Flexbox>
  );
});

export default GPT5ReasoningEffortSlider;
|
@@ -0,0 +1,57 @@
|
|
1
|
+
import { Slider } from 'antd';
import { memo, useCallback } from 'react';
import { Flexbox } from 'react-layout-kit';

import { useAgentStore } from '@/store/agent';
import { agentChatConfigSelectors } from '@/store/agent/selectors';

// Text verbosity levels, ordered least → most verbose.
// Hoisted to module scope so they are referentially stable across renders
// and are valid inside the useCallback below without being listed as
// (ever-changing) dependencies.
const VERBOSITY_VALUES = ['low', 'medium', 'high'] as const;

// Slider tick labels: slider index → verbosity label.
const MARKS = { 0: 'low', 1: 'medium', 2: 'high' };

// Index used when the stored value is missing or unrecognized ('medium').
const DEFAULT_VERBOSITY_INDEX = 1;

/**
 * Slider control for the `text_verbosity` extend-param.
 *
 * Maps the three discrete verbosity levels onto a 0–2 slider and persists
 * the selection into the current agent chat config (`textVerbosity`).
 */
const TextVerbositySlider = memo(() => {
  const [config, updateAgentChatConfig] = useAgentStore((s) => [
    agentChatConfigSelectors.currentChatConfig(s),
    s.updateAgentChatConfig,
  ]);

  // Default to 'medium' if not set.
  const textVerbosity = config.textVerbosity || 'medium';

  // Fall back to the default index when the stored value is unrecognized.
  const indexValue = (VERBOSITY_VALUES as readonly string[]).indexOf(textVerbosity);
  const currentValue = indexValue === -1 ? DEFAULT_VERBOSITY_INDEX : indexValue;

  const updateTextVerbosity = useCallback(
    (value: number) => {
      // Guard the lookup: never persist `undefined` for an out-of-range index.
      const verbosity = VERBOSITY_VALUES[value];
      if (verbosity) updateAgentChatConfig({ textVerbosity: verbosity });
    },
    [updateAgentChatConfig],
  );

  return (
    <Flexbox
      align={'center'}
      gap={12}
      horizontal
      paddingInline={'0 20px'}
      style={{ minWidth: 200, width: '100%' }}
    >
      <Flexbox flex={1}>
        <Slider
          marks={MARKS}
          max={2}
          min={0}
          onChange={updateTextVerbosity}
          step={1}
          tooltip={{ open: false }}
          value={currentValue}
        />
      </Flexbox>
    </Flexbox>
  );
});

export default TextVerbositySlider;
|
@@ -1,3 +1,4 @@
|
|
1
|
+
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '@lobechat/model-runtime';
|
1
2
|
import { ChatErrorType, ErrorType } from '@lobechat/types';
|
2
3
|
import { IPluginErrorType } from '@lobehub/chat-plugin-sdk';
|
3
4
|
import type { AlertProps } from '@lobehub/ui';
|
@@ -7,7 +8,6 @@ import { Suspense, memo, useMemo } from 'react';
|
|
7
8
|
import { useTranslation } from 'react-i18next';
|
8
9
|
|
9
10
|
import { useProviderName } from '@/hooks/useProviderName';
|
10
|
-
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
|
11
11
|
import { ChatMessage, ChatMessageError } from '@/types/message';
|
12
12
|
|
13
13
|
import ChatInvalidAPIKey from './ChatInvalidApiKey';
|
@@ -1,10 +1,10 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
1
2
|
import { createTRPCClient, httpBatchLink } from '@trpc/client';
|
2
3
|
import { createTRPCReact } from '@trpc/react-query';
|
3
4
|
import debug from 'debug';
|
4
5
|
import superjson from 'superjson';
|
5
6
|
|
6
7
|
import { isDesktop } from '@/const/version';
|
7
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
8
8
|
import type { LambdaRouter } from '@/server/routers/lambda';
|
9
9
|
|
10
10
|
import { ErrorResponse } from './types';
|
@@ -3,7 +3,7 @@ import { describe, expect, it, vi } from 'vitest';
|
|
3
3
|
import { genServerLLMConfig } from './_deprecated';
|
4
4
|
|
5
5
|
// Mock ModelProvider enum
|
6
|
-
vi.mock('
|
6
|
+
vi.mock('@lobechat/model-runtime', () => ({
|
7
7
|
ModelProvider: {
|
8
8
|
Azure: 'azure',
|
9
9
|
Bedrock: 'bedrock',
|
@@ -1,6 +1,7 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
2
|
+
|
1
3
|
import { getLLMConfig } from '@/config/llm';
|
2
4
|
import * as ProviderCards from '@/config/modelProviders';
|
3
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
4
5
|
import { ModelProviderCard } from '@/types/llm';
|
5
6
|
import { extractEnabledModels, transformToChatModelCards } from '@/utils/_deprecated/parseModels';
|
6
7
|
|
@@ -212,7 +212,7 @@ describe('genServerAiProvidersConfig Error Handling', () => {
|
|
212
212
|
}));
|
213
213
|
|
214
214
|
// Mock ModelProvider to include the missing provider
|
215
|
-
vi.doMock('
|
215
|
+
vi.doMock('@lobechat/model-runtime', () => ({
|
216
216
|
ModelProvider: {
|
217
217
|
openai: 'openai', // This exists in enum
|
218
218
|
anthropic: 'anthropic', // This exists in both enum and aiModels
|
@@ -1,6 +1,7 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
2
|
+
|
1
3
|
import * as AiModels from '@/config/aiModels';
|
2
4
|
import { getLLMConfig } from '@/config/llm';
|
3
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
4
5
|
import { AiFullModelCard } from '@/types/aiModel';
|
5
6
|
import { ProviderConfig } from '@/types/user/settings';
|
6
7
|
import { extractEnabledModels, transformToAiModelList } from '@/utils/parseModels';
|
@@ -1,7 +1,4 @@
|
|
1
1
|
// @vitest-environment node
|
2
|
-
import { describe, expect, it, vi } from 'vitest';
|
3
|
-
|
4
|
-
import { ClientSecretPayload } from '@/const/auth';
|
5
2
|
import {
|
6
3
|
LobeAnthropicAI,
|
7
4
|
LobeAzureOpenAI,
|
@@ -22,8 +19,11 @@ import {
|
|
22
19
|
LobeZeroOneAI,
|
23
20
|
LobeZhipuAI,
|
24
21
|
ModelProvider,
|
25
|
-
} from '
|
26
|
-
import { ModelRuntime } from '
|
22
|
+
} from '@lobechat/model-runtime';
|
23
|
+
import { ModelRuntime } from '@lobechat/model-runtime';
|
24
|
+
import { describe, expect, it, vi } from 'vitest';
|
25
|
+
|
26
|
+
import { ClientSecretPayload } from '@/const/auth';
|
27
27
|
import { LobeStepfunAI } from '@/libs/model-runtime/stepfun';
|
28
28
|
|
29
29
|
import { initModelRuntimeWithUserPayload } from './index';
|
@@ -60,14 +60,17 @@ vi.mock('@/config/llm', () => ({
|
|
60
60
|
|
61
61
|
/**
|
62
62
|
* Test cases for function initModelRuntimeWithUserPayload
|
63
|
-
* this method will use ModelRuntime from
|
63
|
+
* this method will use ModelRuntime from `@lobechat/model-runtime`
|
64
64
|
* and method `getLlmOptionsFromPayload` to initialize runtime
|
65
65
|
* with user payload. Test case below will test both the methods
|
66
66
|
*/
|
67
67
|
describe('initModelRuntimeWithUserPayload method', () => {
|
68
68
|
describe('should initialize with options correctly', () => {
|
69
69
|
it('OpenAI provider: with apikey and endpoint', async () => {
|
70
|
-
const jwtPayload: ClientSecretPayload = {
|
70
|
+
const jwtPayload: ClientSecretPayload = {
|
71
|
+
apiKey: 'user-openai-key',
|
72
|
+
baseURL: 'user-endpoint',
|
73
|
+
};
|
71
74
|
const runtime = await initModelRuntimeWithUserPayload(ModelProvider.OpenAI, jwtPayload);
|
72
75
|
expect(runtime).toBeInstanceOf(ModelRuntime);
|
73
76
|
expect(runtime['_runtime']).toBeInstanceOf(LobeOpenAI);
|
@@ -1,6 +1,7 @@
|
|
1
|
+
import { ModelProvider, ModelRuntime } from '@lobechat/model-runtime';
|
2
|
+
|
1
3
|
import { getLLMConfig } from '@/config/llm';
|
2
4
|
import { ClientSecretPayload } from '@/const/auth';
|
3
|
-
import { ModelProvider, ModelRuntime } from '@/libs/model-runtime';
|
4
5
|
|
5
6
|
import apiKeyManager from './apiKeyManager';
|
6
7
|
|
@@ -1,9 +1,9 @@
|
|
1
|
+
import { ChatStreamCallbacks, ChatStreamPayload } from '@lobechat/model-runtime';
|
1
2
|
import { TracePayload, TraceTagMap } from '@lobechat/types';
|
2
3
|
import { after } from 'next/server';
|
3
4
|
|
4
5
|
import { INBOX_SESSION_ID } from '@/const/session';
|
5
6
|
import { LOBE_CHAT_OBSERVATION_ID, LOBE_CHAT_TRACE_ID } from '@/const/trace';
|
6
|
-
import { ChatStreamCallbacks, ChatStreamPayload } from '@/libs/model-runtime';
|
7
7
|
import { TraceClient } from '@/libs/traces';
|
8
8
|
|
9
9
|
export interface AgentChatOptions {
|
@@ -1,3 +1,4 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
1
2
|
import { TRPCError } from '@trpc/server';
|
2
3
|
import OpenAI from 'openai';
|
3
4
|
import { z } from 'zod';
|
@@ -12,7 +13,6 @@ import {
|
|
12
13
|
EvalEvaluationModel,
|
13
14
|
EvaluationRecordModel,
|
14
15
|
} from '@/database/server/models/ragEval';
|
15
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
16
16
|
import { asyncAuthedProcedure, asyncRouter as router } from '@/libs/trpc/async';
|
17
17
|
import { initModelRuntimeWithUserPayload } from '@/server/modules/ModelRuntime';
|
18
18
|
import { ChunkService } from '@/server/services/chunk';
|
@@ -1,7 +1,7 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
1
2
|
import { act } from '@testing-library/react';
|
2
3
|
import { describe, expect, it, vi } from 'vitest';
|
3
4
|
|
4
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
5
5
|
import { useUserStore } from '@/store/user';
|
6
6
|
import {
|
7
7
|
GlobalLLMProviderKey,
|
@@ -1,12 +1,3 @@
|
|
1
|
-
import { ChatErrorType } from '@lobechat/types';
|
2
|
-
import { LobeChatPluginManifest } from '@lobehub/chat-plugin-sdk';
|
3
|
-
import { act } from '@testing-library/react';
|
4
|
-
import { merge } from 'lodash-es';
|
5
|
-
import OpenAI from 'openai';
|
6
|
-
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
7
|
-
|
8
|
-
import { DEFAULT_USER_AVATAR } from '@/const/meta';
|
9
|
-
import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
|
10
1
|
import {
|
11
2
|
LobeAnthropicAI,
|
12
3
|
LobeAzureOpenAI,
|
@@ -26,8 +17,17 @@ import {
|
|
26
17
|
LobeZeroOneAI,
|
27
18
|
LobeZhipuAI,
|
28
19
|
ModelProvider,
|
29
|
-
} from '
|
30
|
-
import { ModelRuntime } from '
|
20
|
+
} from '@lobechat/model-runtime';
|
21
|
+
import { ModelRuntime } from '@lobechat/model-runtime';
|
22
|
+
import { ChatErrorType } from '@lobechat/types';
|
23
|
+
import { LobeChatPluginManifest } from '@lobehub/chat-plugin-sdk';
|
24
|
+
import { act } from '@testing-library/react';
|
25
|
+
import { merge } from 'lodash-es';
|
26
|
+
import OpenAI from 'openai';
|
27
|
+
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
28
|
+
|
29
|
+
import { DEFAULT_USER_AVATAR } from '@/const/meta';
|
30
|
+
import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
|
31
31
|
import { agentChatConfigSelectors } from '@/store/agent/selectors';
|
32
32
|
import { aiModelSelectors } from '@/store/aiInfra';
|
33
33
|
import { useToolStore } from '@/store/tool';
|
package/src/services/_auth.ts
CHANGED
@@ -1,6 +1,7 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
2
|
+
|
1
3
|
import { ClientSecretPayload, LOBE_CHAT_AUTH_HEADER } from '@/const/auth';
|
2
4
|
import { isDeprecatedEdition } from '@/const/version';
|
3
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
4
5
|
import { aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
|
5
6
|
import { useUserStore } from '@/store/user';
|
6
7
|
import { keyVaultsConfigSelectors, userProfileSelectors } from '@/store/user/selectors';
|
package/src/services/chat.ts
CHANGED
@@ -1,3 +1,9 @@
|
|
1
|
+
import {
|
2
|
+
AgentRuntimeError,
|
3
|
+
ChatCompletionErrorPayload,
|
4
|
+
ModelProvider,
|
5
|
+
ModelRuntime,
|
6
|
+
} from '@lobechat/model-runtime';
|
1
7
|
import { ChatErrorType, TracePayload, TraceTagMap } from '@lobechat/types';
|
2
8
|
import { PluginRequestPayload, createHeadersWithPluginSettings } from '@lobehub/chat-plugin-sdk';
|
3
9
|
import { produce } from 'immer';
|
@@ -8,12 +14,6 @@ import { INBOX_GUIDE_SYSTEMROLE } from '@/const/guide';
|
|
8
14
|
import { INBOX_SESSION_ID } from '@/const/session';
|
9
15
|
import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
|
10
16
|
import { isDeprecatedEdition, isDesktop, isServerMode } from '@/const/version';
|
11
|
-
import {
|
12
|
-
AgentRuntimeError,
|
13
|
-
ChatCompletionErrorPayload,
|
14
|
-
ModelProvider,
|
15
|
-
ModelRuntime,
|
16
|
-
} from '@/libs/model-runtime';
|
17
17
|
import { parseDataUri } from '@/libs/model-runtime/utils/uriParser';
|
18
18
|
import { filesPrompts } from '@/prompts/files';
|
19
19
|
import { BuiltinSystemRolePrompts } from '@/prompts/systemRole';
|
@@ -280,6 +280,14 @@ class ChatService {
|
|
280
280
|
extendParams.reasoning_effort = chatConfig.reasoningEffort;
|
281
281
|
}
|
282
282
|
|
283
|
+
if (modelExtendParams!.includes('gpt5ReasoningEffort') && chatConfig.gpt5ReasoningEffort) {
|
284
|
+
extendParams.reasoning_effort = chatConfig.gpt5ReasoningEffort;
|
285
|
+
}
|
286
|
+
|
287
|
+
if (modelExtendParams!.includes('textVerbosity') && chatConfig.textVerbosity) {
|
288
|
+
extendParams.verbosity = chatConfig.textVerbosity;
|
289
|
+
}
|
290
|
+
|
283
291
|
if (modelExtendParams!.includes('thinking') && chatConfig.thinking) {
|
284
292
|
extendParams.thinking = { type: chatConfig.thinking };
|
285
293
|
}
|
@@ -1,6 +1,7 @@
|
|
1
1
|
/* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
|
2
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
3
|
+
|
2
4
|
import { gptImage1ParamsSchema } from '@/config/aiModels/openai';
|
3
|
-
import { ModelProvider } from '@/libs/model-runtime/types/type';
|
4
5
|
import {
|
5
6
|
ModelParamsSchema,
|
6
7
|
RuntimeImageGenParams,
|
@@ -1,8 +1,8 @@
|
|
1
|
+
import { ModelProvider } from '@lobechat/model-runtime';
|
1
2
|
import { produce } from 'immer';
|
2
3
|
import useSWR, { SWRResponse } from 'swr';
|
3
4
|
import type { StateCreator } from 'zustand/vanilla';
|
4
5
|
|
5
|
-
import { ModelProvider } from '@/libs/model-runtime';
|
6
6
|
import { UserStore } from '@/store/user';
|
7
7
|
import type { ChatModelCard, ModelProviderCard } from '@/types/llm';
|
8
8
|
import type {
|
@@ -1,8 +1,7 @@
|
|
1
|
+
import { AgentRuntimeErrorType } from '@lobechat/model-runtime';
|
1
2
|
import { ChatErrorType } from '@lobechat/types';
|
2
3
|
import { describe, expect, it, vi } from 'vitest';
|
3
4
|
|
4
|
-
import { AgentRuntimeErrorType } from '@/libs/model-runtime';
|
5
|
-
|
6
5
|
import { createErrorResponse } from './errorResponse';
|
7
6
|
|
8
7
|
describe('createErrorResponse', () => {
|
@@ -1,7 +1,6 @@
|
|
1
|
+
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '@lobechat/model-runtime';
|
1
2
|
import { ChatErrorType, ErrorResponse, ErrorType } from '@lobechat/types';
|
2
3
|
|
3
|
-
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '@/libs/model-runtime';
|
4
|
-
|
5
4
|
const getStatus = (errorType: ILobeAgentRuntimeErrorType | ErrorType) => {
|
6
5
|
// InvalidAccessCode / InvalidAzureAPIKey / InvalidOpenAIAPIKey / InvalidZhipuAPIKey ....
|
7
6
|
if (errorType.toString().includes('Invalid')) return 401;
|